From 093a2579b7ab3fb07ef31250225b2033390b3b42 Mon Sep 17 00:00:00 2001 From: "E.N. Postolec" Date: Tue, 8 Apr 2025 14:08:10 +0200 Subject: [PATCH 001/105] Change in escape/wrapper.py to put a warning if M_vols too low --- src/proteus/escape/wrapper.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/proteus/escape/wrapper.py b/src/proteus/escape/wrapper.py index c8b496e8e..5ee58af52 100644 --- a/src/proteus/escape/wrapper.py +++ b/src/proteus/escape/wrapper.py @@ -127,6 +127,11 @@ def calc_new_elements(hf_row:dict, dt:float, reservoir:str): res[e] = hf_row[e+key] M_vols = sum(list(res.values())) + # To avoid division by zero or very small values error for M_vols + if M_vols < 10.0: + log.warning("M_vols is too small (%.2e). Setting all target elements to zero." % M_vols) + return {e: 0.0 for e in res.keys()} + # calculate the current mass mixing ratio for each element # if escape is unfractionating, this should be conserved emr = {} From 13d9272105baced42079610840f8b5ce4c738836 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 18 Apr 2025 13:43:07 +0200 Subject: [PATCH 002/105] Minor doc update --- mkdocs.yml | 2 +- src/proteus/config/_escape.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mkdocs.yml b/mkdocs.yml index 9269e3f03..f14f22f9d 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -11,8 +11,8 @@ nav: - Troubleshooting: troubleshooting.md - Data files: data.md - Kapteyn cluster: kapteyn_cluster_guide.md - - Snellius cluster: snellius_cluster_guide.md - Habrok cluster: habrok_cluster_guide.md + - Snellius cluster: snellius_cluster_guide.md - Contact: contact.md - Contributing: CONTRIBUTING.md - Code of Conduct: CODE_OF_CONDUCT.md diff --git a/src/proteus/config/_escape.py b/src/proteus/config/_escape.py index 8e16bca6c..f092151d3 100644 --- a/src/proteus/config/_escape.py +++ b/src/proteus/config/_escape.py @@ -25,7 +25,7 @@ class Zephyrus: Attributes ---------- Pxuv: float - Pressure at which XUV radiation become opaque in 
the planetary atmosphere [bar] + Pressure at which XUV radiation become opaque in the planetary atmosphere (should be within 0 < Pxuv < 10 bars) [bar] efficiency: float Escape efficiency factor tidal: bool From ff6bae5a8e48b38790bd1bc69d92781a74790d8e Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 18 Apr 2025 14:12:18 +0200 Subject: [PATCH 003/105] Add toi561b toml file in input/planets --- input/planets/toi561b.toml | 316 +++++++++++++++++++++++++++++++++++++ 1 file changed, 316 insertions(+) create mode 100644 input/planets/toi561b.toml diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml new file mode 100644 index 000000000..3830e36a8 --- /dev/null +++ b/input/planets/toi561b.toml @@ -0,0 +1,316 @@ +# PROTEUS configuration file (version 2.0) + +# This is a comprehensive outline of all configuration options. It includes variables +# which have default values, in order to showcase the range of potential options available. +# Variable defaults are defined in `src/proteus/config/*.py` + +# Root tables should be physical, with the exception of "params" +# Software related options should go within the appropriate physical table + +# The general structure is: +# [root] metadata +# [params] parameters for code execution, output files, time-stepping, convergence +# [star] stellar parameters, model selection +# [orbit] planetary orbital parameters +# [struct] planetary structure (mass, radius) +# [atmos] atmosphere parameters, model selection +# [escape] escape parameters, model selection +# [interior] magma ocean model selection and parameters +# [outgas] outgassing parameters (fO2) and included volatiles +# [delivery] initial volatile inventory, and delivery model selection +# [observe] synthetic observations + +# ---------------------------------------------------- +# Metadata +version = "2.0" +author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" + +# ---------------------------------------------------- +# Parameters +[params] + # output 
files + [params.out] + path = "scratch/toi561b_test_escape_7Gyr" + logging = "DEBUG" + plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations + plot_fmt = "png" # Plotting image file format, "png" or "pdf" recommended + write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations + + # time-stepping + [params.dt] + minimum = 3e2 # yr, minimum time-step + minimum_rel = 1e-5 # relative minimum time-step [dimensionless] + maximum = 1e11 # yr, maximum time-step + initial = 1e3 # yr, inital step size + starspec = 3e6 # yr, interval to re-calculate the stellar spectrum + starinst = 1e2 # yr, interval to re-calculate the instellation + method = "adaptive" # proportional | adaptive | maximum + + [params.dt.proportional] + propconst = 52.0 # Proportionality constant + + [params.dt.adaptive] + atol = 0.02 # Step size atol + rtol = 0.07 # Step size rtol + + # Termination criteria + # Set enabled=true/false in each section to enable/disable that termination criterion + [params.stop] + + # Require criteria to be satisfied twice before model will exit? 
+ strict = false + + # required number of iterations + [params.stop.iters] + enabled = true + minimum = 5 + maximum = 9000 + + # required time constraints + [params.stop.time] + enabled = true + minimum = 1.0e3 # yr, model will certainly run to t > minimum + maximum = 1.0e10 # yr, model will terminate when t > maximum + + # solidification + [params.stop.solid] + enabled = false + phi_crit = 0.005 # non-dim., model will terminate when global melt fraction < phi_crit + + # radiative equilibrium + [params.stop.radeqm] + enabled = false + atol = 0.1 # absolute tolerance [W m-2] + rtol = 1e-3 # relative tolerance + + [params.stop.escape] + enabled = false + p_stop = 1.0 # bar, model will terminate with p_surf < p_stop + + +# ---------------------------------------------------- +# Star +[star] + + # Physical parameters + mass = 0.806 # M_sun from Lacedelli et al., 2022 + age_ini = 0.100 # Gyr, model initialisation/start age + + module = "mors" + [star.mors] + rot_pcntle = 50.0 # rotation percentile + rot_period = 'none' # rotation period [days] + tracks = "spada" # evolution tracks: spada | baraffe + age_now = 7 # 10 Gyr, current age of star used for scaling from Weiss et al., 2021 + spec = "stellar_spectra/Named/sun.txt" # stellar spectrum + + [star.dummy] + radius = 0.843 # R_sun from Lacedelli et al., 2022 + Teff = 5372.0 # K from Lacedelli et al., 2022 + +# Orbital system +[orbit] + semimajoraxis = 0.0106 # AU from Brinkman et al. 2023 + eccentricity = 0.0 # dimensionless from Brinkman et al. 2023 + zenith_angle = 48.19 # degrees + s0_factor = 0.375 # dimensionless + + module = "none" + + [orbit.dummy] + H_tide = 1e-11 # Fixed tidal power density [W kg-1] + Phi_tide = "<0.3" # Tidal heating applied when inequality locally satisfied + + [orbit.lovepy] + visc_thresh = 1e9 # Minimum viscosity required for heating [Pa s] + +# Planetary structure - physics table +[struct] + #mass_tot = 2.24 # M_earth from Brinkman et al. 
2023 + radius_int = 1.37 # R_earth from Brinkman et al. 2023 + corefrac = 0.55 # non-dim., radius fraction 0.20 from Brinkman et al. 2023 + core_density = 10738.33 # Core density [kg m-3] + core_heatcap = 880.0 # Core specific heat capacity [J K-1 kg-1] + +# Atmosphere - physics table +[atmos_clim] + prevent_warming = true # do not allow the planet to heat up + surface_d = 0.01 # m, conductive skin thickness + surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity + cloud_enabled = false # enable water cloud radiative effects + cloud_alpha = 0.0 # condensate retention fraction (1 -> fully retained) + surf_state = "fixed" # surface scheme: "mixed_layer" | "fixed" | "skin" + surf_greyalbedo = 0.2 # surface grey albedo + albedo_pl = 0.0 # Enforced Bond albedo (do not use with `rayleigh = true`) from Lacedelli et al. 2022 + rayleigh = true # Enable rayleigh scattering + tmp_minimum = 0.5 # temperature floor on solver + tmp_maximum = 5000.0 # temperature ceiling on solver + + module = "janus" # Which atmosphere module to use + + [atmos_clim.agni] + p_top = 1.0e-5 # bar, top of atmosphere grid pressure + spectral_group = "Honeyside" # which gas opacities to include + spectral_bands = "256" # how many spectral bands? + num_levels = 60 # Number of atmospheric grid levels + chemistry = "none" # "none" | "eq" + surf_material = "greybody" # surface material file for scattering + solve_energy = true # solve for energy-conserving atmosphere profile + solution_atol = 1e-2 # solver absolute tolerance + solution_rtol = 5e-2 # solver relative tolerance + overlap_method = "ee" # gas overlap method + condensation = true # volatile condensation + real_gas = true # use real-gas equations of state + + [atmos_clim.janus] + p_top = 1.0e-5 # bar, top of atmosphere grid pressure + p_obs = 1.0e-3 # bar, observed pressure level + spectral_group = "Honeyside" # which gas opacities to include + spectral_bands = "256" # how many spectral bands? 
+ F_atm_bc = 0 # measure outgoing flux at: (0) TOA | (1) Surface + num_levels = 60 # Number of atmospheric grid levels + tropopause = "none" # none | skin | dynamic + overlap_method = "ee" # gas overlap method + + [atmos_clim.dummy] + gamma = 0.7 # atmosphere opacity between 0 and 1 + +# Volatile escape - physics table +[escape] + + module = "zephyrus" # Which escape module to use + reservoir = "bulk" # Escaping reservoir: "bulk", "outgas", "pxuv". + + [escape.zephyrus] + Pxuv = 1e-2 # Pressure at which XUV radiation become opaque in the planetary atmosphere [bar] + efficiency = 1.0 # Escape efficiency factor + tidal = false # Tidal contribution enabled + + [escape.dummy] + rate = 0.0 # Bulk unfractionated escape rate [kg s-1] + +# Interior - physics table +[interior] + grain_size = 0.1 # crystal settling grain size [m] + F_initial = 1e5 # Initial heat flux guess [W m-2] + radiogenic_heat = true # enable radiogenic heat production + tidal_heat = false # enable tidal heat production + rheo_phi_loc = 0.4 # Centre of rheological transition + rheo_phi_wid = 0.15 # Width of rheological transition + bulk_modulus = 260e9 # Bulk modulus [Pa] + + module = "spider" # Which interior module to use + + [interior.spider] + num_levels = 220 # Number of SPIDER grid levels + mixing_length = 2 # Mixing length parameterization + tolerance = 1.0e-10 # solver tolerance + tolerance_rel = 1.0e-8 # relative solver tolerance + solver_type = "bdf" # SUNDIALS solver method + tsurf_atol = 20.0 # tsurf_poststep_change + tsurf_rtol = 0.01 # tsurf_poststep_change_frac + ini_entropy = 3300.0 # Surface entropy conditions [J K-1 kg-1] + ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1] + + [interior.aragog] + num_levels = 220 # Number of Aragog grid levels + tolerance = 1.0e-10 # solver tolerance + ini_tmagma = 3200.0 # Initial magma surface temperature [K] + + [interior.dummy] + ini_tmagma = 3500.0 # Initial magma surface temperature [K] + +# Outgassing - physics table +[outgas] + 
fO2_shift_IW = 0 # log10(ΔIW), atmosphere/interior boundary oxidation state + + module = "calliope" # Which outgassing module to use + + [outgas.calliope] + include_H2O = true # Include H2O compound + include_CO2 = true # Include CO2 compound + include_N2 = true # Include N2 compound + include_S2 = true # Include S2 compound + include_SO2 = true # Include SO2 compound + include_H2S = true # Include H2S compound + include_NH3 = true # Include NH3 compound + include_H2 = true # Include H2 compound + include_CH4 = true # Include CH4 compound + include_CO = true # Include CO compound + T_floor = 2310.0 # Temperature floor applied to outgassing calculation [K]. + + [outgas.atmodeller] + some_parameter = "some_value" + +# Volatile delivery - physics table +[delivery] + + # Radionuclide parameters + radio_tref = 4.55 # Reference age for concentrations [Gyr] 4.55 before need to discuss w Tim + radio_K = 310.0 # ppmw of potassium (all isotopes) + radio_U = 0.031 # ppmw of uranium (all isotopes) + radio_Th = 0.124 # ppmw of thorium (all isotopes) + + # Which initial inventory to use? 
+ initial = 'elements' # "elements" | "volatiles" + + # No module for accretion as of yet + module = "none" + + # Set initial volatile inventory by planetary element abundances + [delivery.elements] + # H_oceans = 0.0 # Hydrogen inventory in units of equivalent Earth oceans + H_ppmw = 109.0 # Hydrogen inventory in ppmw relative to mantle mass + + # CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system + C_ppmw = 109.0 # Carbon inventory in ppmw relative to mantle mass + + # NH_ratio = 0.018 # N/H mass ratio in mantle/atmosphere system + N_ppmw = 20.1 # Nitrogen inventory in ppmw relative to mantle mass + + # SH_ratio = 2.16 # S/H mass ratio in mantle/atmosphere system + S_ppmw = 235.0 # Sulfur inventory in ppmw relative to mantle mass + + # Set initial volatile inventory by partial pressures in atmosphere + [delivery.volatiles] + H2O = 0.0 # partial pressure of H2O + CO2 = 0.0 # partial pressure of CO2 + N2 = 0.0 # etc + S2 = 0.0 + SO2 = 0.0 + H2S = 0.0 + NH3 = 0.0 + H2 = 0.0 + CH4 = 0.0 + CO = 0.0 + +# Atmospheric chemistry postprocessing +[atmos_chem] + + module = "vulcan" # Atmospheric chemistry module + when = "manually" # When to run chemistry (manually, offline, online) + + # Physics flags + photo_on = true # Enable photochemistry + Kzz_on = true # Enable eddy diffusion + Kzz_const = "none" # Constant eddy diffusion coefficient (none => use profile) + moldiff_on = true # Enable molecular diffusion in the atmosphere + updraft_const = 0.0 # Set constant updraft velocity + + # Vulcan-specific atmospheric chemistry parameters + [atmos_chem.vulcan] + clip_fl = 1e-20 # Floor on stellar spectrum [erg s-1 cm-2 nm-1] + clip_vmr = 1e-10 # Neglect species with vmr < clip_vmr + make_funs = true # Generate reaction network functions + ini_mix = "profile" # Initial mixing ratios (profile, outgas) + fix_surf = false # Fixed surface mixing ratios + network = "SNCHO" # Class of chemical network to use (CHO, NCHO, SNCHO) + save_frames = true # Plot frames during 
iterations + yconv_cri = 0.05 # Convergence criterion, value of mixing ratios + slope_cri = 0.0001 # Convergence criterion, rate of change of mixing ratios + +# Calculate simulated observations +[observe] + + # Module with which to calculate the synthetic observables + synthesis = "none" From 3ce106620a207290445323c2fedb6308de4571c3 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 18 Apr 2025 14:20:30 +0200 Subject: [PATCH 004/105] toi561b toml file --- input/planets/toi561b.toml | 316 ------------------------------------- 1 file changed, 316 deletions(-) delete mode 100644 input/planets/toi561b.toml diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml deleted file mode 100644 index 3830e36a8..000000000 --- a/input/planets/toi561b.toml +++ /dev/null @@ -1,316 +0,0 @@ -# PROTEUS configuration file (version 2.0) - -# This is a comprehensive outline of all configuration options. It includes variables -# which have default values, in order to showcase the range of potential options available. 
-# Variable defaults are defined in `src/proteus/config/*.py` - -# Root tables should be physical, with the exception of "params" -# Software related options should go within the appropriate physical table - -# The general structure is: -# [root] metadata -# [params] parameters for code execution, output files, time-stepping, convergence -# [star] stellar parameters, model selection -# [orbit] planetary orbital parameters -# [struct] planetary structure (mass, radius) -# [atmos] atmosphere parameters, model selection -# [escape] escape parameters, model selection -# [interior] magma ocean model selection and parameters -# [outgas] outgassing parameters (fO2) and included volatiles -# [delivery] initial volatile inventory, and delivery model selection -# [observe] synthetic observations - -# ---------------------------------------------------- -# Metadata -version = "2.0" -author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" - -# ---------------------------------------------------- -# Parameters -[params] - # output files - [params.out] - path = "scratch/toi561b_test_escape_7Gyr" - logging = "DEBUG" - plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations - plot_fmt = "png" # Plotting image file format, "png" or "pdf" recommended - write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations - - # time-stepping - [params.dt] - minimum = 3e2 # yr, minimum time-step - minimum_rel = 1e-5 # relative minimum time-step [dimensionless] - maximum = 1e11 # yr, maximum time-step - initial = 1e3 # yr, inital step size - starspec = 3e6 # yr, interval to re-calculate the stellar spectrum - starinst = 1e2 # yr, interval to re-calculate the instellation - method = "adaptive" # proportional | adaptive | maximum - - [params.dt.proportional] - propconst = 52.0 # Proportionality constant - - [params.dt.adaptive] - atol = 0.02 # Step size atol - rtol = 0.07 # Step size rtol - - # Termination criteria - # Set 
enabled=true/false in each section to enable/disable that termination criterion - [params.stop] - - # Require criteria to be satisfied twice before model will exit? - strict = false - - # required number of iterations - [params.stop.iters] - enabled = true - minimum = 5 - maximum = 9000 - - # required time constraints - [params.stop.time] - enabled = true - minimum = 1.0e3 # yr, model will certainly run to t > minimum - maximum = 1.0e10 # yr, model will terminate when t > maximum - - # solidification - [params.stop.solid] - enabled = false - phi_crit = 0.005 # non-dim., model will terminate when global melt fraction < phi_crit - - # radiative equilibrium - [params.stop.radeqm] - enabled = false - atol = 0.1 # absolute tolerance [W m-2] - rtol = 1e-3 # relative tolerance - - [params.stop.escape] - enabled = false - p_stop = 1.0 # bar, model will terminate with p_surf < p_stop - - -# ---------------------------------------------------- -# Star -[star] - - # Physical parameters - mass = 0.806 # M_sun from Lacedelli et al., 2022 - age_ini = 0.100 # Gyr, model initialisation/start age - - module = "mors" - [star.mors] - rot_pcntle = 50.0 # rotation percentile - rot_period = 'none' # rotation period [days] - tracks = "spada" # evolution tracks: spada | baraffe - age_now = 7 # 10 Gyr, current age of star used for scaling from Weiss et al., 2021 - spec = "stellar_spectra/Named/sun.txt" # stellar spectrum - - [star.dummy] - radius = 0.843 # R_sun from Lacedelli et al., 2022 - Teff = 5372.0 # K from Lacedelli et al., 2022 - -# Orbital system -[orbit] - semimajoraxis = 0.0106 # AU from Brinkman et al. 2023 - eccentricity = 0.0 # dimensionless from Brinkman et al. 
2023 - zenith_angle = 48.19 # degrees - s0_factor = 0.375 # dimensionless - - module = "none" - - [orbit.dummy] - H_tide = 1e-11 # Fixed tidal power density [W kg-1] - Phi_tide = "<0.3" # Tidal heating applied when inequality locally satisfied - - [orbit.lovepy] - visc_thresh = 1e9 # Minimum viscosity required for heating [Pa s] - -# Planetary structure - physics table -[struct] - #mass_tot = 2.24 # M_earth from Brinkman et al. 2023 - radius_int = 1.37 # R_earth from Brinkman et al. 2023 - corefrac = 0.55 # non-dim., radius fraction 0.20 from Brinkman et al. 2023 - core_density = 10738.33 # Core density [kg m-3] - core_heatcap = 880.0 # Core specific heat capacity [J K-1 kg-1] - -# Atmosphere - physics table -[atmos_clim] - prevent_warming = true # do not allow the planet to heat up - surface_d = 0.01 # m, conductive skin thickness - surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity - cloud_enabled = false # enable water cloud radiative effects - cloud_alpha = 0.0 # condensate retention fraction (1 -> fully retained) - surf_state = "fixed" # surface scheme: "mixed_layer" | "fixed" | "skin" - surf_greyalbedo = 0.2 # surface grey albedo - albedo_pl = 0.0 # Enforced Bond albedo (do not use with `rayleigh = true`) from Lacedelli et al. 2022 - rayleigh = true # Enable rayleigh scattering - tmp_minimum = 0.5 # temperature floor on solver - tmp_maximum = 5000.0 # temperature ceiling on solver - - module = "janus" # Which atmosphere module to use - - [atmos_clim.agni] - p_top = 1.0e-5 # bar, top of atmosphere grid pressure - spectral_group = "Honeyside" # which gas opacities to include - spectral_bands = "256" # how many spectral bands? 
- num_levels = 60 # Number of atmospheric grid levels - chemistry = "none" # "none" | "eq" - surf_material = "greybody" # surface material file for scattering - solve_energy = true # solve for energy-conserving atmosphere profile - solution_atol = 1e-2 # solver absolute tolerance - solution_rtol = 5e-2 # solver relative tolerance - overlap_method = "ee" # gas overlap method - condensation = true # volatile condensation - real_gas = true # use real-gas equations of state - - [atmos_clim.janus] - p_top = 1.0e-5 # bar, top of atmosphere grid pressure - p_obs = 1.0e-3 # bar, observed pressure level - spectral_group = "Honeyside" # which gas opacities to include - spectral_bands = "256" # how many spectral bands? - F_atm_bc = 0 # measure outgoing flux at: (0) TOA | (1) Surface - num_levels = 60 # Number of atmospheric grid levels - tropopause = "none" # none | skin | dynamic - overlap_method = "ee" # gas overlap method - - [atmos_clim.dummy] - gamma = 0.7 # atmosphere opacity between 0 and 1 - -# Volatile escape - physics table -[escape] - - module = "zephyrus" # Which escape module to use - reservoir = "bulk" # Escaping reservoir: "bulk", "outgas", "pxuv". 
- - [escape.zephyrus] - Pxuv = 1e-2 # Pressure at which XUV radiation become opaque in the planetary atmosphere [bar] - efficiency = 1.0 # Escape efficiency factor - tidal = false # Tidal contribution enabled - - [escape.dummy] - rate = 0.0 # Bulk unfractionated escape rate [kg s-1] - -# Interior - physics table -[interior] - grain_size = 0.1 # crystal settling grain size [m] - F_initial = 1e5 # Initial heat flux guess [W m-2] - radiogenic_heat = true # enable radiogenic heat production - tidal_heat = false # enable tidal heat production - rheo_phi_loc = 0.4 # Centre of rheological transition - rheo_phi_wid = 0.15 # Width of rheological transition - bulk_modulus = 260e9 # Bulk modulus [Pa] - - module = "spider" # Which interior module to use - - [interior.spider] - num_levels = 220 # Number of SPIDER grid levels - mixing_length = 2 # Mixing length parameterization - tolerance = 1.0e-10 # solver tolerance - tolerance_rel = 1.0e-8 # relative solver tolerance - solver_type = "bdf" # SUNDIALS solver method - tsurf_atol = 20.0 # tsurf_poststep_change - tsurf_rtol = 0.01 # tsurf_poststep_change_frac - ini_entropy = 3300.0 # Surface entropy conditions [J K-1 kg-1] - ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1] - - [interior.aragog] - num_levels = 220 # Number of Aragog grid levels - tolerance = 1.0e-10 # solver tolerance - ini_tmagma = 3200.0 # Initial magma surface temperature [K] - - [interior.dummy] - ini_tmagma = 3500.0 # Initial magma surface temperature [K] - -# Outgassing - physics table -[outgas] - fO2_shift_IW = 0 # log10(ΔIW), atmosphere/interior boundary oxidation state - - module = "calliope" # Which outgassing module to use - - [outgas.calliope] - include_H2O = true # Include H2O compound - include_CO2 = true # Include CO2 compound - include_N2 = true # Include N2 compound - include_S2 = true # Include S2 compound - include_SO2 = true # Include SO2 compound - include_H2S = true # Include H2S compound - include_NH3 = true # Include NH3 
compound - include_H2 = true # Include H2 compound - include_CH4 = true # Include CH4 compound - include_CO = true # Include CO compound - T_floor = 2310.0 # Temperature floor applied to outgassing calculation [K]. - - [outgas.atmodeller] - some_parameter = "some_value" - -# Volatile delivery - physics table -[delivery] - - # Radionuclide parameters - radio_tref = 4.55 # Reference age for concentrations [Gyr] 4.55 before need to discuss w Tim - radio_K = 310.0 # ppmw of potassium (all isotopes) - radio_U = 0.031 # ppmw of uranium (all isotopes) - radio_Th = 0.124 # ppmw of thorium (all isotopes) - - # Which initial inventory to use? - initial = 'elements' # "elements" | "volatiles" - - # No module for accretion as of yet - module = "none" - - # Set initial volatile inventory by planetary element abundances - [delivery.elements] - # H_oceans = 0.0 # Hydrogen inventory in units of equivalent Earth oceans - H_ppmw = 109.0 # Hydrogen inventory in ppmw relative to mantle mass - - # CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system - C_ppmw = 109.0 # Carbon inventory in ppmw relative to mantle mass - - # NH_ratio = 0.018 # N/H mass ratio in mantle/atmosphere system - N_ppmw = 20.1 # Nitrogen inventory in ppmw relative to mantle mass - - # SH_ratio = 2.16 # S/H mass ratio in mantle/atmosphere system - S_ppmw = 235.0 # Sulfur inventory in ppmw relative to mantle mass - - # Set initial volatile inventory by partial pressures in atmosphere - [delivery.volatiles] - H2O = 0.0 # partial pressure of H2O - CO2 = 0.0 # partial pressure of CO2 - N2 = 0.0 # etc - S2 = 0.0 - SO2 = 0.0 - H2S = 0.0 - NH3 = 0.0 - H2 = 0.0 - CH4 = 0.0 - CO = 0.0 - -# Atmospheric chemistry postprocessing -[atmos_chem] - - module = "vulcan" # Atmospheric chemistry module - when = "manually" # When to run chemistry (manually, offline, online) - - # Physics flags - photo_on = true # Enable photochemistry - Kzz_on = true # Enable eddy diffusion - Kzz_const = "none" # Constant eddy diffusion 
coefficient (none => use profile) - moldiff_on = true # Enable molecular diffusion in the atmosphere - updraft_const = 0.0 # Set constant updraft velocity - - # Vulcan-specific atmospheric chemistry parameters - [atmos_chem.vulcan] - clip_fl = 1e-20 # Floor on stellar spectrum [erg s-1 cm-2 nm-1] - clip_vmr = 1e-10 # Neglect species with vmr < clip_vmr - make_funs = true # Generate reaction network functions - ini_mix = "profile" # Initial mixing ratios (profile, outgas) - fix_surf = false # Fixed surface mixing ratios - network = "SNCHO" # Class of chemical network to use (CHO, NCHO, SNCHO) - save_frames = true # Plot frames during iterations - yconv_cri = 0.05 # Convergence criterion, value of mixing ratios - slope_cri = 0.0001 # Convergence criterion, rate of change of mixing ratios - -# Calculate simulated observations -[observe] - - # Module with which to calculate the synthetic observables - synthesis = "none" From 71123430dab336a4c1754616ebbf5307b6dbb23b Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 18 Apr 2025 20:26:02 +0200 Subject: [PATCH 005/105] update --- input/planets/toi561b.toml | 294 +++++++++++++++++++++++++++++++++++++ 1 file changed, 294 insertions(+) create mode 100644 input/planets/toi561b.toml diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml new file mode 100644 index 000000000..d186cd992 --- /dev/null +++ b/input/planets/toi561b.toml @@ -0,0 +1,294 @@ +# PROTEUS configuration file (version 2.0) + +version = "2.0" +author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" + +# ---------------------------------------------------- +# Parameters +[params] + # output files + [params.out] + path = "scratch/toi_561b_tests/toi561b_test_escape_1.5Gyr_bug_branch_stop_escape_true" + logging = "DEBUG" + plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations + plot_fmt = "png" # Plotting image file format, "png" or "pdf" recommended + write_mod = 1 # Write CSV frequency, 0: wait until 
completion | n: every n iterations + + # time-stepping + [params.dt] + minimum = 3e2 # yr, minimum time-step + minimum_rel = 1e-5 # relative minimum time-step [dimensionless] + maximum = 1e11 # yr, maximum time-step + initial = 1e3 # yr, inital step size + starspec = 3e6 # yr, interval to re-calculate the stellar spectrum + starinst = 1e2 # yr, interval to re-calculate the instellation + method = "adaptive" # proportional | adaptive | maximum + + [params.dt.proportional] + propconst = 52.0 # Proportionality constant + + [params.dt.adaptive] + atol = 0.02 # Step size atol + rtol = 0.07 # Step size rtol + + # Termination criteria + # Set enabled=true/false in each section to enable/disable that termination criterion + [params.stop] + + # Require criteria to be satisfied twice before model will exit? + strict = false + + # required number of iterations + [params.stop.iters] + enabled = true + minimum = 5 + maximum = 9000 + + # required time constraints + [params.stop.time] + enabled = true + minimum = 1.0e3 # yr, model will certainly run to t > minimum + maximum = 1.0e10 # yr, model will terminate when t > maximum + + # solidification + [params.stop.solid] + enabled = false + phi_crit = 0.005 # non-dim., model will terminate when global melt fraction < phi_crit + + # radiative equilibrium + [params.stop.radeqm] + enabled = false + atol = 0.1 # absolute tolerance [W m-2] + rtol = 1e-3 # relative tolerance + + [params.stop.escape] + enabled = true + p_stop = 1.0 # bar, model will terminate with p_surf < p_stop + + +# ---------------------------------------------------- +# Star +[star] + + # Physical parameters + mass = 0.806 # M_sun from Lacedelli et al., 2022 + age_ini = 0.100 # Gyr, model initialisation/start age + + module = "mors" + [star.mors] + rot_pcntle = 50.0 # rotation percentile + rot_period = 'none' # rotation period [days] + tracks = "spada" # evolution tracks: spada | baraffe + age_now = 10 # 10 Gyr, current age of star used for scaling from Weiss et al., 
2021 + spec = "stellar_spectra/Named/sun.txt" # stellar spectrum + + [star.dummy] + radius = 0.843 # R_sun from Lacedelli et al., 2022 + Teff = 5372.0 # K from Lacedelli et al., 2022 + +# Orbital system +[orbit] + semimajoraxis = 0.0106 # AU from Brinkman et al. 2023 + eccentricity = 0.0 # dimensionless from Brinkman et al. 2023 + zenith_angle = 48.19 # degrees + s0_factor = 0.375 # dimensionless + + module = "none" + + [orbit.dummy] + H_tide = 1e-11 # Fixed tidal power density [W kg-1] + Phi_tide = "<0.3" # Tidal heating applied when inequality locally satisfied + + [orbit.lovepy] + visc_thresh = 1e9 # Minimum viscosity required for heating [Pa s] + +# Planetary structure - physics table +[struct] + #mass_tot = 2.24 # M_earth from Brinkman et al. 2023 + radius_int = 1.37 # R_earth from Brinkman et al. 2023 + corefrac = 0.55 # non-dim., radius fraction 0.20 from Brinkman et al. 2023 + core_density = 10738.33 # Core density [kg m-3] + core_heatcap = 880.0 # Core specific heat capacity [J K-1 kg-1] + +# Atmosphere - physics table +[atmos_clim] + prevent_warming = true # do not allow the planet to heat up + surface_d = 0.01 # m, conductive skin thickness + surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity + cloud_enabled = false # enable water cloud radiative effects + cloud_alpha = 0.0 # condensate retention fraction (1 -> fully retained) + surf_state = "fixed" # surface scheme: "mixed_layer" | "fixed" | "skin" + surf_greyalbedo = 0.2 # surface grey albedo + albedo_pl = 0.0 # Enforced Bond albedo (do not use with `rayleigh = true`) from Lacedelli et al. 2022 + rayleigh = true # Enable rayleigh scattering + tmp_minimum = 0.5 # temperature floor on solver + tmp_maximum = 5000.0 # temperature ceiling on solver + + module = "janus" # Which atmosphere module to use + + [atmos_clim.agni] + p_top = 1.0e-5 # bar, top of atmosphere grid pressure + spectral_group = "Honeyside" # which gas opacities to include + spectral_bands = "256" # how many spectral bands? 
+ num_levels = 60 # Number of atmospheric grid levels + chemistry = "none" # "none" | "eq" + surf_material = "greybody" # surface material file for scattering + solve_energy = true # solve for energy-conserving atmosphere profile + solution_atol = 1e-2 # solver absolute tolerance + solution_rtol = 5e-2 # solver relative tolerance + overlap_method = "ee" # gas overlap method + condensation = true # volatile condensation + real_gas = true # use real-gas equations of state + + [atmos_clim.janus] + p_top = 1.0e-5 # bar, top of atmosphere grid pressure + p_obs = 1.0e-3 # bar, observed pressure level + spectral_group = "Honeyside" # which gas opacities to include + spectral_bands = "256" # how many spectral bands? + F_atm_bc = 0 # measure outgoing flux at: (0) TOA | (1) Surface + num_levels = 60 # Number of atmospheric grid levels + tropopause = "none" # none | skin | dynamic + overlap_method = "ee" # gas overlap method + + [atmos_clim.dummy] + gamma = 0.7 # atmosphere opacity between 0 and 1 + +# Volatile escape - physics table +[escape] + + module = "zephyrus" # Which escape module to use + reservoir = "bulk" # Escaping reservoir: "bulk", "outgas", "pxuv". 
+ + [escape.zephyrus] + Pxuv = 1e-2 # Pressure at which XUV radiation become opaque in the planetary atmosphere [bar] + efficiency = 1.0 # Escape efficiency factor + tidal = false # Tidal contribution enabled + + [escape.dummy] + rate = 0.0 # Bulk unfractionated escape rate [kg s-1] + +# Interior - physics table +[interior] + grain_size = 0.1 # crystal settling grain size [m] + F_initial = 1e5 # Initial heat flux guess [W m-2] + radiogenic_heat = true # enable radiogenic heat production + tidal_heat = false # enable tidal heat production + rheo_phi_loc = 0.4 # Centre of rheological transition + rheo_phi_wid = 0.15 # Width of rheological transition + bulk_modulus = 260e9 # Bulk modulus [Pa] + + module = "spider" # Which interior module to use + + [interior.spider] + num_levels = 220 # Number of SPIDER grid levels + mixing_length = 2 # Mixing length parameterization + tolerance = 1.0e-10 # solver tolerance + tolerance_rel = 1.0e-8 # relative solver tolerance + solver_type = "bdf" # SUNDIALS solver method + tsurf_atol = 20.0 # tsurf_poststep_change + tsurf_rtol = 0.01 # tsurf_poststep_change_frac + ini_entropy = 3300.0 # Surface entropy conditions [J K-1 kg-1] + ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1] + + [interior.aragog] + num_levels = 220 # Number of Aragog grid levels + tolerance = 1.0e-10 # solver tolerance + ini_tmagma = 3200.0 # Initial magma surface temperature [K] + + [interior.dummy] + ini_tmagma = 3500.0 # Initial magma surface temperature [K] + +# Outgassing - physics table +[outgas] + fO2_shift_IW = 0 # log10(ΔIW), atmosphere/interior boundary oxidation state + + module = "calliope" # Which outgassing module to use + + [outgas.calliope] + include_H2O = true # Include H2O compound + include_CO2 = true # Include CO2 compound + include_N2 = true # Include N2 compound + include_S2 = true # Include S2 compound + include_SO2 = true # Include SO2 compound + include_H2S = true # Include H2S compound + include_NH3 = true # Include NH3 
compound + include_H2 = true # Include H2 compound + include_CH4 = true # Include CH4 compound + include_CO = true # Include CO compound + T_floor = 2310.0 # Temperature floor applied to outgassing calculation [K]. + + [outgas.atmodeller] + some_parameter = "some_value" + +# Volatile delivery - physics table +[delivery] + + # Radionuclide parameters + radio_tref = 4.55 # Reference age for concentrations [Gyr] 4.55 before need to discuss w Tim + radio_K = 310.0 # ppmw of potassium (all isotopes) + radio_U = 0.031 # ppmw of uranium (all isotopes) + radio_Th = 0.124 # ppmw of thorium (all isotopes) + + # Which initial inventory to use? + initial = 'elements' # "elements" | "volatiles" + + # No module for accretion as of yet + module = "none" + + # Set initial volatile inventory by planetary element abundances + [delivery.elements] + # H_oceans = 0.0 # Hydrogen inventory in units of equivalent Earth oceans + H_ppmw = 109.0 # Hydrogen inventory in ppmw relative to mantle mass + + # CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system + C_ppmw = 109.0 # Carbon inventory in ppmw relative to mantle mass + + # NH_ratio = 0.018 # N/H mass ratio in mantle/atmosphere system + N_ppmw = 20.1 # Nitrogen inventory in ppmw relative to mantle mass + + # SH_ratio = 2.16 # S/H mass ratio in mantle/atmosphere system + S_ppmw = 235.0 # Sulfur inventory in ppmw relative to mantle mass + + # Set initial volatile inventory by partial pressures in atmosphere + [delivery.volatiles] + H2O = 0.0 # partial pressure of H2O + CO2 = 0.0 # partial pressure of CO2 + N2 = 0.0 # etc + S2 = 0.0 + SO2 = 0.0 + H2S = 0.0 + NH3 = 0.0 + H2 = 0.0 + CH4 = 0.0 + CO = 0.0 + +# Atmospheric chemistry postprocessing +[atmos_chem] + + module = "vulcan" # Atmospheric chemistry module + when = "manually" # When to run chemistry (manually, offline, online) + + # Physics flags + photo_on = true # Enable photochemistry + Kzz_on = true # Enable eddy diffusion + Kzz_const = "none" # Constant eddy diffusion 
coefficient (none => use profile) + moldiff_on = true # Enable molecular diffusion in the atmosphere + updraft_const = 0.0 # Set constant updraft velocity + + # Vulcan-specific atmospheric chemistry parameters + [atmos_chem.vulcan] + clip_fl = 1e-20 # Floor on stellar spectrum [erg s-1 cm-2 nm-1] + clip_vmr = 1e-10 # Neglect species with vmr < clip_vmr + make_funs = true # Generate reaction network functions + ini_mix = "profile" # Initial mixing ratios (profile, outgas) + fix_surf = false # Fixed surface mixing ratios + network = "SNCHO" # Class of chemical network to use (CHO, NCHO, SNCHO) + save_frames = true # Plot frames during iterations + yconv_cri = 0.05 # Convergence criterion, value of mixing ratios + slope_cri = 0.0001 # Convergence criterion, rate of change of mixing ratios + +# Calculate simulated observations +[observe] + + # Module with which to calculate the synthetic observables + synthesis = "none" From f426496fecd9c91fd2d8c91611add73ae729295d Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 18 Apr 2025 20:30:27 +0200 Subject: [PATCH 006/105] Add the post_processing_grid.py script and create a new folder to test it --- post_processing_grids/post_processing_grid.py | 316 ++++++++++++++++++ 1 file changed, 316 insertions(+) create mode 100644 post_processing_grids/post_processing_grid.py diff --git a/post_processing_grids/post_processing_grid.py b/post_processing_grids/post_processing_grid.py new file mode 100644 index 000000000..17d0aee5a --- /dev/null +++ b/post_processing_grids/post_processing_grid.py @@ -0,0 +1,316 @@ +import os +from pathlib import Path +import pandas as pd +import toml +import seaborn as sns +import matplotlib.pyplot as plt +import re + +def load_grid_cases(grid_dir: Path): + """ + Load information for each simulation of a PROTEUS grid. + + Read runtime_helpfile.csv, init_parameters (from toml file), and status + files for each simulation of the grid. 
+ + Parameters + ---------- + grid_dir : Path + Path to the grid directory containing the case_* folders + + Returns + ---------- + combined_data : list + List of dictionaries, each containing: + - 'init_parameters' (dict): + Parameters loaded from init_coupler.toml + - 'output_values' (pandas.DataFrame): + Data from runtime_helpfile.csv + - 'status' (str or None): + Status string from status file (if available) + """ + combined_data = [] + grid_dir = Path(grid_dir) + + # Load all cases from the grid + for case in grid_dir.glob('case_*'): + runtime_file = case / 'runtime_helpfile.csv' + init_file = case / 'init_coupler.toml' + status_file = case / 'status' + + if runtime_file.exists() and init_file.exists(): + try: + df = pd.read_csv(runtime_file, sep='\t') + init_params = toml.load(open(init_file)) + + # Read status file if it exists, otherwise set to None + status = None + if status_file.exists(): + with open(status_file, 'r') as sf: + status = sf.read().replace('\n', ' ').strip() + + combined_data.append({ + 'init_parameters': init_params, + 'output_values': df, + 'status': status + }) + + except Exception as e: + print(f"Error processing {case.name}: {e}") + + statuses = [str(case.get('status', 'unknown')).strip() or 'unknown' for case in combined_data] + status_counts = pd.Series(statuses).value_counts().sort_values(ascending=False) + print('-----------------------------------------------------------') + print(f"Total number of simulations: {len(statuses)}") + print('-----------------------------------------------------------') + print("Number of simulations per status:") + for status, count in status_counts.items(): + print(f" - '{status}': {count}") + print('-----------------------------------------------------------') + return combined_data + +def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): + """ + Plot the status of simulation from the PROTEUS grid. 
+ + Parameters + ---------- + cases_data : list + List of dictionaries containing the status of all simulation from the grid. + + plot_dir : Path + Path to the plots directory + + status_colors : dict, optional + A dictionary mapping statuses to specific colors. If None, a default palette is used. + + Returns + ------- + Plot saved to the specified directory. + """ + + # Extract and clean statuses + statuses = [case.get('status', 'unknown') or 'unknown' for case in cases_data] + status_counts = pd.Series(statuses).value_counts().sort_values(ascending=False) + # print("Unique statuses found:", pd.Series(statuses).unique()) + + # Set colors for the bars + if status_colors: + palette = {str(status): status_colors.get(str(status), 'gray') for status in status_counts.index} + else: + palette = sns.color_palette("Accent", len(status_counts)) + palette = dict(zip(status_counts.index, palette)) + + # Prepare dataframe for plotting + plot_df = pd.DataFrame({ + 'Status': status_counts.index, + 'Count': status_counts.values + }) + + #sns.set(style="white") + plt.figure(figsize=(10, 7)) + ax = sns.barplot( + data=plot_df, + x='Status', + y='Count', + hue='Status', # required to apply the palette + palette=palette, + dodge=False, + edgecolor='black' # edge color added here + ) + + # Remove legend if it was created + if ax.legend_: + ax.legend_.remove() + + # Add text on top of bars + total_simulations = len(cases_data) + for i, count in enumerate(status_counts.values): + percentage = (count / total_simulations) * 100 + ax.text( + i, count + 1, + f"{count} ({percentage:.1f}%)", + ha='center', va='bottom', fontsize=10 + ) + + + plt.title(f"Total number of simulations: {total_simulations}", fontsize=16) + plt.xlabel("Simulation status", fontsize=16) + plt.ylabel("Number of simulations", fontsize=16) + plt.yticks(fontsize=12) + plt.xticks(fontsize=12) + plt.tight_layout() + output_path = plot_dir+'simulations_status_summary.png' + plt.savefig(output_path, dpi=300) + plt.close() + 
def get_grid_parameters(grid_dir: str):
    """
    Extract grid parameter names and values from the manager.log file.

    Parameters
    ----------
    grid_dir : str
        Path to the grid directory

    Returns
    -------
    dict
        A dictionary where each key is the parameter name and the value is a list of parameter values.
    """
    import ast  # local import: only needed for safe literal parsing below

    # Dictionary to store the parameters and their values
    parameters_dict = {}

    # Open and read the log file
    log_file = os.path.join(grid_dir, 'manager.log')
    with open(log_file, 'r') as file:
        lines = file.readlines()

    # Regular expression patterns to find the relevant lines
    param_pattern = re.compile(r'-- dimension: (.+)')
    value_pattern = re.compile(r'values\s+:\s+\[([^\]]+)\]')

    current_dimension = None

    for line in lines:
        # Look for dimension lines to start processing the parameters
        dimension_match = param_pattern.search(line)
        if dimension_match:
            current_dimension = dimension_match.group(1).strip()

        # If we have found a dimension, look for the values line
        if current_dimension and 'values' in line:
            value_match = value_pattern.search(line)
            if value_match:
                # Parse each comma-separated token with ast.literal_eval
                # instead of eval(): it accepts numbers/strings/containers
                # but cannot execute arbitrary code read from the log file.
                # Unquoted bare words (which made eval() crash) fall back
                # to the raw string.
                values = []
                for token in value_match.group(1).strip().split(','):
                    token = token.strip()
                    try:
                        values.append(ast.literal_eval(token))
                    except (ValueError, SyntaxError):
                        values.append(token)

                # Store the values under the current dimension
                parameters_dict[current_dimension] = values
                current_dimension = None  # Reset current dimension after processing

    # Print the extracted parameters
    print('-----------------------------------------------------------')
    print("Extracted Parameters:")
    print("-----------------------------------------------------------")
    for param, values in parameters_dict.items():
        print(f"{param}: {values}")
    print("-----------------------------------------------------------")
    return parameters_dict

def extract_grid_output(cases_data, parameter_name):
    """
    Extract a specific parameter from the 'output_values' of each simulation case.

    Parameters
    ----------
    cases_data : list
        List of dictionaries containing simulation data.

    parameter_name : str
        The name of the parameter to extract from 'output_values'.

    Returns
    -------
    parameter_values : list
        A list containing the extracted values (last time step) of the specified
        parameter for all cases that have it. Cases lacking the column are skipped.
    """
    parameter_values = []
    columns_printed = False  # Flag to print available columns only once

    for case in cases_data:
        df = case['output_values']

        # Check if the parameter exists in the output dataframe
        if parameter_name in df.columns:
            # Extract the value of the parameter at the last time step
            parameter_value = df[parameter_name].iloc[-1]
            parameter_values.append(parameter_value)
        else:
            if not columns_printed:
                print(f"Warning: Parameter '{parameter_name}' does not exist in case '{case['init_parameters'].get('name', 'Unknown')}'")
                print(f"Available columns in this case: {', '.join(df.columns)}")
                columns_printed = True

    # Print the extracted values
    print('-----------------------------------------------------------')
    print(f"Extracted output (at last time step) : {parameter_name} ")
    print('-----------------------------------------------------------')

    return parameter_values

def extract_solidification_time(cases_data, phi_crit):
    """
    Extract the solidification time for planets that reach Phi_global < phi_crit.

    Parameters
    ----------
    cases_data : list
        List of dictionaries containing simulation data.

    phi_crit : float
        The critical value of melt fraction at which a planet is considered solidified.

    Returns
    -------
    solidification_times : list
        A list containing the solidification times for all solidified planets of the grid.
        If a planet never solidifies, it will not be included in the list.
    """
    solidification_times = []
    columns_printed = False

    for i, case in enumerate(cases_data):
        df = case['output_values']

        if 'Phi_global' in df.columns and 'Time' in df.columns:
            condition = df['Phi_global'] < phi_crit
            if condition.any():
                first_index = condition.idxmax()  # gives the first True index
                solid_time = df.loc[first_index, 'Time']
                solidification_times.append(solid_time)
                print(f"[Case {i}] Solidification at index {first_index} → Time = {solid_time}, Phi = {df.loc[first_index, 'Phi_global']}")

        else:
            if not columns_printed:
                print("Warning: 'Phi_global' and/or 'Time' columns not found in some cases.")
                print(f"Available columns: {', '.join(df.columns)}")
                columns_printed = True

    print('-----------------------------------------------------------')
    print("Extracted solidification times")
    # f-prefix added: the original printed the placeholder text literally.
    print(f'at timesteps when Phi_global < {phi_crit}')
    print('-----------------------------------------------------------')
    print(len(solidification_times))

    return solidification_times



if __name__ == '__main__':

    # Paths
    grid_path = '/home2/p315557/outputs_Norma2/good_grids/escape_grid_4_params_Pxuv_a_epsilon_fO2/'
    plots_path = '/home2/p315557/PROTEUS/post_processing_grids/plots'

    # Plots : True or False
    plot = True

    # Load simulation cases
    cases_data = load_grid_cases(grid_path)

    # If True, plot the status of all grid simulations
    if plot:
        plot_grid_status(cases_data, plots_path)

    # Extract grid parameters
    grid_parameters = get_grid_parameters(grid_path)

    # List of outputs to extract
    output_to_extract = ['esc_rate_total', 'Phi_global', 'P_surf', 'atm_kg_per_mol']
    for param in output_to_extract:
        extracted_values = extract_grid_output(cases_data, param)
    # Extract the solidification time
    phi_crit = 0.005  # Critical melt fraction for solidification
    solidification_times = extract_solidification_time(cases_data, phi_crit)
22e7fd9ebb7e2fc563d82bc1807e37c98dd1060b Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 18 Apr 2025 20:32:30 +0200 Subject: [PATCH 007/105] Add histo plot --- .../simulations_status_summary.png | Bin 0 -> 153237 bytes post_processing_grids/post_processing_grid.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 post_processing_grids/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/simulations_status_summary.png diff --git a/post_processing_grids/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/simulations_status_summary.png b/post_processing_grids/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/simulations_status_summary.png new file mode 100644 index 0000000000000000000000000000000000000000..e19cacd4d0746219453882a0a0b2c28b90686657 GIT binary patch literal 153237 zcmeFZcU09^*ELE^OrA)h0RbzZC?Y5jRHTbBAOZpcD$*mUNNCbK$C$@RN2w}Jkz%0; zQU$>n4o6WTf&x;NA|TBJNH5=9C;Gha_uV`08269+#~tHkj6_j6=XdtrYpuEFoNEUi z($)BW+1h1XTwLF49#Avj;^Hpm;`-OFe|?QV5sy&Pz%Pn!>c`v+old!V9(Os(rE}cv zjJ=bay{**-kCQI0woZ;arDT4TlG(oDw42)*S4C-QhyVNwDJK^jX;-I(9=yu8XAT&< za&bv*qW`||N>laV`htr~Q|+h2UI~M3o|mIvP0J74#I=9buP%P>=Gn97zE_u7om;e; zd-vVA1EG>0l}%PbviiA3q5RzYb6Q&|NU?O z=iC3jR{yga|2M0#Ce0|v;!;dZ%n1A0&e~}AUoU#pr|W3X1UR*qc^hYZeSWw5`ma)B>zKW` z@#kvA0?CG*waQWBvO}-#7~$3M0DG@SOMg1jXC)gOEg6FN&;Pr%ySBQxxKt|&a$WmQ z(z0AxK-yycwxgMWLxn9;g&hI2H_b|1j5C&UaXl2+&pG6WMDRYVR&F`Cbzs--JLaVx zCli-&+2b65VmG1HY|x}o^# z*kf~nfy?+MDy2#X4N~mziR_>96@Q6eMXee#jdN#*OXJ+v#>X7kT&=j2t4ZX+SdEhN za7pj|*d=>q#)sRK{#Y(((m9Z6S|n8+CvD;VFm%n%&VojZkUxL_hV|j?U)*MwD}H@- zd+J@hQ)j$WdCIUxdpQ&7!9kI+zh*TK=`)BK!@qXKgSyP>>yMnIl zw2AFoi>GMWv2+-}aIN{}&BNbh99xV1=2?QCW^RK`xi81QIICk^=H;B={q9O|%Wr1G zHpd6@PdthA!t-_Z#94;x-`Jnl(_9fSKjYOCW7HXAWD(RAt|TSIm}B2GEpl9&hYk3} z?)pPPyupSqzgsW=W0L>GYrpyD@p48#e6_dAdoY(}#qMNkvOJn?GvvEMcc!@Z*68B} zZ<-YRT5eq(v(4zynk8w^=eOG@{;@(>$bV*BJa_8LZ z+j{03`XpS-dTwPTX*DxiomtGYUwoP3(o>z5z{7PnZDzVRx$EYQ@ZePzUK@4yOn2w$ z;wR32ca8IaTwGi4PLK7nDuY&zeKg8ECNncRl2a_mRk-oh?IR%_^TVDm(zN6HUfwbs 
z%&*sdaqXwyZ({qmTa?Madmg_knX%ygoshD;h3``m8yvmDPD9f%*&5V>?fJE6Zmqjx~H@7nKHk1uXyhOgk_t662~cPYQZt}$Cb z$KruO&8ep^vP_HeTi5+rn2qpzamOepPxni$){NwUwQcv^w+-xyynX2UzV*I?xz02F z8LYP#goXIG9X;q{mZ<2SqMN9g+u~TNj+g4JQ5p%olYL*p!n-FXm6c;&x?5Luew+?Z z%KosmwzaX`1rHKX0`?&bsF8Q=F6PD+sA9|Jex{+q%@^>38urPCdVB8{%89Bd300xnN|t zq;cNnc05&n7Nvz)jsHAKkXlNnAs3gHCNkNLet z+w6*yJtFGlMIOA=m4B-6-RGti0sem2FqspNFHVgNX4#G#?5P|bGHt(KY zk@1`QFg4NQ)HOR&5fQXftO|c+)nAvEi%__USATg^f78`nw!HMDHHk{BjThGQ;_zsk zIC+xa=YhkMXmP{N=W-pPJe&4c%60gy9P6!(z!F#CC54eFLj*lrWtJ;+UzG0*+?1jd zzx!mOW7TYbMsjCXQG4+9pD$Dubp)(aU7T~>Zd$ng_TgK+?gI^B;ety0iyYfJ5j-J^ z?(g{&7n*XdyTat!17)nMDknP_!rZH*Lh&~H>9ZK$JC)m;b%nnV>jn#5K-coLX%3%I&DM*48lc%wXbHkn$AuDy|H+moi7(92F7c}2yj`$>q>0+>#8B6@-rg)WMtWLPa10Q~c9JkdwPaOd;G7{*xp@E?aGQCgR5tUiY!rV@(#L6FYB$zYMh%M zvu?V3oQ;j*tR8AxAT35+nzl8fLjU@{^EdI1h@`D1h4yQFhf8eM`+fK`7u8X{#HA;= zz$3k_Hum4b0aF{lULmx8lSX(b9Tn6UEBtt_W83|kh`5(orgD?+LnxkZBkd{@hi@KA zS76M4Y?L|k=HY7DUl03Dwt43M^5?l@_fF}&%r;kg-jKrb> zSIP56c*{O)c<6eC9UV;wb1l`4h=m;*OYr{ywL=A! 
z3Y)wqhFauo=a7j6lqPZvR4zmYkIheZh%J1$pt?El-aYPdlcTo}i#wy*ZHbk(w78~B zXHgCLW!YTmudT(-oh7|V+s4P*nouU9s+_QW<<{S<5K3tB?C|GKu*M?riY0kjS=FU# zGHYC2@yLmle|{ru_NJpYCjy6YbIqRF!OHF@QT34;KXvF?`hPsqS-Dd0Aku;3J%w_l zHQOgy^40HsV%{ehwQ3kA)*2zX6c1#V@|F(Yx62*Pv8)K_M844Wn`5)i`!MKMCq6QRZha+uibT^^~_)22jL{~ ziASiV3=Z2MhxezB(K?E##&tB`J^r)R!?RxmA#toyoJ1sV&Mq6Ru0#OTM`|BW8UT{x z{YJ>^m+PY{Z~wiL*D(E{rgnCP#DMEi?AEs4Aj#+7iX~hepGJkveJo=*@A>arh8gM7 z&Nv2IhduCrE_-WqH54|v^}0$-PBKnu`d4||72+%dymvXa z$}d+L|J#eu1Qu33Toxd-`2FoWDabo)glD?UQ%NI{OMw7L3W}b?>qpAQbT{(`EvXgU zapHj6`*{BqS_(jy$B(>RPM9Nk$Q>3ztWWk$71_ zVo9qhErklJmOoT@o}{8wqdfLZ9!qPD!?ZNXh!52}Y^+vQm7Z+1ijUiGy$(%xHxfKJ z-I#5zJoV;x(a08HMJqtdpc@f;6gfw8F(3fx=(gAUTN#vCou5h;RsQk)I<3vMmOb6w zl~i%*?oj5jv3JiySi%hD`<^37FB;2f z|7B-pI~Ii~G;yF+hbtBrre+3mxZRW4rj)5$eFm~#+&*%L-nlNJKh|z;ik&;z?#lv} zGD&rPcWx1P5zNZs#Ck;jLjRCNC8Y}GoaO2^V60lhub zCrn&k3)B|yS*B^lL{6;wI0rcYu;HJs>aLD)iOb0Zw>nK{Hggp#@8r{9!I3*T<-tV2_kSYJ}vUO*JsgsUftK{?M0=upV#+>!|K%+D=v1RIN z1qFo{MNS>XGw<)zC$z=(*J(?KJ(aFdMgSV7P7@ZVPmP0|;b(o%rZyZnK+XbrOr&=b z>AK1`Bk`!!Qu%F0*`bvE3BZc5D5cF!*D}Is8?(-PtWX(0GzjR;W-LxJnBfH;L(K$E z)}t=u0u}E%5!Z3weV~(&H}F>0V@7h?laCVqNnV{n@~Au3z*e>qiMN=ZEi)yqb{|o| zSmXWm^=n;Uq^LXBDD+mIUuAJjg4uLs5I8jgKv}GOZuF@Y%LJLs%;WV*X9BdNJx}W| zn!ALok`DiQxx!{4Br%ail`QN8uw9uKN>oUUc6y>+sr!I{v9YnHFtumC;e2g~&!~yU zOyhgeJAvsoME!%73)cXG1#gN_-a#ibzselYHNmsp+Zln7COaIv<%WhM6DphTArs;imdaB|{rBw(`C!0+pmBLs_)Xt#OKpb%M)JeUlpZEMcAUHNgK zF#@d;3&6c|Ag3a{sNH89;C#%Ecl<_XJR0kr)lkKYQtNBw@lzIaLr#pR^>HyV{JtX( zJlDHElX;P@6Mq`<3;4&xF|n=;1+5@1K{qV~bm00pMh%skmhpJS~sL}O? 
zH};1S9_{-3=M`c2`TY-kJ`!$V*XyY=@xrU9Ndu}E3uFBCy1)>7|C z?LVP1AaEI*nuIB=Z6_r=IyKlg?P+7aaM#{Kv2EM75mHN3@(f3D)j;IBj=bG%-}D1c zWiUGSDjel5giCw(aC3yp#IZpDPihA1e3z_wGgQ>kKIoo{Ufg-~i5LqoVkI^DKu|0M zy%kyu+EaQ7!x}*amlFwoqfa=sap=AP4`t!rZH_tUBHf0XWtXeYeP}}H+`y~pm-|gm zAt6~myDY<|^bY!9D4D{f^!-W0JJk7)U38WmcnY^pOP2*)?wPY_FgZY{Dih2mNq`L0H3cCpdxw4DKuTYAeauc;*5IFY&(4O;?z^i zg~((1r(bO8(!07#Ja?pgHa&E)S9P)E?^*_`hNktaOyiyUh%jY;P)fRGIBr2eCpuMg zDE+)hx7O9lcMji*q>$lGv;(bBhRPAI94F*l$rFU-aoDYkuTB)01A}+}S}tx4Q1bKn z8tEgMM40K7+SaEBAw#?0V^gns8U5__v8~|;D=b8L(_F(fFX$w$;6UIlx(UZ_dZA1n zESsNb339Db5Stu9=i9OU!`$J9V7R9O$9E4K6`N17NG)Z2_-zt_&H*YD=fh<|`%qVHP%eaU z(w-ZzT-3@0&W&;|UzizC>#a$Y^Ij+i^HeuswocA5AtMn4W#8+2HdbJ~;zQ#Ab-j=& zs7ywsIGGx9GWC8y4m5Gk#}^4Z}a1bp|p z+UjbpmpPWI&W|@<*qjY?UWtZo_v|E{@h68kEYOL3H@@Q9%QBlRulG3#Kk%KDIxI7R z9JEqtDBrN!>%w}M1J{#nYz5p=w|5e?BSt-_6_ZH0&db)}n$wEP?8=qu!KI=C#ZGg@ z;10R@Cvj2_&>P!k&U_pYL+566)63N0t>sS^wfG0p6gNv)R8I}7?!WzGor`KW54%F7 zBVy0Y&4;-N%Sh_e>Y9L^CC5i3j{G)T6+z`o#&=?H3Co<``s|m>53I@2h!8qoz4Ow6 z&8KRU67&9b7i?INhLCsa-!j;dc&c=Dmx=j(c4cSAA@;IS6Ct<1e+oMJG?rhnvF@&@ zSq^WIWX3~(G$5K+dr_(M2b-w;wz@Z(2Z4Fu^U}5DD|Xt}TO--87+14o5_n5AT=D(Q z=K-gWyQ1)fl%?dvs9An$tZ0Yd*(qVOPDksiNexuFEK}Q<& z?CufSbMx>mgSuXdLgE{O*DJkZXl$*%fyxzxH0dz$r8&}8s?k3!Ob?cR)zdg>J#_B5 ztSZ7d{1S(^o+$(E$3iaq0X17sap8rAFcTb&WvZF>U&MwG@I&QDJLddrL*|;C@|j=V zunYZDpdf0km>mZ503U%Bz)|O~-g_!lT8Y?+bF&9XmO=G$OD$AKlXfqKvAA$4(fq?= zv^RR_{T%YE4e&nKiWwXjxD^c}(dTFlEZ#J=I24(Wk94T!<>ifr%>#dljuv$wwRtVh zPu@V{Sej%AeB%Y~;Rf0fO_nO4{uBb3JBCG&05G(s(GvO53-8p4W!-+bEHLov@7Gp} zCHoP*R4rEs%7XgSWrD7TRA#+^K~j(nZNuWFvW@z8f{eNR&V>VmTh57`Y=PdLW%rr8pz*)*xCUao06+j%I#nq?QKX z%SyJ}`Qp)e?yh#gav2ACKt#3| zFbi^Uv^!O@majMj`@^F|LlImS6|@7nq>Qsmg=HVh0Xa>1x;sjoRj>cIzJW>p=}U#j zK;MP}_U=Xzjpk5?n%owJrwGc;^++?s>spgkeD|a5M1}jK+A}rnLWuq&_|L5(t)!$x z`R2LMWMjpG>~iG~5BIS;>~bEYAYwIHAbZUEyyELC7>h(NUGH^3nPQ_0O5c9w0rD94 z`T^jI5ac|C{w8e58Vy1B!`H`VUwO1Rtdg#nbE$J><2)ys9yT>Y=?}I^^4?+{oVNR& zPmBd1wO%u$Ge~SdprE!wd|gj#$w#eT6*C|&>gY99xpqP+nwsj0s4qO4-u!ZU1R)?< 
zcj2dd_wIE8%!LuP*gu7b6##J{?$&yk(_n%~2;MbWy)7pc9mN*GZG9sQ(>;x+?ql)Q zQFd==$4SYcNzBs~O9nhqq0TPV8h;svV{|)uXLA{-Q6_Vh<$C0&6WfjRQZwt(1U0?? zhes~_Rl4lMKI&%sn~>5c2~b?J7IuHB@7#3!f~ep+MYq!?Ax1llfylbBgF1>6;Duvd zcApf(QVaR?UAJN$D42bR2oB-*)OJU-tn$tnB6tq+Gt#9D6udK-P6641slS95F6J}9 z6sd<_c{ajWoVDVQ@EmNC@tGV>bQ?sR>pe?UD%$HT1gh@wlP6CaE6_{wPyZ}f@d`Xi zs#3cYAXd9qSC|zmB9MRsFR-buc4p)oEI+~Dx&@?|I7g9_4@;uX-6m)xQkOL^ZK{&cY17 z^s7w^s2@bs)}w=xI_NuskC-7G3#h_Wv%$^CNg$_nPZ5jPy!gt(S|MDw|4wCaKz{2 z$&;c(mWB4l;x-2N6O_a=9X<*T6Cw2dy4{-vOZ(H~g9R76qx8aeJ2Y<=)NWLm1dk@w za3NX5pu<-n|Dj4$ZNblfd>5o9a%uch|I#TUzY(rgU?l}dDi#9{CMhcrNG65|tUhTT zR^xpy*kCj^QSY4Ek^a$?NQ&XM(p6Kxhl$z_H5YI|J(5vWIB1SN+2u3u^K&?A!$|vs zuHU{93IyY_IRKpBvAe(gL~0=^q6ncNRC^}C6?*CMronmqE?^zRl+(@Sw*P|mHTP*_ zvWPjxY3xX~&PenysXiC_s0l%6Ma5Zh0x`<3v=D2BqM+Bj2)GyRvgKrNw4tuJM`)Wz zizJe3at;ISjd;l3!lP)f{7`1EACvKDb8n2czU8r9@+JuO&5~P8K)W{w7=gqO2Z?mI z$(i~cYD4RC&n09)80JMJKX0@|k(Hn6yAhJuku!nwb`vaI`GTJ6e122z3i&r@(J-s$ zc180w-Wk6G1Xs7<=J$(8l!B+hF>1_T-Z7H4T*L=8{rYuu5_D3rAt$uS)YwScrOuqG z1HDRGC^I}c+_x6HqGG#)M&MvKD=kuG0NDIZ(3i5{1> zlW6*|wXz(QqIS=0rRBnCoELDgHQtAfr0X7aet74pXMu*g3ZngTqtWTyQpFp3zBx^D zL#@iB8Pp6JNjqvTvMUy5R-@*iyV^uzG_evy)8i9{+=OfxEp54numB-K`UKezCYq@@ zTR-~uw?uoZd3d~c33l$PVydq)kB&7%T6?U=k+$x>n!&j=82;J^6(YkS@i0^LhDL;L z$)ThXT`7;q+a0+iv_DyTKkcVVg(j;ZoP5w+uuZy*=o-U;=%(Ys4j}+cci>Xn%T2C(o zi--_Q5N*A*WWiAj&tQ!O+Mj!0(<|aoAh#uf*@^Cg^u)-`*;(O3{4w+o=Ygyu0r69i zW4dtg!*GB!AO;!=O@gt@C^G_$dClu{*yL0!DB3c>Y!E!MHMIe4rqf&jba8o?twD3c z6F2m0HH}4}2ah5BtT|zLAN^cZ-Y+?K3z64T=UYsEn-%gM)E`{^WRDvU&kM9lsY*Q= z+XC(f7=Z`S)lLWuMD-9IT12O5HQZVfpP+Q=+2z#veu=#^SzC0jZXQ}hEehtB6I4Gd z3c7I~0!*@tO(!+y#0cgOUQ4h=5(_>td4?zV^}t`TI8Dal6*3!M$tCfrgD8#)@Y*2q z(YxYWbD=|gtaf#R!$5RCT45>j3>1U-!s!)yk!@Z| zsD;iaF%yrgtDC#KP)oGd7Hwe4nBE6INS8c{*o>TTvOCY{_tG)s*H0gqjFAYq-#*sA zeS;J5LUr!*#0&%ew?j~-nbqP~NW>UQ27=4&Ouu)aY;&V8fysn;O%&D6lHF0S&};xP^O2lb$Mzn3$K;kSJ9Jy&!Q#R!u{+Tn zM_qyIhz{45&3M!Vu9|dKE#n_m1Lmft`ffyMDQrV3&$46}HDg`59v?!`={6H^NOMnw zdaxh$BWn1oD-stD6943dMzSj{&LY^UV!lwVp-I5wjNXyG27DCX&D59YXXT3C1bWJ~ 
zPW6$j=kYaU*MBT+J{f0T$1B#!INwDX&@y0Jw4*7##t;cRjHGD6S;7k0$zCG#PnokT@c7G;_72Y2m>E6DS_fRaktRFPy7R^{sVao5lCT zl)&F;?rCo3{_2P(%hIPmO@6Y~Ra-O;Twawh7z~axgH70kLw64rT0~M$LXBh>z>!TI zJI=yKoTP1o8W;zE894QgCsy>AJF<}FQ|CqX`z}kIy1l--nTUR1>|k`)EI<^o0YuepM{9Iy&-S&GC@P;c1QhFnwJ$^ zPjMs>)VzYj%hsKJbO(sD**aZOFHJ%*I51RvmnkT<+;>Lkl60;_f7f^?vL0#=N#OKz z=aZD^Kmu2GkzWjNuU4C+LR6#ZMNz(Ioy?G~$%;k)1ZZBG6_<1c&ZgE>F?R;&^tWnF@(eKXk;kh@6lh;x@P zRqkz#d}|i(jUT?`G6=Q?2Br+XWFexB5&oirBpdr7&$Wrve^S5m^V;aiIC z=xBhC%|jh89hvF{=;%}Dk!TLinyAA#Z3)4b#LUdfqbM&b3PB)E3h=^t?Ts~=q0X1Qm2QDCsGbaHor zb)l={6X)#b+tK)IDoo&@$2`7@lo|xI%K}vt6)p$v6uI<9lQTW$S?X#gZ%(7!%B+9YBr=x8IEZQbU&GC6c-9dCl|M2!+Xu* z*N~gRC3AE8NX6W!_Ij7c8)Z&bN2hi{tPTSCwiVecW}-k_+qNPd`~1n6_weYpSBriY zVBMMVA#O*(l*%35t>xtjI zkCXAqb`hx>Z=|Vk6nqXT(79ueKHkp6Lz3Qo0}L2Du3`>^JNV1e)w}JFm4OOi6F&t} z*q{c8+@0iFGFN4J$h~tol5XC+fR<(RkhEpFAMqE?@2=PgY+ESw;N&2;{<;I3dDUkw z?vCt2rp-yq=CM0xQ^P>Eicbo&CH8!7zWq5e> z4#2JEEXbMJ{A?l*juoFde%)oO1L12V8&N@Dgi45cYdIV%$IAV@bFD7!x#dycfu`1^ zGsCAM$^r`H0n<$Ca6n%~y4f4{-l_^)2b<)RNlo^i1Hz4Wo{Lp<%z*HbJ5m@> zo0B!5A9udXVulP}EYph?xNBRR>R6UwFz`JgbopAN3PYl!@|+&kH3R z3utR9_GmsVBb zjCX;V4noD9kEene#x`Q4?t76;2*M zZL3|d5-}>A7(4aXg%x_*>+3chab_Z@>UkHzPB@u#-nF&#W2-k?`9llRoE-q(LU-QU z-AZZCb`cvF^3SJzjaH-!Mu!OM!1|{ke?n#oDWVm?$*xkW ziN#WF(J%EFO0|a0zZh5OPPT!|N}i5&xzsE=4A@I1*wstFnzvL_Bcp{N15lXFQeBXN zr8FwFtLJH41hvv^7%s}2>`n$p2I!zIoBGek2HH^rAy$`5Y>2}^YO~H6^bKL4srk3= zzuko(G3_o3*!HR>0m)lid+xCM_t9Y_)YvYPGl+K)$Vgjz{q7<1=s^d{uk-<988xx$ zgxqsvQL!OmR4%ZZ(#-0R#n+M9MlE^Ua>{ntE&^XzgM~~>cwXpWdcVd&bMlO+ziPbh zt!xHy&CTTs)`m*qPLid@abNW|kFP=Uug(R_!MDu=xGe$eK#JiO!CENVFy~2;>C-AJ z%2sQX_H~2wrxSdKET{PUErPzN?Dct7v>MT+0|KWq?gMII%L{%za%r^r*zHFtZiDc# zq03(9RHi>DpQ?Ae?MrmT16C{xxG!*az!5Z+LCRsF>I4h+f?GIPvS|p0#k|+kMCK8n zZ^^*ZDMQg>)RhSKEpgDU7nWp-ReG46T&Gx@kK=Rr8XLI(P zY-rVjy^9k?72tNT{Fg?*T$PdFqLtg_G?ZW$GD2Qq6xl6;O5GQKPC?C852s*b8S1IT zxXeLmjDY-?@tD2y6}ghu?Y2Ll=r?W5wicrsZ1IL?$y$@MG3mq*G zb`r7vrk}nMG;s 
zjkok4bN2Y%0yUr&MPEHh@9p<60QL;WFO@+z6utBSc&-5%*kyEaoh3bI+kAHrw^q@LP&rt3=-nop=Gp{D5VMvg#GiQ9%b)g&sSJ z3dgf^)W@4a9MWV_W`Wc!;WFs(o2vsm45^t|+vqNv1ZVKalTZW5W)+ z^!*zX84J@g&{y}>n&PyL55C^nqabyJZN39Az$xL(qi;1?WN{=nomT4Od2C+ODUZMo zgKQ(y;6@8y$t?#jUu?JxYR;{0()+JJzQ2Lkw>1xW-g1<)rl)EW)}V*mYVeGhPVtO{ z$cZsK&5qji7THF#C>QEpLeTHZkekp2N{kdYe(z)hStO-0sk` zU5CD**?Iv`k7I^HDF$2w)YOFxD#Bbq4(2IpbHbKeHDMV(%%HRO;}1gj6Ru{1wmDn#7i?TWM;FsW$HLdIQ5^eSu; zWe;=7JVa4Q6;kDi{#F9P@Q-irQ4qT#svTRI%Mx0Lx?o<%rZRQX`LqP_TEu*4vW8LT z$P#CTlYBn-$}?nkPD>Ayb=C0;f&xGY+dQ60GGos?Xf`OrUeX8v6Ag$ke7@T~%Ft6Q z_tdnDCAt~pb{TrLRJ&(W!w7`j^LK0(Rx_i-{K^Pxv6+)FQ%9)I7S8S4ksDgtwiyps> zZjDL0>b~A%*tlqBIzjk+yGAKT*lfLki#Vh_ntK*}KT#txK+mZ%q{q2rlnXQFWG3)Z zXpcUe8Z!g9BkgYj+K}>6Rnk;nqT%PNjjpSMmADU*WnC`1E%Mn%=1ol(kv*vi!bKMf z__HB9q@m`~%_Sh7--)}?$UzG7I$5Be3+$!r=7Cc{wzWDoGAN7VzD)3dG6iFym@%{gv^@I^BIQx@s(aCH_wKR!QDODX9Ujz**w7peSA87gu!Eo74t_Q2AO2mWJ>T1c8E$wP6Jclz~|?XJeM?tI4T0iD@W!lKvz1HWY$B6^mr%f#z`m# z`$iUY-o6Ccn_b))OnQW42ZW~^HnmC4&~unD1h1uFCjTRj$13taf=S%{^>?d(fM7qr z1)cQ>BZ6R~cn(crU|K9GTFGkxfRy9W1(LNQ`y$z~=xCG7K@WurP195)1dxykYxSPb zBNABI^aBbG4u$_7colmZUN%qzo|R+`r{-i$U~69rGXj+1(iHPX^^I+(19wg zJ{F(EF|5xP#Qkb2A?S&jIHA^~-B04fDuAAu6A`Sg0BM}~z2k05 zSwaMIF1cW^@t3iG+Pjue3{LKW7dCD+^z&+K5)Ei-Cxma2jO38U(r)%M=~Ut%dsU}g z>x6Y8u@ekaI83a2dSt*!gymYZ1SEzbZSj#i>d>FOX?HMKNCT4OsY!D4gEGU0Eu3lU z^x(*w%Vfi%=^t&QVEaeO3*#B&RT&$USkFU4KZqrzV7@VA>F0*KEcxghki15OO*GSw zGA0c?f$Iq_XegAZ&oMH+Po%+{KY!;Qe=ZuIhg8WT4@i3E)?kMoMI&;?V8gl~Q)_!a z!`vXZ`Dt&I9hQzh%MALRjwPs zJLs9dZ3e-ymgI30t6Xo`$k}-FRo;!&VTs@c**F|&qDM(1g=LmT6!R-LX<+zQS5bRYL2Fggmc&ZqawGYf?Cw22=aVFKjp&vQ=v%R1t8eCNsA#7SW zbo6VP;YElX7J{pbW@3K!;6bSmCP|!(E#$${eijCG(U20;;~J?M#@gG2UGPwBoSaqL zmW$>fQpsHPdd$$OEh_L>Fv2Us8J=d%$m*6S{LeoiurztoV<| zVHhy`{`=djcbz^I^x%SaS3u!rj;R>{jD=t_9y~_*j*>zNt0*YiE>qOzF zDBO<(TYuG(8O-06!1El86rx6SCcS$0o zRvJ}n`ofFb;Fdue@}V5zj~95SCAD`jtwqDm<#3hup?UfC)Z=vWc#%L$hB2$02NMz> z%J8yNFwp$a;WwsZhJz#;X^OhY5#^jaD~kv7o(3Ozd%pjns!w@TI`5 za6ZWrp?ST;K 
zG!W=CI!ty28oeaz0-2`Lw)4^W%;s4=9kPcXz0TcVfUu)MC?b|=K$RG^XYq2<Gz*}M z#UN?;*1OoKsFlR$Q8q`CUqdz+Dq0{_tU+q(BZ)L&NQv{@@Om}DaLU0B)Ei)w-zu)* zc~e7<5wqaT+efQml_w;!MK5VRXq<2Y0G}o-$w*7DV}+^c5FBG#D^AS>x|bu{N}g`= zl4fuVU^A@Aa!LLcYUrr#0b$QaL?Jd#4eMr&(i#E`{UgmxMlVGhk{t$S&yU2N*7w}` za=j>$fp99Q61PF4o@;iV>Q0iT;aYs85E{!uB3Og13jqbFfjE7tIJ5(B+Kq#y1eJD^ zRi9P@MPD2!mWOtb5E61G z5}iQejLE`KD{mU1e2J!6G@k49rx(DM7N)i8j?frU<{Fz_qGJ%tc`+qxuH~9QF z`1OZ>E_3+wCuhIYFV9@bRcC|OcvdfjAInFz49)wm7rVBd+-~G*Yn`Z)O}~N*0PNMN ziOy)lxjPLQyXGVc(WkGyTbhcic14~1e}2#Z{SiVx-`>D^jkEvrEnT+p>FfW$t8vyK z7jsoNG2}rCCHOz&&zqTsR@~0$rrwo1(JUS0v6G3spFgW;s-LkK3FX8M(F_=kN?@o) zpS*Td)|$|>Sb=?$7#P>$6m;t60s~HHzLkDjc{5L<2E%18oxBy8vL= z+-q%voJkasni>)C-W-tW`BDqcR4-is zfKo+M;vr;Qk}Bzr>_gv-0VI)j{4)`i3l(sgzrZMYUt^Bt9GrgVB^)CH=}s0iAe)jQ z`l+Lt$MjRpKCk{``(>?Pa4j%svYHI_wjpwunB-i>0E)Ergv>A%VoEhc#O7j@BR|XX z5Q^J&w(QKr&^o#-B`^@^Px`*ckV&WA!tGs2md2Sej+^#}27b9*e#8CVTjr(&LFqS~ZlxO(LD>Kf^&E-aJ6V<(#j79#h2XNu;PV znvXQec658V#3DS9G+#XidM!=`2hBnziP2CM_=ia8*^d%wDzwy@1dh{lpcEy@K-4+` zx3PK3fJ0_#GMuXDi%x^fyM@Ol)gek(Q5@hVsRibjEFuAkF{#aEOh2`>>VAA_X>cj# zMmYS+EH%C40TX8-H&#;P+Di1IAknx6+dn`34kOM?>V}o%Z<2vr6c~sr8k)Cwnsp?Q z!TpI?@_M{g^$`BbnA!>FXEH2O8r9SYuRA{Y*g!M#E)l)}-yB*216O`gc4qz`U(s~Y zD#ap5pX8HI$jAX}Uf?>8TPno(vKRm@A$cqH8$f_al2)W};g zKi_}0;U0}R7kU#-K^{I45ePOnVE=wVF3WFq;|c;u0m&ITsh)x~=3#|ED;e5ZBY&f^fsDZi=1&Pm0=8I1p@MR7ixsA&V5o$Ivb6}VgPN)G}0iy>gzyxh?K63 zB=E&_wTGnh=Pdg8JEWMi4$U-&21B271($7O60EmL=wYTa#M~>{gLwJSgiUW-*j7RS?sXjiN(V4O`2M zUEKZJe_`QW8QI`e+=g2f`keupYj#ewc&}TB&S3b0IADwbHU3xg!sOBP6Z$F{>L_P@ z4%((s!n=jWWLIbfJz6rvSQtwt_q7$%4=TfGek}}3OP(vCo4$c4E*Zj*IgROt#5}Dc z>#OomWV`zuBK4W!SN;g!*d`O6>EsS@Wddj*^kx-v%0{(r@zuq}q95yBFw4+Q)G0p& zH^j4fZ?t8a5tt$gBAR>h%e8W?XMUzEr1E6H8rp_2ztbm313&|q&-~}|ESQ_^IS;H! zGrYv5Q-tyGFc7nJcb3G_XLwgq<44B@Av!O;8jneG>PwT#(WMq0o1SYxf5cHX>W%73 z!<`Zq>4Fn0JLodv`@`2?!Qn~jfFc`}VQz4KJS`1;ONnZ4cenj$ljWj?h)9p?a7P7W z4!*yzz(Dd>q5(|$WBb_5kgqM;8k)q<&-r{P_P(Y`O8Y3Vt>JW^#U_yyY$6*>uYl4! 
zD(CnVPaTR-*=oZZ$QzoCzsu%Fn@cyhu|Xqp$j9M#-4EsD;t` zDMtNxkCG$KLW$Bn4Yj&nf85{0f>%gHq!T4eXRR2BqP`!f8wKa2q1HY7?sE?1Dm3$; z(<`ok*?+pC0x$tfjPBGK!+2?;6SxX*G~W|y;J)V_Q&Z5mom}LG&RxamJ*m;&GsvFi zgT?WqsFOVc!Wxvnvnv~BKyi)^ZnwaM5l!&bmFwp1n~4k%jKcj4*0h0S^QOsLYw$k7 z=xz6-FsFd{<;=+7`=f8XAZk4xdmCIaKjh>c;8`)W_;Dm1u2yGk$GbJcN*?w!)+izZ zl5jl?6_)x*7QYb^#8_xz%K{XF`q84#jm_TIWY@4aBve5Rh1y)sfkG(ebkAXI6LgZN z_3aQ(A~f7R_`QyyWO^Nhd25wVyBBieZ*AIgmD@^02S#ggNl35_$m*9 zHHmr5$&{J!wiUHO;-G9@U#@E_P$y;xYLF;HO8YM`W=C#2I);}(BvF?jT|Om39#gvI zaFj0}deKvTtQXCB?p{-Y;3o+}9c5?CU;b?oRRnT#gH7p~Ab$%@Yqd_$tnSEz3Y<}* zasA*$CMJt60pUn=fNFfxKQCEVk+D0V_H%l>d(wbT=E@{s0do3_bTX9EXxtTa^W_Fv z8iLL+kDkKHR@Ymgvgf+^AneBSwe1?&un=_H0>wR0Yj{01O~4Bb=!}yy?v#3EAfBrC zjxJ(DYa!bsHHG07y2PW3Y(>SmudfpZj9|)Fo^vz;LwDlPc!N%MJ$OF4s*Y~I2q%39 zJXW;QQD9~AV?$pV4n%mqyMn%(V!oigR2wnyW0zVAPAff&&3k!RBaE% z366W6-Bls^b;MLh&3OL=aZ^C{qvJ6z$9&;S9nIIieR0nf%DdtrEpiPQvmsFmY=wm0 zB7|MxAT_IdW2hIiFp7U;1^$bTv&lr2vjU)}ed26Ae-TpqBvqIFm5Y1+W)IX>V5E3~ zAB62elU5;i#bjSHqz9TT<{qC5xC1i88`onheU3pc)7`K|Jv=%RsVx;m?5Ua>2-_|U z5=dOar_%T75^UqP>Jn@Txi>bAF7RqbU(qg4!H#1;1>fNw02BXwr6&?jC=r!I1JS1t zbhz0?j;)b>$yrm*L3_nc83S~w^YU_=z z0_D!S__ygd`9QnOvmEAKTI` z{-Y&)33M|?$q>~gx`Izx27W=ZQ#^B^(bhu>G{37?ii5i}30GqNv#{?tQT__bcf#x# zQWH!acK0|J+~|eU^`J_pa0;gzXar|8pZv)CdhWB?U?@UJfS0Jsp-jD_&E1c~!zCeP zOuAHIu<>ptnUF5E$+A;Z19hK&cv-*pqpFJEfoC!#CrChtz8?{|I3&QR7SHHj19$Cg zMQd9NjDrGL9qqL?B&w5zxJU(MX-V&ou$L(hAy)`^{fD)^q756>-m8Pi#oo=-r(J(X zaS|VDbm)nP;0Wd#E?My(K^+~-=Pr$(PbZ{K?X$AK=*Kydc$9~r{oPmNJeyMbqRANx zGb&T$WDpUd(@a-ym0Oc0$=IgE9W z9cT!FrUCMqi$?%XAW5eACr0i-Kc+kcqga~gFg8Pg%6MR)wp7Vx0He5(%N+r(kKkJ> zdg4_fNbb$w#?X}syD+fnejsDgAG&_d?2pYj_lcg397pNk_vw94x7&eKy3*o^`d$Vr zx_7uOM7?rvn#d+)U=IYy)^gJG$R_wa`wHegux!=!o{*eZNsL&kV8^FBfmcfp!yJLO#d^VGu4rucNt`=xk02+kEhLzdz5n%*hyz0Sfn+}1r?767 zHkrA}t+nL23UJ*hU$H90(WMe+7CEB){dV#hfGJycnZ0-qMKrX@y$qcQwWLfm$*6Uu z4sep~fM4Atvlt=?DO>OUchI3pw>*_M-hybv3@!m!;yChJ$|1XwnxPrdhWhHtSQwgB zywVZA=+T}|V$S4WD|vY6j>6yGecq3Y)5{9gMPR~rK^&;zjE)W1EUMxN 
zjgHK%aKsfla7RC_s)+9>Z~S!BKV61<;07oG77<6v{z$Hla3Cu6B%thWLK6_%k`394 zNF#L<)hG`^RouidmXC{n>#dmDn|bL{iXJR$${04I5K$N}=OhFzrz9ebVY#jp=fpLn zn%fEj5$@5lAv?qv^A-eKeFL0*(!nnnkx5 zf`UO)P6XdIKe_~b))fc53)NKWZ{^P)X9o?Mo!wOmYXjZITQUSVwm(kFyc2^NHHN!X z;Bev0X`t7u-ML6s07Tv@pEgR9;&WkJTg+KXxZ#nSdPiUMw#;BT(i*`gIFe!W1x6#S zmgokx!We^}=)JQS4dvL%AkbL#UmtS|UX~EKTLid+CDr98t zeq>ngOJbm?jyaA_fwSwLZlhb7oN?PQlhY%Kh>3~ORdi%uc8TCzzm4SFYA=OLVQR;+ z{>2jVwa4AS1&3-s_uXfo9ogODjZ4Xy)XNbYM&8O?B%8n~OZGImL8;sUa8=S=9NLQN zwg)pt^g;xX)J-Po?$Jx;tPtFzT@y&N(3s{yF}jSwINVcv|3L0PbOPnt1HxZ5C4g3n z*4pU*6WuHcd2uzaf<3OLb{MQS-B}G9E)@1TnpHZOyho+Km8?MIT%?O|E;R~cM#%An z9}amJ^6<95%jgZsJxexpH8r|rE(9+sPb2f>Sfi=B$tKLMRpNDyTq_0;WVkG5W4ap$ z@!X!zJQY_V7j>Llzo76W(bV#b34B~@%yLn}_G1o_I!@;b-Pw^;Sve^#>8CIbx+C)T zpI^rB(t--_cd3(LJ^_{gR%0S1Ql)Q$a<0UH2S>OEv5>yqQ59F_U}wt%fiMKTvp}3s zXLXgstkdP$ORv&B3zg_ne!veu8zP@HY*}0MoP9AUrwrKp0tI9VbAHJ8Ciolbw)syB zeLl~oUy~7DwA-^{)|}>@h;~-Tu{RVd!p>E~=_G{i!s?P|b4|WEi}4DHPrg4j%B6)Y z)#Z;C&T)(q=(tLTD3T8xcSXapsuH~PN;GD{NR9k`zpo&Il1BDuyvPf9moC_0nk-r_ z&KQwa%frLDz7%c|%bU{#3CIFK$k5FXf=DpA3+Ue2B}_uwq|cWO5n+^fZ$1js?wqe* z>>@E>2*Ks6i$Z|O4n)Kj&BM^4hNSfL$4y)TIGndhE$3+TB;FuJ5Z8o;B$+1fjWdi= z&9OeCoBaup0sO$+k59@O&x=q&F+_hZ$P~bR8}8r!TEw8eY|+D~UMz ze_$YuDWafO%Wo>=BV0}uKtzNt+9u~*SLo-T+$#RhMdpRyQ#Qf8X^i_?``^g_QvB|! 
zUA5x(pZ}rOZ-S6?{`2+tU!?H-=j;FFxAdN8B{e}~!TfM7vmW!ek`~}cM3h>3hd;gS zW5CG08_67jI}9uvY+=wBfKcX54l~?$|Ib^W{m%N%e*i!JXvaCu6z}Kf`ai$jM~cZW z+Md(*;xt}eJ@S-3-s5B2;xa=S2_GZ=>s2MsF7Z5Tb=&0LuZMGvJfP?M{MFR?|Dhax z2=e*aFdazbU_3a+UZfZ`wDT!o^eL^7LLQWz2p)o3G#a5N2Qc&~+ z`({2rF^QmtZ*+gd4ac0Dd4YAOt2hJAxa~N%lP*G~%H%}`d74;?Xh93u35!$^t`ysj zD8?=O43dzXscwU95r$RdfOjkD9b{vs%kR*4#1(%0uk&y=5bndXl7C?)I6A*rMG)EK zUXw#n8(dD=O(&dv)haRCdjp}t>Em#o>+=?Pcr(UqSffRbOIws_C=wk^Wms~m zl+-3dDME#GFq@@=oElOxr_zyfQFPj2uapsb`-s#WWFRoC}^zijK}{r>zu@89kB z{d~W_f3`pNew(_k*WvkmJRgty{r}>p0 zJbF*t5s0&tv>`JJU*;))2;QKc)zGryiDM)aJ2c{t*7x}sXY+b`rElc4uV!VKPAUu(oaTr_kTt;@wCQhT=&rpWU2RF;< z0P}n4^hHK{kcH&9u@1lOWi35o_!CD%l52GjA!OcVk6Tj$Lh@($uA$d;{`z_unIpji zT#wyCQb&KFM5)IY6PW9FJ#YY#%G%R?7*1zsGkSZddMeTfrvekR>RPGA7FNihAPEde zU)0}5Kh^BDmXb)y4`9)IG;ABhfKuvo^S^vsOBXb`Rq$8Na@YoySTkV2%b)e(s0)s5 zk~;4Bv1*WNYPP|b{Y=PBclW6(F1O*r}0h91HH8e=l#v7saBZO%Ak(dT4%l(pQj41Oi5MinD>1Hs z>TE0u4b@#ycrY*P$Lj3s{Io`CtS%pCvI|V*Y>+18S(6gjdj@dSXvwIb4RtTx-yF5MhXi4 zpk`re02H7OJfMxOaZ6-x4J=Xt4|H|U775<|)h$EI4pD2+hbGPH8$h_n8Edz#-moc5 z(m-Z-pF1Pc(e=3z10Gcc4Ivd41e51 zwY2JGd2|TPQ|UhY+^HMfk+fkacHrsjok`aVmz|`)4X*D!jZIkZTh|Tuh7=U~ers<}ff1&s0l>Y-9|=OlOvu)i z^@?fbE7h6+Pb^CzosBHgx-JpAm`l!%ibtO+7XFpnVkxw&e4gUZFhZmxu>qf-jw;`} zvfh;a#TCFfea5L3mm&VzmPt)^UN4`tmw}j0HN_nq!yz1HsyEjG5ut_tl1rMmu>c8B z&v$1chvQm)s#Tm~EB@;x%iEJ$m#%L(p8%FmYGnV@%fDoD*Q@&%rp%?q`^B^I5gLyG zG!e;htU%2&flVn_qO1{osc$-)0xV(?i-A$wu$uOiAcgqKd>a2cCs!@E- zSmpowbE@47z`GJ@W{v~2JGm77tXxZ;tE(Gax%tI4XzZm_W9#g`Sl&Z%@t%qnwwXZR zp%|@;8{g-C#zOEKpQ_JG*k(gZ*sx@{Zaf8?G<3cvmQhzXeKOF%@}Ni;owxhkIg3Dw z$yGqCZ~?^Npxj^0P@m_Toxt=vyJee z92?6Lxh>YG<2MZmh3jQD#$`|M>V|mN@}hItSErA~ZUdhA>{a*JrqQ~{B^2cn^`Wq2uk*Kho7r z<%CggEs%iym0NT17cFubbKv$yQ`+Cw^uOUW$&~FC8BBm+qwCW{jxFUBXP`W`IerDj z4^7PWbc0d(=)m9+)1S1G?d8OvJ>B#c$I)NacD5>sjh{--g!Q9x$iA>%~SsE z__)*vB##p27qxCUaHOiHpj4=)6Ck5+{ey9d0e(PE>Tlc4j>x<3Yl8`XzdQH%fa+*nDnWL46Ip zteO$-l|tenaH4vt5&&FIKCPOh*0kYmfJlDbH>g&sRf74rA4nYK5RQmhgo2U7NA`51 
zd~W&;5A!GC0&P)6qIuOJ0%4UnQS@7;FLB{Uqn|RH%@@RZ50HNIr$RMa2bSdn6bf4V-ZTK^&9|&)08Y@N=PXZ z`7vtAf2`cFNgVZ8(TTM%>f`{|tgqk$BW)XocQHR8*5toYbR z$srG;?0fm?>|&s>@TZpzIQwa+^c&>8lDk4>!XTD%@RUX}srE-vf?X8h|pD*=GWT`9$c!xbM! zf4vK6`TD+m%io9)=FDjhYTOWvb9WKdDH|F9db59#Ke#bYaQ<)2{(Au1>)q@9+BW>p zt*m$O%|9Cc^T+-mr}zK!)9CShRkme!U|Y!V%@yx{9QU0;@u>7m%img{x4>M*@rjB} zWM#{cHS-*e7x_jX9)HrpwnVGMpe##Mab|&5!;sHDYFKlya`yZIvT}YavlP!M#tj{X|*0+d3&4VpEv&HK&hd@UytF?BtWTu|1(K; zAfU*<|55t(&#{Nn|Kg?pfpjY6pMQO1$Q!WAhYO^BCV&2=p5hQ86!g!RN}tO=kN2O2 z^ydcQ|NOIMqz~ht;2?br{{#o=WBBKkkv@k1!Qk+Pqr0`&i>J4o?Kzed(|e=?-Jm+W z3ymAcY7ua!-y<6WIRC6WrA}v+sp&`GfB!voZBtZKd_Xm-x3soa(Q5>p181(9RG9rG zyaZMA&eG=|M@kZRZeqVKO8fDH1`VP(D-#{H7owuJ2H2y58O!9`a%YYP`(+C2dI0aw zW|L5)$oh}?(B5g3XeXE0ps?d`x%6!wt!F4{bP=bpQHE`;gpSZSx;Zwt|GQ;oRzK+& z6h4tQ!_4DJc##^Nw(iz-3-SsmuGd2-`v?|S(grHjQ)QVqpWP%#Pkk2~fU;oHc>e0u z&p8!?-6l|~ZqR-9!RvRye-9r%z_lHX3ady15JTurSrWYusCfXbD13Z!Iqp2)OjBDC~QU}TEy9ME-dCvs$GIjnOwKo+}g0=uA4IvU_H+n^Sqc^9#Q zi}|%a9S-G5bJ@p9KNnP4F{U)?{p%kc#CeZZLBt-1v{;gN~=tC)kw#@+IT?m zlun}+;ONaL_*Xxe1M?RSl>PSGZ!O&aN=|W2&v(vkbAPLD5!Ry(=6+L2nE`WlvuM5` z0H96)X*0<atCMtx$je&9Rh>070Z{?X6yc` zywkIkDU@}0G5hQGWn?th3@N)3NV^=}zA9bY9A!X3BW_+fjS_qA(4j+T+M;^iO-;~_ z9T9!j=OKh`Mk%=|xe1cGLy+!irNUeLuYbmUHL&qX$(+=#oTy^lZ~d>IYRm$Z6;(_H z%n##T)`$v6pl7A8v8ivm4F}(Vf|9R$;qlk+%gAiM`eDKCoHO68sMYp33f4Qh# zP-;Xd-%L?U+aGrQy1LX&3*5-8mssb%Fz1Lfb)yE7;b6*?Dbk*HIuj7)fvk`t-j3&2yxLoK}B^|GcwYLbSCU(2<3G>#2Zyyt1R#ry2 zLUiLkY|TwMRKHj>{H2G*_YNuR`j#$RsO42XmWAc7Sh?~;0|SF=P>?PudQn}S4y}y> zrp^DVlUjkkEz+%=PStte*yBTv)V#Rzie2f$!8#SUV{GQM6Uq0# z{Yl$v-XqP4^-|y=fv93U2N(EX& z#0q?#%yBF`rIDSN7u)8px>gKzcfnrW{x8vd`YyH$LQOj2=s19mF>MoYNj_-tn#>?W zce`M0x<_yv^6Mb7!?9ot5aXp=@)_J^iaX)JGZwS2?@h>yoL0u!8_`YH#U<-PJyLJ_ z`1q^`60KnokL{r~PXHXZ&)GjuG!xozO$u^A5gn!)ozR~`bSe=q_Kty2GwDjfiTU#saOf6I9R?iB-Sa|O z_ey+k|D8{&-BBkVptRJ<$%$b#_VMG#MBF(`Y6dJmJ4=dEKEi@hRCH-98$%}u@CxaO zrmHUO#MvdvOz8CmYAVNwerZN8%th`z#ewPH?(6N7w{mP?v{6{Ts9Zo(40jpe8gb&x znX&Y*0D_qba8#aRT)556nmkhihcOz^aRTtGRxx{XDe|Q?L&iQVe})W`#2Wyp%wb@8 
zqo8|wV02D)c4XTd!6rMp>AmgG5ebYRfQCxxE{M$l6Jk2U6Qr#T)NynlX~=J-T9Lyo z3s+Uq{SpxBBgZ1>zoyxn7VQ_y-w`J_hcxXA%kVr2z&5g_ssE|T9Qz6piH&->sDXp)HiJ=+}bw$h^Fr#Mky$cnWT*a5dJDi&P! zkzG6_hFj7(iORMy@f2+#f9dEdRu10&1*JzeHvpQc6!HJ=*fCRbKNbd9Sy@?j&Oc=3 z*2DcOy1hT@$qa0KbqUf@+Z<5v*Q|4O(C|Ccqx_o9BJ6(U+wPt?q2U!i!^YIYLIFfe zOhFgth84a-go-S%>c3g9w2?9J5mB4o6Y}evoP}>BaMaRAn4@9Mi)`9lz=pRDTgab3G7=65!k;(nvE3l?IQACxi%&FVb=j+DW5|u_x{J^&!s0|I#^pyp~j5RVDZw zuT^U!)(b!6f2`=^pYnO?(ztPrA$&Veb@ zr{|su!SXx0BF81t?%fcAg%v<-rJ-D$o-m}@)dP5F^#jQ+d97N5C4caz0 zkX_Iz-+osgeAVM^kx_PzZ?4Adz>h6 z4X?zaiQ#uN=;p#6+8p)t^gN4kV_N|&C!_P+p+=}A)gC12t&q6WTeqNuUhnku^DBl3 zOxmE2yg;Q02mcIq_C#TfXH5ttAa)^wN3(j_bB_<$=vE1NBonjg?7wCp78Om0#tFRA z7)gz!Cv?hZpXKb1hD%>_erVtItTuKtvo(y}+!Xx)>o%z`W4|G}KMq-WRhBrbN(6(w zyQzKksixvt`ugl2GhfinO~_%ioDv(eD+dHNExN9uxzVqSzJ1d4B3E!w$5kVB4cEn) zB~8k4zR43Y2a`0H7&-^eK{~2M^;>#+I#-0$B?%H777t50fN`6UA7YhZ7qV{6n$f5$ z=$^L-T@qwp;4m0K5ffW&R$f`X6%UPc7ENQ&=W3`Gl3NmnW0~4cO~{h%|0u>(yRl%0 zTzj4m*1dO17fX`!8t=lxizcef5oq;a1&IC-xDZ?I#AH07W~gEemL0B;>)&x+$)9Xj zR#e0uk*9VXmtL^D0i}%`n}_K>mtmh@yK5R;*XWoTxl>Y~+NTPqH0vsntSnj@7jYn* z2b%S5ANczERzZJTTGdVUt)wSU!l9$~&N}@I_{e>mun}Ld0~4n~Lz@mg{y1(Pzpy$o zA^FCRoja@6R&<;4xMFUTTvCsQYai-W@Ni8pIMWFh8YyMi09-e#5v<|R)`1-d9)b!w zj}SnAMvlv%CoQ}1r>LryR+ZqHNS6ze+C8+sz=^x+LOgmWOK3ZQwLh6s*i*Wm9w$zm zc;&PXdx>pX)6r(YUZpBXQlcIunvB7|plWIc;$$cEyJIm-wXDGny7%#P#h@+4j-9eNH@`lYQTU5pUzpQgA=2SQK^OqWs!tJo~pQCHhU z4tU$Xsw5mRG8YHh?kBJ_3VhV7qMbXT zD*^}e2n(}!Z(}1pj6j^rBVCDXitPX0v<1pxDQf+HmXVQt6DhTQ)sW%W_hwLxy_UvWJ{JTAuKJ;Iw9#o(rgoY zcHMtuPejIlclBX$A2^-XJ4uAZ^3SB(iOsXDppZ~`(;|J|4$NLsI&-#pjD$beBgYEY$BUKO zKU;~}O+F|_3W~d4uP**uPk$NsqHb1%&~JH`Vf-?Ujx(XTij=##NS`WfGAF&_jsUhb z>!7YozLou|IQ|04fMyuGr;<#m1Ul#!3JlGOeFH4gn+g)E{{!W6ahb64>@$3BwG^$I ze&^4hN6m1hDe_4qCV}Wf&1VB4r^w0^=F||ssoGt4LyxCUBmf|XQDqu@^UXJGQB!@VBEiXqO>~?zPHsh%Hw{=edaxF4*|H^LKGfGK5GQBS zqzRfgPL|4qR&+cyWvGBmlhi_%Wzxus1vX)gjL<#*p9!A~VYPT#Ve9igPI8-Uc(`+I z0?NM{g{C%i5M5XUOZn3S4A1kd z=p_V#{;VGRFvjindf-J>K)VhDF5+CBl+2yk$?zAZv!iVe`ZIpvT#L$w3rFOfsz5XS 
zHM;Pkiv{ZBd2+2l5b2eF;oP}%7NKMN7xvTS;O>;Pl;6=2Oc0+2CRv@`d8pMDnoIyo za6v=HtgjVvc$e0Ayy^PL9uaC&VnJxqo3Hwv@iF#du(cL`RetpCCcj8?UJB9!Xm--l z(lXQPjJBH#z1|8+YA{oC%WEW#BfSr)F>wFb^Z~}Ge9@SRkrUSq&aHTD18V>XTIbMf|XsIq41p+D{OeK11D>r=Se-PYHDiO)Fv0) z=?6doxea!QtgPG&H~kH-lI!iOuZiL7Tvb}p=kmOju$>Z#K0vnkvM!M&TM7%i8X z*uB9svZ3~*FCKd~RElP!*ya8O`g89TheG+&%}ZfUz-}1$<7rQ_aiPH+z`M!HVRe#E z05Ob&amMaKF~>UqhKMW_hM8(!GNv2)jn)oaLPx=ISTV7WbR>TmOGeRP!KWm+o|H)eh{Q)S3)ivUbwe^nVj1(Z2ESNALDB?Z}pvi zE8I|w_^M7O$S*;=vUPWITu@FD?`8v60%PZ_5EJQhCYnWl8k0uA{Q3)WKQG#rf`B67)H!f#K z9m`)ZDf%_-m=cMuGf}1d;H+U7c>?wL>Z;bkBPIlEoWV5NKzkLpUcTz);>5(n*RU(% zD?6(+$_0#H=b3lp6H=kIHjf8eRj@gLeR!O(x+y31k4tKtM{~8*nPPCBadO7Z(*vGK zT27|kMNf2*!n0rXJ_pDZRa}(PE>bC?tigUZ#>iNN=)zvht|9bds{lxRX1{&E7$;Gj zExPJr9!nL_{n(NVV`Xe1+>d%C+Q3aa*w}3 zi6SZt{P9P04+L z6sAB-kznZh6uDMm6k?c0$>gbAs7ORe3)84yjLa)@q=x5Ws2`ynnP0I)>?u*}>k5_Z39t`^CDD?aO(D*v z{*4ch4Cu(Bb_x$t&bqhGtP0beBTwm~<8cP(PGRPR%k3LrFkZ_rsHg$yCAzhA<54Vu z99K6N=fWo6c>Ah(6Oj-`Hwyx$BXEvx|U+c^ax<`P_qvJ9-&llY$%!k(1sk zh<%)|QPFUV#uRKUgq&wgD_#?)>8-pl4`xb9mEzYq4fl;g!_i%&-PmEm>`QiU1==~- z5OOakx+l~Wo|5Ykz{Ze$~Q;7f`@j_)y%jdmdu5*3LJ2oJTH;)G!S+qIe?Vo34 zjvL7kn6Z_{mB8Ge#nghQEqsMsB&wi`g#ZZWS5A^0l!*ifKk#f}mphm?i;z@sy;0^S z!j#B_AsJ3IY@3NeES}oPsbOHlO>Vt}G9UmZ5$7Hs=|%T>c=LE#_eCXRvct+M_u&J< zwB-cloSI>Cvq={slsKM{;RVICazbvlH9&kaJ3HH#k>&x{HFO||if|yl_pebA2hbvY z64clOlPg=5uZ88;a=@6BFN9=(;vdd;a)Qxf2IUHuE?qJgUuUfcQI$dn+vC`!SeOJ} zOC$P#x=C znk@;QVPjZ0GQ-u~y-2qSBsN}rvGpSu+=`dfV5&w$L+0Y^sC_r&P@+`Y!9kUO^3SjS z=AWyBs04%y$O-2@3>HbW7S?Vqu?uv5P6jc<@M$(bz1O$7TRs)^Ub;9-@(QOLvlR`U z+i_{VBw&!IG^Hyqi9w|oWsWWBD(*w>ZwO9lrY)!QAx^|+Y|4W|!QKVUZDxtK7$H

y1Ihg)1j4u_M1j?HpqXX}kO;+THGFo;TzOd@6kxz9ATo!f#`;LRP*{tKA zYl)#Q;M`Kz=e6wagD+0_0pk;2v4JdTJi;=_f1EHjXqz4rPt7ExXVw>Xg7q2=8#Kw8 z8S`*NvG6td&(yfNs518KUX>ArAKLOnF6X}AFR8gt^$?d<7^0L$VRRRn z7k#F2Wt8X9!HF8q7cZWqba-;06&P;wRt^lr;ob4F+ZjpG=T`88VQHt1AY$sq=33;c zff}g*SlT=HASsYCnlx*agXd{n0(@ymsdD5^ql*K~uPl+|vwPRCi_Z<0TX0KEM?fqU zR?a?uYXM;K9g*cZ7*werOI$FRGhb5Lg)}SNLKw0FmtM-AsFAw>Ewf`dc*_7;wUiR( zBqR3SLJ5Y&+5Q#uB4^FwF=aa^HG%YO*Voi)j z#qsoLp7j#znDbaT);+RqjG`iRdh)D(k*L@{_+i-Ck63SA3jGG{?K~A|*Obu%(8-@( z5V0N$&c{fd*CAsy?MVX34=d_L+U4QDnnMl6<~N(!-NlcNR%n^0-h;+w@yZ@2r!x0P z&&I@Kd-Fiie)eaZ;kj?-9Xa#(K;^rQt7DMj5T;6iE6;fjB!m=1TBdU|P{o4U>U=_i zCWi)cE)6Hkel%^(@>(m{`S{S5Oj?U06XF_mf~8O<{-fy(Ydjt@*(uu-TZAga-hRm228D+wg86GUzde<#?j0 za1ouJ@6tW#36cb;79UwP<)bH5I%o^OpmDe~dmV;cas(^+ZoBI-SQyPe==OY^(vjR$ zP*=8tHi}e=P`#%RJrDE9xac|E>`mM(7=WcZ6fYEqV<~1J1e*aUF*CY=Cw607#ROUh zBO@{-n2Wv5VH#R{y4_6RQ5_KRtL zzwE?IsZ<<$lFOeHbKBelIa;m?s^i#`@nbdYMloH6W}zTk5X`rK&h)&08OrE=_eS^L zclV(5d$o;Gh|+BU2BSXuC<6_)6Codu=~Mwe(>iO_mDP0Rb{2{k^cL=TIIxj!n{d;b zk5#XLEJdz$m)5#~zx}cCwdX7kj3_&aNnI82qF@A*U9sok*(I0Si@OF0;1#dBEf4pL z(ce9m4m?{cQj!(bD^@+QI=P@b<^7uel3GR~=$)}r3%JAdtwOS;c)t5u;uL{G8YV|z7;4mK&ZnA9MlmWxS=5wuO~eS$2RlY8t0$Ns8JM$Z{Pz^8!ZN`GJ9AMFGcckECoTO`PZHZNDOY)i)OiYhis zX$jn$@NWNA$Y?xzKA;0S<|uQVH&Cn#{_2T7%PzowQRW*T39d@!{$u9b>`{d-!5RMk*GPNg0y4dghAUBcoCD{R8f8Lf2!pe4B`In8+d1A=P>rj z^AKmR1A-%>aSachu?JFq0~F0trjQCWoISx{DWp?FA-Zm=7d!#=(StWWx3aX9jtk^G z9UQjnvG)Q~K>Fa-MEoH_@x^S#2BJA9WUz)F9(^fEGy88|@4(4b74lC*ISw{~R<>AR z(%H-KO&U$+5?i9yO#fB82-lZRXG+pAyXu}y#h9e=G``pN{G$KkyxuNaFfxIj6#>}P z=?Fr;95So&McZl>lls^eZRyzA;ye#pKKs%ny{&hJV4Ds9a+uC`HC0t#x}3mPo!Pkg z3OD`L^P`h*WmQPhpSt6}%vgWdn<+P1849{K34VR^y&d)90piEV*gj$-6Ij00-!)Xb z^mh5SJ}Z7EP92@Ci#m(gAOzA<%CdHoK)$^>Tw&L94;nqNA;8<)PbN7o>?X}XkTz8_ zJ*7mI;zH&D$oQTn?b#_cod9!0BP^A(xqa#GF782MCTVaRqpw3lQ-B~sY}d29x?hZj z8|pl>hZei`c7cHMry(8+F4?f)9ZOJ3032`xFH46?GI{rpD`#RT(h1ocCqof^_^ZCs zMu`y3;!wcaHLX%9D5oW}NRB8Ur1FYbPvFClKYphp6XjR{7bbH^|B&Y4c|a z39|D;{w4OV8g9NNDgGsB>_gZP 
z26)Tu_Y*-}P_>&5%s#ZO1V2DYZ?STX;LWR#A`1ZmYL>@z&-2|7HK*i(F{8?s*~;VE zKPCt<0lmxQ1c1QERh<0cB?Yz2Q;XGVM!-iFlSo4367x_6r3&eo?dR-(Bq87e;WX(9 zUUG>_R&m?#+hUM`?e&6iCNo?e7p116l0}WbLu&a?aXg%ES__HmvN?I zZRZ7{8nTmqqsnbTXOK)NVPd@h$sx-&J)nC_@7^EapMSej+;V#ytRDZ1FXa@nm`>z6 z1RC*D8j9wDgPV#pBSYIIS@_|_%y>`?L=~eD=hWOg9&;tEXn3law`Mg8qIOyhez>n7 z)>AkX(p=(;62`NALuP86bz5tjK)9vlcK$rV&+YlG|B1vNXFxP14F{NJKUpiRbSA+w*#RInH`KdpMVqspMq1K*L7a&_#8! zvZx{`GV%6~3cF_Z(zmI&t%i1rocN*D`?uRSK5Upd6e(X@ix~^q?od{iD(bO*zckyX zV_j;NwVI?PUz}x)P;?Z%Qv89?-dCH%Z94^FV%%>E`<_pbr5BJPh!~>vlfSGeh%w$$ zvF<@%dPiXD&H3LoAzTqng6bk+gsvd;>)qM4d|;!jEYNO$y!Q+?Ci76aWV|`{aMs!1 z%KGBXn>!oTt!V582=W0#cob$ru}hMN zKRVM0&v8s0@(_8f96v-xoj)f=Ml}WWzap83=f+*%FOK2i2;|oXsb)ckiQw;tTd>zk z@E^S2w1N3Rm^1zlx2=Ee3IZoa#aeRGv+JZqZ1f0|XM(unHE?hvCk0_QH?+4!eA}!Y z9X&#LHC_>)%-*hca#>isZGnH0>e{svk9{OeUzJu=zTKgg@vFw6!s8$OsQgKsAaVcl z7StkwyOz;c>Up=(t@V$c#>)ghrg>Fq_IAmm0FTvb_JW5LK2dlbG9P~Yf2=v& zE&x~3_Fr5uxUt%Jh3*=GW=%%(i284D01R*c@WvnE7F2W%S!jis;3;ZOdbh0pDJZZw zTX6Sp)P7!X`=%uyc7VFf{>RWz>k)GVk5lvN5d0d)(n#5XH*3_vAMJM=?*>*tHH9%? 
z$t8{~i%xw{@cZK?4QzD&&05lCP2m`RWadk_Ax4A=E5&h|e3S)kz^d-DjLYm`=^B^| zD0fWlu3Nc{@dz!r^tBY!p=W0lGVW;2+dcOis@bihHBYcI&;6wJ^?(jaXSjatQay`K zX`^T(OESVS(7jwfw7eWy1G|^oYl{WXXV}kgjZ9c{*GhYT^7PPhtJ5u4lOZCg_N0jH z5imXXy|ep&T5sX+lH%K~n$dMFo;^jU$FzTAz4{~hNdw<6TkiDD3MILmz@AS%R;n9i zUwUjgYi+c;hHCzUL07&~bv4-FWTrlH#z=z&3uN_=7<{ew&cRmkg^I7r%HDq9H>>Q9 zc3w@D(}bO&^*T)*&g0W0R;?~)h9)f95PHPwbk;AgOw2p>1pZKx?f&a~*4~(Hb3Rur z2(>Q`d}}%4h;f#&qjA1Enx!T(561I)vG?yOrBFON{4(41TQ~h` z@$vDwabMS}|2DmXpW1%-l;av8r`zD07kJ{tT{MY*xcHdex{U0Zz;N$S=a}PLBWT4;eR(cK1YEj5#N9Y1LcAoK(;73Il zC;dGgjsIEiJ1-#!(x$&6W^lObmY|n=1$GoN5g#`VXhh*|PbFuOjo|Z^8|{&2V(5d0 z`lv*+xF;(B4`8)VPax*0Tp zVbJBHm$xV_K89hM5|2Cf3j^I#6NTtt_^qVj3D%fb!v16furd1jrstAx%^sd%cH`tZ z!AsK18CAX4>()$EDG89|O;%L-_P6S=nukx_!Mzj;W@%;zZpi8ayLIBP(ItYp~fuq_HKJ6PV)MTNr&FMywy zm0hv?i2J!-x<^7qia`sguqln96wH@HX-V+WUD@Daa?c;_ghA+!;uvyLkT%~Nxo6Dw z)Y{8uIC)I&r`DEGSy`|o{(u?`VOJ25ybXCVr6dHdDPXes^_TCPVRf!WM-xDizDkSv zQvF%e(J`YFZW(5!v4TA1$j!KbX?br`v6Ozl0kLG?>Xo`939t7bpU&Pny&JducbOW4nS~is-cT*em}1 z*E<&|nG$2Nqe*=iT>>gEcDAf4125G`Cd|KEaJlNEc)5UuuvAx+A}h8}ssIfi(Y6m6 zJHeYVVF@RH{4oRBmsQo6{x6kL_%`XigxEFL6eDi{y)oP3urVwNlZ@g@X6>t8YMm7U zSNK&0<$@J=?&JHnz1%=Pm_`H9F|jDc`4Lrusx(r2Ar?TK9f=W9hv3lL{3RX;L4+s^ zl+zK{Z$J}dE<*aAP%;ZZ(3Q%a$CR+i@S<*)hy|CXu7rDvopvz=$+=2&^s9j#&~Bub z%!)!kdgKkbjoJzOJ0Bi-u`VqcmxPZpjE;D#13K3Y(IFT}DcVHuVN(a_5&w%j;lu_z zjLV39TmmM;WZ$x5N{`k3(Vm)EGV3dNbVRcA{lEAo@FC`+tN}HF#h*X7a>VU5L(gkw ze-K9hpGOu#kzm&#cm@w2;_BStiJqN+(+GNNPm?VTsG3%7>4hXHW`~668)BE@;$uF2 zh<5TRzkjdNB>aF6s__BfkL5zM7rx|_8eo50og6^T%pA~yBPi&y6TNL9sJ)rcM_UbU ziStbd;oY+`uvzd)3Q>6DuCQ)ESy4>76TQdZkZT%$%|Vc z3ue4CV!}AAdw=hHPcf?;L}+-Os_;d9i5C^%t%*+fxB)LQLeDs`@GO*eQ0rJag))48 z2%~`j77UCEP!xRAL)k9Hu^s?{Gf9V@J1|^2-NnYJE05xIn1t9j4H8^;%W@ua?@@zH zog{R`3tC}rKb>=uoo|UVxY?w24M;_QrNt_B)nYnHII;)dx%!)Se>s#?#02s>z7Sme zFun4#&&$!C?ITuNOurAh;G1me9VBo00SJKDHETT-_8n7_>lkZY{cC&6rfhp%l&kBZ zCW*}vp2_!?A~32!Sr6oqUaGTV*f7lk^|KhBw5*d|!167*Q(wKHV_594R8?1={Xmk; z2X7PbHb*=W(orIE@-1MDnK1-;m(6QfeW{I7b-oWyfr?sHq8k6rkQ5LuRPdrmpJ!LS 
zPH|Ou-I+dYLDc!_V(3 z2YYW?&oAwg9&Lv#JAoPI|H(-X3)CCACykCCI+E9v@g1`4dQqWbR)Wv;m61gam=YAL+j#dN?@igH{gp zGwL6piP{Fan>78PO%xs!R`EDYpIrJ18->V^Wq{yi?TIXZr;G|a%YfyPE*LJ1|Gk1C zQg^8>q!0$7em3~t8^tvo-_DZ~l;b=CwSxv}h(l#wp;w?8Jr6~j8!`ZPTXUdOcVasc zeoMeR%{97`xfr&Xbiam=nu$8CR`!hlqyL3@S%%+ytAm3Nk15@68!bW%VPAPnX82WX zH_jlbf}qNcqUHjNwXps9Q-78YgySRXKi(hx`MJZNKGSkctp2gHydy;(oz(qr17_LvGwskNKp`?s>*!WDQY6}Ykx?{$mN%{88;b|`@^8rF;P3~{KfrIW)2 z9Sr^>Swq!~XohXb9w=vQASxUqLI;@+7uS?2PkWm7QJF(aEsnLtS)Ov<0}(eJ64AKy zw*Ix1cDbO`ON6{|4B)myE5!6tlF)wFd14O(@lMAnqgN$_H z)G~+}BCdPjv?SBR=|QS?NUVH#$fD)N)LsC|=`E~E*kbnzhuXae7TR-9U94k&1mqz1 z!!-b0fu~v*?M~COs1mH%-aUr>E5hp?aUMQP^A`UM`wLe$5b2~ z&>z}9A-D!qG^$x3MSl43-Go&(xUZ~;hO2jm^TH_wGLfc#*NZR)X!)hy{=MZDF*U1A zX6qTQRjO-D-C=-r<&g*SNk_}QwU;PFWS7-6%=bat-}yf!wND{JvVq8vmKoG=`PZBU zXKfmd^`cP{3D16W5%GJ&Hw=UtpFD&*X&fI(I6~%hj&xFJ=-?p&-D0Q-+gcuk%$q}l zLbhN~0YU(rwutKkwA;f?18~UD1;j8K2OOGOEH9kq?DnV78`W!ki;N7DjB zlq8^biZ-kDf2aip((=RIc+796D4*qHF=UoJtZJ z@vOi!qD#H(n80L!=`qbofzj(A|MC|U7+wcs90N)}ZDJ|nbZ6L{4FysU6mT!1c9m(< z{LrUz7sGa>CkD1C9YSWEr68MIoBd}QaPkjz9w3I?cQpQJRkkbNXzeSrl`D^#fQrLv zDt}x;@%`>pfAlDQPIH%N4OSPTzW%$sD#8-lNJ$N|fk=;E-c`N}Wt3HP^7LE3o#&iv zij>Z{Q=a975#kvXNfBJ`NtJX+)+2t6$>Rtcs`EI6VD{}I;Jop6`0?n%vau1#Njpt}I&*oS<@c+~ENAZ=4<7wwk8}Mmtce`^Jl$ zuJ(2#o8goe*X3*}39UKtyX9I8zu>pgan7$BJ$Vyp!$$YQ$UF?#y4jiQnlh3OM~3d8 zOwJ5TK!4)=NYm1{I`E#J;FY0$rotx{oM3^)EWFN~K0(c<=loyTeZ0oX`ipjr zYZze2rbBw?TquVSlXGTSfiF64z!OUKc^}jyTV0Gm?8n>TXpd@sKQKBvYLO9+xT)-9 zu^0D82|ahSBlT~EhjPpzM}UzaxKNIELw@Qj_(RPNdX-8x30VGv@EmZI@%A(W95m>( zC z0yjn-v}GCTpnJ87&On9^fJoT?pwK3P6=$3sQ!;;{by|z&^AxAwlcvXbdn+wQ4rJ8+ ztPmXs^QEd3pa!$*DA2_0568%U9IVv3R3#Pf)VSRuiNoD)ftF$JN0 zqP`Tp;YewVik-*`uVFwK6|YH<<4R4R2rp5O_Hl>05oKIX=<+i}b!O*&@D__zO;`_A z!WJtx{3g5)seuP4x$W?$k!29n#MwbGUGa({rFjth9r7!od;;faw2K59gbf8q;$q(@ zLVL`t-YIiMsMC7JFc^%M^nXQma;i1oi1BO->$1Sz`jX4#IVrVZ%bjVKgAhN^arRJM3gS?;f_1fPt08gj>dKdZ| zG?6AnT<<$m(PL}c028yBb{8m7=vnJr1R+3H5Bgf;?* ze9aAx*n#ya?ScnLFsqI=Ae!uym`G~X&chbmjShMHBoB6X!%}NrB(Z5j;3E;&GvSfA 
z9zy>yBARukbmeTzK&3S2whQbXov;WuYP*sugqAZ7 zoqfbi*AbHZFgIq?6DXM#fP8+<*7DrFsqC)CGrx<>K#qgyZGfzpSanGHDkQ#y3@KVq zu?ILH@;vk$#l?f67bUn@Urfi#01~CLt`$uL(h-O>j16zX@*yE**VbPz$;8Q_zTJ0R zX|dnv8H&89FOMlHD*l=D9ipLJW`1uGE1umD*~isU=7RmGIX$JbQlUwk*#ty$H#*Dy zlvTfG*x^BkFLPsze1s^x8Hg=_56V~A9K{MFHmPMv#{JG}T6wVs146ahK9_smqdh(} z9o5EtJeFz@g%a#xYCbpUQ^_bfIp*|I z-{|)z%Z=~EgVcp6O`!Mkx+RY*JYI&Vil zKrER+(P0%j5d4u;6)q9n#q(5>6*}EW-{bA*i}|xsAe7|0XgZ*rOqnHVzbYyv_4S!R z(p;QIzRY8JT5@!nx3oh5+mCG+OgrJm5EGwB8VF9z+oUs`AhJ#8M9_(zJx&Y^Xibro zl1k_>#?CO^B=GCjTd4fpYA`nFfhY@{PCGtd+-n98X$su2`(Nxm0TVkUirt^N3zksM5#h7tdxl}*6Sbs zR1lY+&K{EBxr}#Sw*Ag9=FptUsam)A*y??F%{#GBGzaIu@zH!M>pI~#xmsELBalgY z?O}EtMXb94kyUX<6JSiU;HOqV%@68&Vk;s2x9VFV_}(W$NnfoCz?-ZZ*mi5>1iOIF zibD<6Keg}R=6B1sHwGeD7jv+n)=$1PJDK`v4az%qUcx)NW!@L9|O=wZ+%n6f5Q z>X)Sbv$j!wbYUbkTTbj^!Z~JxQEh;z5(@STZt8_^()$Uay!g0nRMDgBhFt|X)UE-5 z$b=AP3>C!3Jp>mrY%$UX5FW?t1vRKl_~rk5h}?gV<^iuem%?K*3X}w5ecc8rS-7lS zNI}UD1HnSHgt#V=Aok#JJ2JV=KMVLA`>5`^AmXRph*>aDs-y-0ypUi_Gcc&z=g(f= zGCZsw>X7+*;hu@xClXUYQ`cc+f;S)QktZy%_7)tnfAWO4P^?y>@jYwEO=S>-f@QH> z_k}}RZ(R0tUuq!Ce|Axh5WKf1#=OhvbaJODtKZ0yeDxenk^7xA#25qcWC&sgVjrQNyA- zQCa4ILl|q2%35rji3E3JG6gj3*OLL{d*aMf(b+b7M+K5%8#r++vll$c@Gx?u4sJN^ z}ZD{d0o^am*_s3l9TX zMW6{w!Qr0@Ue`G-T#N^72R0Ni`TqBd>BV#dKU@ns0i;wFUmbw8o%01%nar8vu5WdC zdsFy}D@gC9JqswIykGSNFL5jPp9Fp!YUEV51epd`LcA}T&%R&F0KxU9efnZ!n)c#j zouEz?m};l@GI3vO-Mx~mMy4=GEt(J@S9${@~Um17z3MmvDC4l1P34B+h;x>j&v zvbVRlx>1#`q{~=pRe)q=5&VvkEwp>X-dP_>9tV7gbTUbI)Vm2@+l%8#BXIFl_BjHg z=14v1Gz*+bIFa(z6%|47jA3b(Kjsaw!R%OUX&k_PoluKUl!B%Scwl~;vhFlGe^Vvg zIvcXL{#9ZH8Qi0 zV9ha2zJ{&z<*|#3WmX>R#Vm11XtO;mbCuv(U%iIUH$FQ*v9k5x#~rktXvVCG89xXR zc$uqnkvE*XVFc8sA$;AyNY*g52 zn2Qz;pQ|+ZxZt9fk4J=H-=L`j22lVr75v@mMdgFapf5gU!p_`(-=*b%-mxey+`!r% zXC!zpdM4mUd%fNselH>i-V^`+g}RuU9dC#7<+s9#f?M&jBb|*RN7|PoMMVipPR=j*re(*fRWhh)gDpJLQdEzL0@vEU=IQYk4V!ulst1lgN6b| zHaL=Oc}xi`?Z+K0d1H0rfwO4My`P*UFa?k~BL%X~42?VKS+4MN>oB2w>*|(0GaGd-5VuQ{vR|_|NRdm{Y|?Z3^M&p&-BH|5_;Q9?XS<4zo9OG zs%6?VpL+lTHaw6MS9b3LG$K 
zH@90>D0ucq#{S=Y$oAn;QPbd;J{nOdae^R!oTN4+!R!v~!K7k&Btr%$#%&OaywO6< z+`Wp&J1Rg_jc23AM8P^c_f}T+$TDjXf(QcI>p8kPfinV3++tZKVzRqD=N??# zq5uF5uh*>0mGfZOGRg@Vwu62ZoE}fFBMqoP=#tyt+7gLl?zL?}D0EBgT$zY+*~dG4 zNc!B^#MyPE3tnE!8ssQESp7cVdrXOd(IWI{M~Lz|(44&+!iV@T z8miqKpV};}{4>75$n#7R$x}G^@Dc?No1>@yQJ`efA zg^L%JKz4ovHHhP-utZOs%skhwy|YcQ_#R&6hobV2?zEinRR!`Qv}7k_xuK6GgW_$U zw7p$hUZHlFTN6BzkSO{(xELs%y9TH>lU)@FAPD2XXv7HtqQ<_a`i-ZCTLvi>Vp0#- zM$}FKkYWS#HYARjNVik(JEJt-O7sFDUSHqfX)l)%Od69d1SP+Wx;m3^K4v?nVWM_& zoKh7Um*fS=OU|rwV=EpY+W0!cfg)SMb!^^@WL5`2xzF+ZY-gZ97H;#=n`iDuGcAIU zpT#%;TOr(5IjUm^J|wbGSgH;)!hI}VvLw!q-Lx&;a3Z{mAgbEPlOEPkn2r#!#Emdh zWIGj4djv_V*TE?`^#O!rMVM%unjlNnh?#{h+(09e7NJ~G)lpvjAs!x-6_H88ZSXOY z^VgcPsHmu{dW=6`xy2o%qe(ffzRX3il`-2Rj7|$rAn}1MP?QBamja7)AvyxiY8Z{0 z8Le$@tM>$eNZC;EMr@s}hKmCSkQI>wmz5>@v+^bJr@bJ|gv+6!PTuY;fB`@o{+L}&wrHn9*R ztgN=6SR_*8fKb1y*G6p@Hh)xSu+m~|cDAZb!}N;ydbT@qB~3tooB;UjMOy8GiF#vPN}TXBr$ z5s`&xNdNbH<&AL=wrSbP&`J+Hq)dQ?mbF2&SioAg-g2ZDxpYkX`%rkM#ffsSg#*?x8p|GxbvQELzE)6|hr>cWw5qy>rB?i2!|7`V z1_k2D-GJ1swlF&kudNsC74;!|_I&F`FAY4NNX>Njs^7aa%pms43zT5ZZ{qb9G_EsG zbI0*|7)mPl!*c{7<(?;U9q8Whj;hd1b8krENl95}$?~AfI2L@-#VY1*EvW0Fm>JXg ztgda^v)cnBT13Aa3a+B-GaQz(vbtUOjVoz33Zi56z-M!mjq>?!2?k#SDra(=^|;xV zUa&n;?ZWD2n32_B^1{}-v_;tAK~kvL+#FXc+v`0 znt*rI7pcLKPB*VpaF@frI4~sPB(r)-N0zngRG)b5LUKG=aE2*-8#K{ekYC7%zI>yZ zWAB20{`mJp_4*G_a=0RB}d)}lKCG8Xl8yy!sat$w2fj8Kdn&{+Lg zJ*Q0|n3Gw(3i74!_ZA=95L_vE(R#fnPrl_UxUd`AZL?pihAEf+@xWri-#t~}1>g(K zMd`lsZz4oD4(=VheH$QnQzQ&lrQHVvzjxK-2{c9WP;+B*RDuRcsE@h5LsJNaI$tS& zr>p}yAD=vkYSGw%(oGYdu9zJ<&CEuyQkO51rsm^d%|$N^HN`%+y?_dFwD+)k9S?iX zVXeAwc9ei7`V^A>$%@p>2R63Ri#IZ7-Dyhp00RZ*3t^jFI7r@`d=QP8|jkfoardl9N&yz`C0)#S2+p>tN7pWC-8uTa#Y*LQ=qc>h5+DVMryxf%Ti z*{ez>1T5NlWK3*JC4EIU%(=u3vf3GNB9VqCzy@@yqE9tEfIevl8&8vh5W!GJuY*z2 zkmm9GfrW4_N_{BJ_>cDR5*LOhzuDaT1QgRSvwESHrf^US%ER5s`E)0q2i3U)H2 zLY1>_i{9k5>U!U8tY*t4=jPGwj<2E<$F3aoS>7Cg1Y{9?32UG(n*Ybcg>uSaXdyuy z0DX5XzqkYQ8-v>`o}!I?95ROLn(|GM9Fl;`B!Z-g)XV;QCe%n133l@oFR-`?0~<{M zE|h=z(F<0iZoDiFRHey7gEI{tY%1J4=pL4VD#xs>OfG 
z*9D8@%gikklEE5sdlE!B51O6>syaJTK{mij2aR#w zGEmjbb|13LnsflUeDfxC=1@vdp>LYt4(P@otkopEgX1D}D#aYiUpDVEt}Gu5^Zq^AFM z$egnU^rAg-puKhO=1b-Dp; zYPvwUm_pV|{0*0el60;XvJZy6;Pm~$0$O~9^H)$9bVi!%FJ-n_odlR2v0?0*bJ9fPv0c--%XgKhf+nF_)#80|5{N|M^l9^JGi5|S?Gh)FRtunAq@Wc z)CyI|^lAJBndt1>H{iiX#{4OMd}f_gDuaC#`O6=&#yVXanD$(_0oX-4&Uw(FGZJGe zBcb?o-JtDC;VQ`I$R`3+E8G(0Gb#$8E!u|31rV9TZ0BB0iIl6sTLY(^+|;R2JoXCS5%;1d)lHA6MAptJR+0w#B#9jwnDhPn&l@uMrwAUlQ& z!j&cU{b!O_R<%i5GBo#`isn~O*uQPfo_eXY)ueh<8v>egqzkZlv&asP*aM1pKl~YV zS8FYy8~l(GIKybf!`FfdUCcI(voom`lr&o{*n(lir_J%wMKS^Lu_W5J z*DVp6(fLRmIX7BP1f>?5T9B5AgetE-)!1d*H`T*j!o(C~iEZ(<@U12=VCVpW3Ly10 z4SnbzN9Pg()0uUviaVrIS~q#<5ilTN7InJbFc&a&6F_LsQwNq^xrUG1Sb)p^At&ZE ztf5UEun1#UNYK1_Zi%=kOc`DRzcI7o?66X!9=#8S9&%zV zo3>lJ{afkk76bB|bSzkp*_M>Eu{DWj1?Z#;2vW4WqX7rG9BF7neZEuMF;^QB5UF(0 z0&BJsy)q@QZlN;YPyk6`m_p7&Y+echz|;FsA$#+F4Q!)qU>`k3gdIu&oqsp|Rn0{? zLM6=X*q`5Fv0sVd28Zbp!-3Xl3ABR1!Na$nGGHhvogOY2oDQWC9c&NdpxgHSq51$S zp{7HG7u8Lx3Il^P5@nIo6dgrIQ;i_Bu6@oY$(9RMI}rZOpN3lFRtqbfU}Gn3Z0l7$ z}yGdx>00O3evAXc_1Hh=akUUHN_Ib`D;kx3JadL zWkEpm5qTg(cBVO@?AhwnfM$U>J8&%;XE#=f==6Y|$|W69^*Bdy%EvT&jHOnS6{Lkd zvJw+}!4-XNZ~ysQ9gG^EoV5H9SLO!HPu32^QHeM(Rt*y*cqtT|@br~-C1ST1mXx*S zU{lGHf-&vo7!=4a9G%>OR7jVs#@Vnq5pFF)eP4=bZ_$ru9B)l!8&pw=NB>e;ULp5l zuUGUph{iPBq~M6nNk}i17O!*)Ed5{Xy$4uUXWA}`DJC&72^v(y7VKDv1+Zfbpkk+n zZd62&qS!%7l8FO~Vxg$0sEE=&Q0Xc~MNm*s1jLV~AQli1kuI?Bd#S^6X3pN{zxF<7 z@9Uh&^CbW8p;*am2+0(E;pwlmDoZJs`KX zY}%{j;JVBQ4g)sRU^vL}=8qh}nSZtDNj84E|NOyQ^a{SJgyfcCiaR5d);s>u`Lw{k z%KwKd6Nm2CAEwLAV-bZp~}NRf2H?vx@uiz^j0Q(F03HQ$z1k7k-D`I8~5Z2tI|M@GCXOj6#4{8YYXOmW^{hvwsAZNkMP{~LdT!R2N=g6^`Tx2$E;+*ekq%b# z; zepJ`D)t8#jiirdLL*QF*`EQ_f(3M^1-D1&l1skmp`Yp)+!5(59!5OXDh(RBOB6BBQ zmmI~u`_N^zZxZKgP{T=KCVYF-&%+DKIXE;k$v3i;fR=GJEGXgklF5=A)Zh$>xMxmJ zIFj(~5Yo-ikw5-GNH=B^{^&i^?|60D7*ryHV32vhigRa-vQ?= z@RbuDI8vgPwtWk&7~|?O2p}@VH|G_N6|l-wopG=;fp~`2-QAoxn`aVhBgY6HR5b0I zYErOMuFdB*7nH;S@e#%VWvjP{ufu(y$B>M9qnVLw7D$JBSO=$}NwGMW)DE2;VXDb( 
z{t5u4k2gj3rm_F}w=>6^gTwH0!b7+kCYKAqCtYiY_zC4JoCoEDA5uCL>?yEqq^QSK2=mcKo3TZ89oBd57A-g|{I7BWPm892w+2j{TN zMUx;OOG_lSldvR1OScCY1Yi2{DAHr}tpg{FMf7E_bO)>tYRzoK30!Xtfpfz$TC+O` zHvB;7CSab5(Qjj&fD3(i%kH;W@%TVTIVP5)g8q6%3m+s6zYf~|==a!yaEK|5eru&> z0z#zl*c=+uP*-J!)ZnD$n65jK3aEWJY$K=%aA`WYitv0lULZRArJTM4<=cAEZGYs( z>(Axzm)CbN%}8ck-}U>e*G7~8N@o0`5BOS%eam+Ao|CH}r1ZvW(swFvebz{yW%yRi zVAoskP3o7Z$%>gsp^8h0MEER!%S&*c15uY4bY(u3h}*=Kyz$BPp;B2{vrC&FhE35c zaDa&1WXrBFmMi6lSMT#=!4_1C3Ps>HfMagSg)q0Si_UoYSZax@KWklL1aJD=D#Mh_ z5IMMdpe8X2ZY+EIR4v+u&y4S~jc(`z%kgmD328(g@~-cOng@$~Uvn$*LEsvJk7?Rs zvjH35H{@?}9D37F?!A6@i{zp!PMM;xDYn_zF1f#{(f zX2O%z4i*A$_KtY~=92WS=;botux{MLJFOV9)K74IC859-! z+LH<95VoA&N~!=nCd!O-yw z@!+`imVYbea^M9B{_$<^kqi52PqE@u|e_8LWTRM1V5?QAAVFBPBBXf20tFIxh zXDH2)TB)uTi7kQj?;zB7nFfO~Vk6l$qHwwaA4OMUTNwnfLJSxV{qWdG)Z0wMrgHsN zq6lk}=^~p0H`tndhHAmw`PATrFmDchXLJ>JJEe|zP5kdZ!eWFdog$r;=SY#uE*L4t7Ww-n3_B)U+KD)9JoR7~h5w8$fmar8GB?Z*yjGKc zm+n85m`k1dV~>OMnqm9X#jSF0BLU`Sd^)}Hii+c&baKuKrjdM)iEE+0=ByT1qyF22 zLwTao%x_ZgNU&+i(-t+R?mH=wp

    REJk;D|A~_2Hu60}I%9 zZ^RRlj%cET7rJ(JFqE3cO-ZuooS8f4osfYVRrY;Q4H(MKD*NegVCk4!FT`C6N?jFLmlbKe1I97pD&kPUiQ&$1h(Acrr|t-u0_h&@p#HaJh(^hIy* zTIdsVIt`wb;GGU647%)+uOFO6lrzuZ;`<^1Q?xZ&AsW}KI5Gb#O2U&Q~BQX7#$SmSM zVeHLDnb;cOq!5ZTghq-9A|Eqj`Uh|j{qe_3!z%dd{#BU?C!uNZ!0xXn)3^u)Hqa_a zm)7P%9N`!6o9J&tE~3Aw1MMy(0Zsc?z2;CH-v%|%>)iQQf#`h1XP|H{Mh!sF^4rnk z@2kW@mIerR*-%)2R>G-L^iTa;y*6^=6svz|2Joeh12*t_sOUo$9)qHaiM#|*ymX7Z zKn|r*f(Z%gzf)ifMkF%Gc0E-EHu^xm0j9` zQ7gDS5ix51MLKtrE{gNG3oi*$&sX9tN054EF8xUA*_ru&rqmM@@m!bvFva2aPI4%{ zLdWoXv=@Y%hX)ZQ;bA->GG`4-Q7Lq9XH+_|GCtYnNL^$XrrP=vwZ>_rx0!4dT@v?Q z=MWEQyM*>mW{)#GiY$mJMt9b~&Qi_*p^vxIjtaYk#++Q3(F1hhxT^?(1gO1-E2_%>QFaixOghrw zFaGfwA&?RGL^vw~$wBx#fd8Qu02Z0DpEjcgdI_|5BkGL|NY&_*^kN?aMxi0GeIL4p z@=ZI+&|9jatZGhd7F82Bt%&eXWTZQrEURN_=0j9*zg&t@8__cGU|}7w1Pz4b0p!%r zqv;f*over`Y{~80z4yRbR{2#eJ7zkeg8a@}Z>}qyjqhP#MG}p4GhTJdQZH{0@|I>+ zvqhKl_0QOPA4C}S<*d(ir!Hi7n z8!iI#BFwaa(a_dco={j0K;;RWUa`DEZ&eq*-1wIu)X>=Sxs+idc5X7=a zGcXt1e6eaGP6e-O0pvmO6cmO{p{^-B4q8RY4AC}|8b?hY7I=1;{?^k@W?|#kFks-{ z{0!RrtT4D{ZNsyd8>J>CBy11Q?!HnpdT<2b6%ELn87IIM3l4givwJaAe$5;0hv z8b`?`JhXEKw=Bf^AbUCW#w57QF21B6#Lof8MczhtTU@B204%9-1ZM2I;#m?`3cV@? zFKS5bq1nva@zG7-(m)`>5LiGVdmbp;HSqsaytcl1>foi9khbbBMO!829qg=#^jW8fAWOw-TyAg6H3Jd z5YoaxtQ($HxF}{Wfc6*;i4NR~v;uX*g6_3sJ%(2i^kZ-%XOMFunqcXMPtB;^zNLXe z2_YXONu&i%jeBR*Vp0fQU$ua7`CXHLd*lNC(CA!*!}ftFH3S{W`nukK)>JlE24Yol zUKO>MlC2gZ?bR@EwmNVT5V`>>!VS)Fpq6q<*|E&zvh5waoCDoDTQ>p)e~|uBzTW`o z_D@C<&J(5t0uBlad7iSPv-kBEctb-=)wizKpQ+Be0j~%HHrYGxQ?O4sRckJO_hpqX zsY%5guuah@6ZE_hP%zuqMR#sRY4Q`K` z7he1JlhY@^@%?eww?FLvWB2KT3HuDHpI-~xqQ8FdmMA5IMSHgBh36SBDk(GW|Es3y znBAYR&|cBQ(&1s&X9tm4t-f^nu@NKo??3d)$;@zK&ZfYehT`VmTfCDS@Mb5|r4v7? 
zWttA;e4`0lGbCrkp<9!GsUA6Y+&6)XM+G6}ibx1|SYC*tm(ijvC$C@ZoJVt@*5go# zRS;WPUv2WZ`DL`j+tRbzP7e1|ohKjRAVnc?0ZFrlayvzxn7edb)TJ*%vrR5meKzWUzXeaxqQ{+8?%orPUo&bY@D&ajIg6T43$w*8su^s z*LU^i2vi~O3l;b3rk4)|P!Sg7BfYX51|2JV)kg6pWaTG&sfz^9b8D85`?QwO397Br zX{oJHHuRzMhG`b$pU%yzlk;6Lg4w62Bel|a9y5W+IIuTDXsE1fC~zAiTB6301)mqg zH(5>fi&8@J#z$8e34tDsbKb_qT45Jk4eQW7&~$n^d!4@^Feh%5N%Xn3{BP1v8#W#fgNA&TGks>wq{J;4kkbv?`nXhqX-cwpk(C9b@ zym3RjrlXdQm+|nR^LY@un16F!Wn~e=wOYya#DWoucm`0wD=_LHH<=GDORActCJja5 zhkV6J2_e00EK@r$O4tLsvvc!cGJvUdH`Nz@$`d;KrW-Wk|7+1aD-dQtMe&u-CSxYjU{2&31#?8M z81D%w!E4S|Tt$Zl%pl{)OWeG5Yhg*GIl?dr4>3ZAM-z)j4g$Kcv-S1l9P;E8c+iJW zg&2Q|JFupvW~kNc*RRW=efOdo3HdB5f&;(nz91)6Z-v+P(B6cYBE6~|l&zqQ2%cAa zYt3XuMMip}{9ZUa9`|KF0CPdrg-U|uK!j=HWFTSvz*=a}+;m-D!T#@fo@f>spk{B% ziZS#V9y{_PP`)e&&#)5obkx~gn~GYAY|}G)kmF3VbX6FRev(JzZfZO zP)Al*nDdA8mzOl|)UhgY)&Eb%+naqp$_rWZ`9~gsiys#_Mjyk=5yL*3+Gp_n22tTP zY}`k}`TcUQ!<^l1g}M!iM*#EnZ?%$S@;xSMo|c=~qKhFEKtBzK*P-_tO{D!-6fnL` zi%0aK7)wc<{Js3XUH=}yZ@a zH((uBhyFihkL+~B2MybNN96&*#l!vV@iW#I-K$e(&EIi4r@-*VY18sox2lP=N7kIX zQZ?kHlAsF4-hQs+?uo^t?%UgyO?z$*6*a5(C(oYEx3{lV4HV50aW@{YVcDod^&sFB z?FP%D{6LeNMyiCPXJ5i-LDtXacQ?Wy^Y2DQhOnL!2i}jpMIZiOWp) zB|Pq^Aa#NWEeYo(PwxbY*!#VRWr1%EiuO$I`RUkXek>NP$o)5PY`6ddJ%GJZQ71k= zaKp8)xRdBLtPx!#pI{u%hK!G+TT|tu_z_Kf3TH(3PG%@y)Go2{B*eoQ(FHiG1f6V= z>-VCUR62u~oVlgy2RvbtID|6Ve3U=(It-TshAOK(bZ)x64~c$rxoj-*N3fEt^3XdQ z{hPmo^m;zTR>Kt)4=m8YRmA*j6*x-5cZeo2%4F)`FnnhZKy8`lgw}Er5EGkYqQ}tv z4)?CGYu~|IK@~>@2;e0p<+P8d6RT$Zxy5^aKZ*gUw1KdN0~}VMx@c`}y@#wY7I-vG zToR=YxEqHX^epb{sX?bH{@$B=6HY@u6@y&f+!H2gfM*JKPK$_v&VrGzRqIiF3Yvyz zs>y@h7S(`s!uIbanKN3k9Oi^AY`jgjq;m)eiaxt(^%PRVM608W3{2&)2{aRZDQVi%&`+zX0ztze7pL3lK)#0QC9;>jfYMH*kRQ7(QVF z0l*-jiqzvFqOGLgo$jk3gKA5Na#?qG*bv~EOu%EW$0rfLdo2^Zw`G2g=7e#$y?i5ELCzazl!b8z5`!!`0dOgXQ!th^ zESZ8DEL|4Vc(<^f_T8T<hBc$P6+rj2eTcS>Q2a} zDjbdP3VbdP586oL7N)wGTn5BLfsl19Z5D)WP+W}NxC|C*p?n7{7BkdCkhL&S7D;S+ zl{dTJeR*|$m3uq6dE6dX^1{PYlj~<3SfFS0Ws=gL4JOreaL|o(50rk@5X%53@ZNl7 
z>sY#@=m>!LHki8o#HMC+?fcX_TVkYis{jo1oh&LHY@<*1z%rsFb&UCzL*78p#sO$((wE!3w7ukmY>?hv725}cmrZpg1QU-)UC>FKzmW7 zLO;lav>~$c)u`$@d7W|39+=5~GbhIzv!AQvkJ^<(qq-0AmoZYv(#>m&iqy!H3|CiI zM?iyb7+hNGt<{%zzrKv#VwCxiq|WI1?V!UO*SGI_fG@nBIqUSp2x;J(^un$}hcwyd z?}Ho{{FQSnD{Bv0We zHqxEL39Y~m``MQ*g${FKPyr+o6J}IHoUintwR;K7ZOyx+P^^}`!ugkk+1Q6zy|XM; zS*m4}45=9d`gz0Yw4)mUbH4@52u0;Rz#sykf|Rk{kVvAJVkaF`fE{4MjBseX!Bo`YN)7N;1{YItA6CxcpT!Y^7bc>E`!D`!@_kb;cMJ`Lbi1(zb6 zIVtmIa{v*{f7Y=C3G&18``x($^^av9g!$nUW{4^xkDH;jJ(P;TlITXM6_*Ox+peyz zDoZsjnO#oBAf0D~9@b1Ys!gI^2e$nf92gQsHMMPf){cEKsug^Y?L8yWC7W>NstZQR z$;S@gH|_MmQKwEjIywptFGx)_mM&d7-u%im90tc^wY^&$?sgD7<@VjxQP$- z1SgQ2tICpJtS2TuIS3P>S}b>bDk8CIXKk;;{8L)HdgWjlDP1`ikrd!6o<>t?SZ%*S zodHDf*lUL`IWjr;=-E>jRiJdD5V#O(c+Ln9ZG$vdmWmj95rS(cycpF*8)yCakz^Wb z`vt~_QeQ=S{oS28;wB=~_TM3Vf$8m@cs*4~6FRaqY&hn#KV9)c8fT|K9-Mj~Nhcmd zKlxgF;7wYRi=Wm)+9EA)P|e4Z!9CXuJ}xT!AHKT|)YW5b@noEwC{*xBGjNrzi7nn* zBWDn}umQF*e$z6*w^M9b2b9ZeOHFh{UD?T<_;|Aw%dtc}jvecAi2-~@8QhA!eOElB z$&`22*bYaQW$i;m4&F~zR#YVN1#rz|DliYY9uhUhba(93N1YM}7omrZF)98#DihA< z;75`|TXeqwv8LEZxE zADacYNn^N$=Hw$0we=yN;aX*e;Md}A&jxLm9fIJmEqmHH$yh#?OxDNG<+ll=`*@PL z5In=iH#?vT+{4bF5-0s;58GhCkd}E+h?ze<0p@T`9*N)yRq6n~yo{;Vd_KPWIX~Ht z!3Au#d3LHJNRFqV-pvk#+IwJorp=%n>)Q+=tWMEtZimc>R3b-aaX2S4Ad*V=X)**^ zYx7WXCG~3NsN*lwnXk=Q^T*NM@8sT<$+~wQis&geIP$I460wuB4)wy;-MAQsTo8)d z6?jAKh9g5vHtjamxz@cytQWEFF8EQ^xFc6#V9fNzYE zPRbaBQuuVS7|nXYlN5k?=&<&l3V&oM%&S!}cX)dnYP9etqqd!Tmbnexm{lHkn>)2x zI116%Ok(2KIU7KWOJ+f|Ll>w2E18H4vLU!A76|n{ zSKQ8-<1M$Axe}kTu2Vrd#96+*?hw&j^I2{n3Nxy=SHXEWvbL3qbqDJ-iQ%! 
z!TDVXQ%0dRyd0l#AMrG*DPaa;+Nc_O5xyFc|w?YvC)cA-i6 z+du}Wuwm*|(~8XEteq*4tQT7m1)6HDLu=@~DH1pI%e&Rqv;DdC~3HN=)m>d^qKGLqoW2Br4N1Tpk71s~?(R z@DTLx5Fo)&h#~iI$JQa4z*pS=6wU0hEleNf2}5is;VcQ#5Jp(@rBMbMl>~KPjMS~8 zaUgg9S;qa#kFl+fslEScY%A<8Hp;3YKQ2)`;DHnQ;-w7Hd*8H)#r#cdP~Zpv?}t14 zc(gctq~yGqkCoT+;rHWn{630_i_1a*D0-z~r~S$vB~P$*`o&qbc%bOCT;4YyION%@ z4PpBijL`YLsdq=5rbYKJx=TX z*mHU>26cZBnb?I(u{9jxbO`&T#sW&&g+MqUSF*AsvnV%$YR~viUs2VaH|UdE-;;QH z>Y-XEw2hWm3zwSycz9B&kq)b^7rl+mXFZy=b#R_=?>!An43h#DjG!*BK4@si7=wsk zSKJZ9s%zBo^LbdS)JhCTKWzgPVlTWGNdv_R#vuqv9ruu9?odDgW1I!5+kT^HofUqA z*YN|Dh3~cO@(q^RP7Bhp8Y)*)%`(c;AN-)Gt zIZd>MF~~NtJ`-iZ|}*WQ*UAD zhIbKepHG5#01vQhiNc=^?9Hz^*qCd*a^HrxhP>?f&@g620M{)INLy7G)nJX?eR5Uod`B z9vIHt0#1a86Q3O?(=&0(*zBefZbE!<9~P=0@|2+~eYedn_*|;ydK@wecpF)IWB~sR z-?Ulujsvgut_?ss#DMQ+y_#E$W{P8z^7hsQl;$RHJkKntjj&D%;g1*yU$k?r=y#7U zNKIl9D-2_V!OG$WKxysXaSr?Ojwo^kAU6lOz#)cFrE!BqyvfP*z8jRUqGVEF_71W{ zf=#bB0qmmiPL`@Jc!Rc^>=P;Rm#_aa!Qlx*?IEE0G2MHp#j5joSkD&VVFcjTpV2DB zL>cjKn034RQ_95t1#uMk<1bie(sQS%IQyo6HIgyFb4It;04gq!026m9UGy-A=*LyR zONvKrF9cs|tSOczTc1aFJF~2HDXUisI;!MFJN-#N>BLWK0Z{CrsN@9Z5TJ{$>=lQY z%(8t9MqtHbw@I|J^41pVr@BBMwGmNt8Pr`NL1UcZd4{$NBcch{+XRSqgO$$b%|0t8 zP0bFr7@Z133?~0sqfkH~gIgmbEvp8Kf9JAJ_@}k>kkR7K@kNknW_=mwaDL-?mImH{ zVJ5h?Tpzl2mS~O7%@4)C!ww#(Kp*O6DH6$B+1``&27JF{;F;aI0yLT3^ogPizvAmp zYx$+i{Y>mVU)(?EKd!J8U@0fgmE&af1}ScrwL(-R6)uo`l>k>(mY~)K#gZjw>j1Lu z?Mo~5J%l=-qK}&}UqlX`{7hxeqfS&7Y0T{(>e}*~=hY^_IL6piZ=xt?ZnUs|f%FIj^a2N*TtH@|3zk7VFR^z-@|(H0Y)n&K5dOcB zgWGtv;pPVPj+?K+`F8ZrlF|71S036xKn{%sCtq>U^boXaSPzJRy(-AP0D+l)v>ukv zQBi#4it;BNCaE7j|NL{yo<(XAm$y{+BVLGCNS3Y@t*TCK!VFJ z&y)DXhg) zo8jCSHnyoQs~`5W6)m%qHe~(RDDwYJ$K`+C_QU=Be^CDLpNssTi;OGq zzfw{2Us+P~0LRG$90mR^C~DAVGq`aom}De<38TpvZm2K44DJ&}(NL`$-ndIdcL%X9L>Qyr(xq;0@VF`Tg+XQph=l&@jkG z4AW20iNyT-GzQ0n;iGYMI>AOp;QCzdV$J|PFIvp8-bX6|zJs4Z7n)dkBGAmYgK+6s zn6lZWsT^F+f!~089h*g``DBWb^rAqLGHI!Nw0i48VogjQx&f?-^ZFrVeqh z@&b3U1v7&=ReDtdbc+ZQ^r9p>xxJ+BY(7g}LbQLiSumCYd|nK?>ErzV=bPVh@q!#@-pu$xC1iCeqLPIy5otGYOA3-5Yg7`jSC>A)e~rj$Ok43G$r 
z&*4Zo1ovf$*U*?F?(JJ`q4VF18r-%anQBnt9Fnb^GK>szoyZQp0_SW>44~BnqBr(I zC!ffmt$1GtRnwH8(dkzZs0!yi8U}zxc6`{UWn|{w`mFfj(-Rnu;Uz`>==;=01dF z_BtL-`3xYsTpLEAAYLHiwu=Bn{_w+@d^RhNWu?hi0S7BU9A%BB&6#+}U)iEszjJ=- zal0z=S%7+Cn71?>d&V^@23rQHf^(eQ?DM9RSL1Ohr6|;sNFi0?;B-0hP`Vc79^2wV z8Jm&^9w}PWYA}rYl{3@>#M@7B-wjp)Z6Kwm*dX+x_;fT-=>03&k&t*%oI|*L244%U zidEiLB$7Z%fc_VhOn^Z{^^t=Dkq zet}_TIIL2eg2C962Q)B&Zxx4-!@V6iQSOA~;}Od%t$b)9mlU5yw-ZDVPJcBq)r`HW zc0l)=APIvv0<4e0j#TcE!A%o$I>VkPjbz6O@kU5YgXn#WxZT)VfG|3-0`mQQ(C06h zpyM~tFRH3k9HJzX$z>NNxE@lAws;j`z+-L#n~+%3u(~GJ=p`X&uU5F;xH=t=SNhJ zusZfANfTO0T4?{uae|@+nw}v}>y9`>j&2gUN%b78G{Xm!0r({!`B#nz*zH`zHv`Z6 z?lGe*3;lG|-RYdtO7XaaAvZS|RQ5C(UshAtN|;VUs&rn51 z)#+thsJY}=Xu9(&A|Qm>9NnRs59%CD#!QZfSQOp` z0k>fOL4MU>4pX2gPnZG=qdcX36;GRBPe|h%PM_{-AR(7N6aQ~qg7Lpm0Q!YGm`I;LaTKe1RcM;klhfeR*ICvnC$<4BYehJAx4dtDg zU(3iLD$t0k;eZ+6@DM0a1|4?Pc2f7J(`Er_F0AhsX^dk9Lbv)z&erOJ3ADWlu~66~ zeRD;Rc1sk`fhrZSkDwX7zIAFl|8h7gD%>)cr5=yC54MI|MC_YvJW{ADwO}+1 z4;CF!wmCd1ME`hv>>*h6F`pp!U+H-M7g`to^R^#u(SHDv4_87)=6^~t@c)w~CCBL# znxN^i(V_(9#!)rIb6`38;-xNiOv=Z`qgyguDA|lgfVmZ}ulvy|?9U$X{7vQlPf-AN zL=Cj0LAvLbQngDa$ly5ZDr(H=$)fKI?q*L$k#ICKXL#d?H_RO!dKg6*<)gaQ3?qUn z&}c%mk0m-5ra2QnpxG6Uy&-_e@1a4a;Or&f5XP)(p%@Ey&T9eDPyt3|H@3R@7zWL8 zstn|yvn>xbZg}k;MxOzEA5;B*K?7!i~f287MKK zDp7^Wx)f7XfN7Xm-vL^VW@5e+Z4>&>U!pd`Y=0`SNJzWU z)X+eOsPuLl?ZG&gyjfXjq%oimwT3s45`*o69`j_=tq)R}lV^pInh1#UVobtqdY5t2 zuHaZF4|b{enH$_dg8z|v*R=_=%TwY*zJm~feW~DdfO4U-Hd=+rI(XA%64rto7o~c+ zj?z9>70h2R#8s%^Pzh8tQ#+Asaqh38_yPulP+AA=qrEW20fl}#Q7$^^8L3_=)s^Q< zf1%#;GohSdju}BCC`a~`QfQ_JI|ggjN|yvwR1Pa)TF_Nu%I-6Eu^?QQGDS8XC6s=rdzuge`G(7)Y7wBBgo#2sHv&N47VWuJ0TyE!_sV?Z*hkxIMSi=g?_HpFAFc!<4W8>lnduHhBik5 z_;M^c%`!g&wK{eXTqK67`BpdhJ%tl`!5vhhIn=Lw4)aN1bQ#;&%GQPF zRg4tDUVU-VCf2v2*B?bGdvF2G1y7!=>8v}6=VN*kwxv8L9GjNeoWK+8nsACkozM{` z+n10SG>C03JyfTDVKShQ0OKfl4>>@rPvCK!$OQ3YDaKzkyLtaF-!4o#0q0z34*K*7 z01(QsBRlxHF7r83p)D!3EV>+y71&9)>+CBa>6X=V~kYq z8C!H`amPIG(&wGW+r19}snK{`V)LAsGlVVwvbK{71lx8=5Rf?5mArX~w53p;wbDFd 
zCQfE>{Y`f08Re<^S%TU+*oT9gso<8j!Fi?ByyQ8DTu9sKOJi)sEPr67aiuMP@1v@( zr|08z+~41@dZz|}NpM#r2m*SaRXwr;}nCncs z%(@ytNl39XBR#Mg%(pRv&Iz1D=5~9q3yV#1j!lowo1*6GaNkA2$rOiAXd^xxl}nW; zkh_Rc8ZnK;@wMMsd?;n*)s$KO$C5uQP+u-e8iIX>zsS;-)#p^(!k^e#Ps39t}Q2oyAGLE^#^&d+0CC^m2 zGH`|Eh3FW1B{>LA&|KlS% zof{trz&=>Q5o&k@1|#cU*z1jJXllVlJg?Ipfo-)ywQ=t+C6yOmG+c& z1s9-frRdFaRx?KQ{rEH|Fwl4?| zQmPgtL~G&!un0F)=Hk7xJ&K@zp)7SZPTPPITw7@iHoU<+E-*$Q&>KyO!`2+`)?^S_ zb1N#+UQ>u2x8nAc950X=(}+*QjKp{0#Ord}$ZPE2zj|Ba;I;=9mEkL0zZ{&^xdjXZ zJUB=HH!K_LpI=e%$_W*?%RF3La3OW-0J9TjV~93f%An=wo))fS47R*^Rxnb`uuH-) z?mmH+F(qPE%d5w-vckbH!~ua}gnx24@kO3l9QPOcjL!?6koi)1f=NmPx~QrKbE137 zXj~W~SwoKu7b3f!L;B>~Bw%$H54}IOY-zE@d3MAAI_E67UeO*3+$YF3a?IqWy}->v z1wIfe>?JijICqu7Wng$+qGxXdXD3*KfgA3@Rh&jYC0e9OYi{nxFt7!l&Zso{3RoyR z22QFPH)rERys}@MJrs4mjbXNK@K{uEROQ-0&c|Z0Y#kqHcy@`y3I|+gP?5ck(>O6X zr}z;k^3y z>ezq$ebc|PFRHC&m+fXMmZLMm5`$+AHuHM#=a@<)-tz0x`0EMgR~yI zsfV5ZUc09CLSeq7s_|^R zht=gQ<&;@C-KxSdevwW{9+|Xhb0$DuG)l>o3M^cX8`5tZ6yUlHk%I;;nBMaTR=FN~ zc;04!{ps|T(r|_C^#=C~Ve}G)4=-@;q^OvR)YxRGCWb4c(Ep}|dowMxXO{(ComwpJ5QN8fOE?40g5)mta?15whh$V+bobyuL6t5|wK|mZ-w0Yq*BaW|L&0C8^@-FHA30`rZeY(-Gj4TLAUJgvN&qo2W^z;%%xz#?vsYzoeT`Iq@X4|Q4*FuQ8pCY$QLpH8C-Ie9-T$c#`U|B&Ri$qX z8eaD-&JtQX1X522ApmbSJkE8wrClX;uKvAJ;t|zUir<@teK80$9miwecy#s83QRLt zG}{e0kQUIQ#CC^)r3kdR=jSRUDxTcc_d{`rQ$^9P1ZKUPx5jq48I(=iK1B9{aJ&Tk z^xM2Ez=T4#p!KA?f1;r`R!BN14j%&rY~6`g9*zwY8g{kSFcbQ&1*=mlUuRUvS zK~*;_=WC8_LcgksEf52P-zV7lIPXFk5G+}A*-|*kLpt8?)h9or+(mln%v=W}RTtOm zH4g#?qFa-;A*gP_h*|&k7L~$< zv`t4d!g$;N?zEakH$?LGbZG2CtEE$Gvjp(>#?pz z8KNJu3}+wvceCB0}COlapj-cwBOeF&BvIMk5R6A;uMx|Sg zhFz0#jHHxVJcxoz)+SqTPf=yNfXR$wVzaZd!hIBS)wXgFC1PoOjNKp*;;3W)^>x@e zkH(?LJQVMA0%EVJjY*pjj>V=yduJp;p$D8`=R3~O&*>?S8SP?2KTgwr`7m0y8r!lEkVqfMm!3qPo7*E-Of-rEW2frZ79j7Jw3cyduJ28r$Spbh5fbT9sk9n zDgcUThYyB0jvjKy8s#Y9$uZ}=+VG1-_c)?JajD>6l2d}w!WAdOLuxwtm7L8s!&eU0)qA&sABpfP4KgrvEB!CS&5FZm7xhU1;+7l9q{ ziCRRu@Qcr_8Sx%A4aT0~$)UHf?}ar1Y|5ZDA7cJR&RbEkFdRz=x(AW2ozLVhR1z*n 
zT<~QhV}|f7g(H^p5>IUHyA$w;0x;j^3e;*@UuxCS<4h3oT43}nB5^VD#4dW(SOBkp zfmvasD|S}pnup*H!t$(;b7O4dP0t@Q;4@gkahxbgjNsC#>Y%<8;GsSZYMk?;&Lpvc z&^Nm21Yj_;#g&)89H;3%Jojfg%(VX6*UD)WRv_kQSw7o-4(2wv^Amg-hk@&9ylswq zqq}svWwdiPnv1+@5DUtz#M!unH4z*In0&yP$(S_-UAfo{*v08o;|ue_$!rkJ4(`TE z2a1d9l0LKXVtqy6{!9V(i1E!)?!h~erTYWG?*-u{E!XxQD4Uw%Ggw9@HOw-+e4lRZ zg7685CKP;{N57O&swIs?XG>5r%ZpW_c&`bug+a^tQqsHU8GS$BF<~q)G7}Xl1}g z?IWwSx&SrqY&d<6@6?(gRq!Pi z4ITS>6VP2?V%i|~C)lBaXR8xuMjN2&T$=*;k)x$-I@t10Ol~RfwNsJn`s4_e>AF=t zxAEU)3cc1xCXT{YO%4H6ZA?=YTMQ0~n0&1UEjNRfPMX5$BaHXbg1Tc$X}}y8CxJHu zz3Df=z7E|nDJT7b_TC;GIjs}*?DmmEy)Iitqf4hzUXYIhpGhY~+~;}??%5s=dFONm z=!fIAC?CZ!u@TapwYS@d#6yBH0agH0j*0K!mY71Zn}p1Np{a%nmAk% zha&dQt|pmb0TR$I)NfOlgU4q+LYZQai^5P10x~{B@X9EW`Y_urDy4G<>1BfOq^B|3 zwjba}PKVOhj@pgB;fr$2rrYb$RqBKt;!PoxJ#nprDup}+pva^%`1@VnN$uZyU)1jU z;_ZmJ&vVa&oILIEiR$-yXI4iQ{#vxH|Gij=rGmlu1f`Jbu#oi*330J2O}6C6p8hrU zkKP8d-!K1tLEXq5Zf-48s(<=@@w}Bi6Y8onw^Sb6kuyDK&8S1u59M6pFnll!ss2w?A2V<&PNs4IO-=_#GzEn zH#Vzy_R|i_5U=*9C}Ry@yP&;%5Ktsk&%a)N{o(D-cZPt7B8NS)hWs;HXnzFF51mZ_ z1{foVPQ>}}sK|B{VngOeeh@5WR&ooYJQo>6c*-3|*^gOta0CGvpBN5-#8!TD)w^Gs zDEmGsHFG33Dvshk*hJqUw&I*sni0zgGo@2NFDuIjoqb@IoB%8&YP_dB#>4}`)jBu= zGlt-!9ER@TXwAW2myxR)*?b(aEE*z7rly~= zi4&aW?u`p!nkOJpWQk*_=P3Zj2LkJ^*y1jwcD?IA=1%9|WAi>(gWSxOfBuBMcpVzs zxGdgFN00bxA}7cGzW?1%^|!Ex0xKd@u&ueQZB1U$8CSlH`!UsRrN^e#Z<}vLR&vNKHI+aY`;$utM70k6HKSoJ+)*uBBZ*{>fzU6*TxR^Ct{+%= z;u+{GZ#L1WdB&qWVv~ge_7~>ot^nSUIez*x&XG39+$Qd1=s$+VL3m$?lPQQq(9tO1 zosmdA;e<*J4U)@*qrEb=0dS$1M;Ix>$b4e6kku|fWi0c^O7uMWvhZP&l;QTB*yG9} z8dh#jKq=3JqYC`V8?Gupr`tRN{7oGfVtYJytx-P@5SMoLt0a6F`XMF{xeTQglkJO9 zW`)mMv>kGk3RG=-ffwC?4ccg|N>nx}70@G5uUe4amBysSdl%qDUi}scNM5}12S+SS zj6mbR@t$UXPDLH6AEr0fus1F0rkn3+%F5mYf_AVjtAbJ7sE;RN4maCV+Hr)esb@Xh z11qiG_42}6FSyO&pbQvJfy6bOvXq^mj!7RgIg>|2bwo10E29zd|JoODhKa&>gehbWP|F4<_$wy z8Ih#7wBD#7#jT40;|niiL2*zfzisA5gqdtNg{DhB#~GO=>1c?DE7Bn)q$MY$quh>q z(vGTlEZFAby>dR6`Ny$`51zW~CCLqDlvY%5{DGG@J#Ac)OjbCUEQymU5kW_A$FDpU zHPV2g2zL@(U(k~Tl!qC;)r!Ao`8C-*iic8PhTSc^OaTwdkI5PI{162v^s&$aCG6@~ 
zkFFAH*FZ*{b{mkU!Tn7){LCkT{Dp=F0dc14s6;lhqJR61KWc0BO^PW8G`eEUCC=^;_WMsyC*Ii|}H{J!NWbC;1 zjfV%RVUnNHNzmO;G9Ub=1b;j*;+An}JzBxNKrzprad~oJ{@SUvWKR)kNr{}PQF0|(SG5NjKXc!tmxs|$x~`)tL@W3c0;vT^`ec_nW$o~yuq zL&X68#Olr2-(R;>*31N#GdX9}cQU~rYD4S+zvghAu+#QQ2CAnVbO9GhKndOe>F7cf zqU#G>6Xn)q^zx(|z0#K;e>pe_7{Hn*A)$oA@5=llSU_Yq3M=KU+08n zf8jb`XC%eF2b++*-4C)mqjBqdoPYdYHNykx*F8kD1fK=;6J)Tcez3)v zOk7jY05H=8N2ueSR`y20CHO{;do5a9;EYsdzIo*I%-0lk#Nf%z@XCnX+3z)`4f?FF z@pU^=Xu^k8*9eAxc(@jly{GPPqyckX{FJ?8-)&Wp- zgsxUpezewHnL4l1s zH!O>ri=pX8O(IZ~vQPH5Xg&P;#DSS8Z*2-95~b50LG z!y_*#0m}5Ha+J_Nz59?amyeW#cjx-B!^i}*BrJ1|HRU;h__n1&&jN#*G`Ht{Xb9I*2$tcmp_sh;)OihVM%8b&rJZD&L%R-Ra`59QEM z;Kn0skc;cH{%t=KTUp##TezjW#=KAfde4PQL$gi>y$1X@rtnEEeRq_&7J#0lnuXb* z!RDnzPw|X!kqL!66Z_$W*nF#!xkLmf>N&3=wi-v@a6YjKjRhtZi0EECeTpmSG?K&A zky2}(nLtdODcb&dj_69xnRdbipos#gfnmGE9{+YK~|a+X2MZ602yPZ-v2})$J%+GR>alST&jFA6vhw zQktc3aGwX|IH&;=&LrX13(>Wxzy%#HXo*$Wz^U0tqr;7Q3JlCr7s#=|$%Z>VZ8}oO z4o}|R!id@gCbm+18Qevc0=yZKx9f3+?M3MAXl8d)UdDk!6k!rwCCF_3xQWthE}w%< zz)z1m_G{&vFgwcz;3-CNb1p5zY?Yc_KvWoyty!Fn6zY@@BnQ6!eog3sJz(!27KR_l zRRgTSDWHba*H)%BWm;{{Nz1fSu~7K&Rp7{lI<=EbY%gfO#UyJl(0b=?HxTZEhLW0Y zU?zKMoD_!X;clKu#nraswxbjz>Q1NpcGoNE*YlBioc5 zO+NjoNm^rsFJ2B@bkU1(2&TMZb!+LtHIkmihI968)BL1GR+eBTr?NqR8gQ>3Q(L@g z#jh%m1mp&Ojj^MF*-ue`s4Hb!RdC{eBNn2JjAcy1>Y5SKGo z2LNWuJ%Md3vEV3hcgWyRpE({Fn35U&yP*cQ-QoX@z4wf&^4z*Wv7cy+v0y{7AxaSh z1Vs@{RFJBIG-)bL1VlhUq#C25Qj{VfEefb~=^Zo*TY5mHY??}!qVx`TuAP&k-*?Bj zcib`VuX|5^oRbkjHt+j9tIRd$TvD?x{V8D;t96&t2PXS^(CL>0+*A~QJ6!>?r9U;P z1kh6ypzB!NhG!T3b;2)BBHjAv6ppzk3M+J0JWqqk6Yp5&6vnMGMWd{h;7I!>*1@=c z+o!1)Ac}U&Fecphs^SsV3ian~{>}?(^A6%fM1nY)QLK+cI_WzCTdR`P>SBgP>U%Xh z7*=J8`{_6%al}SU<9$Av|G6Xv@!b+=IBi#EX%A5D&@hC?4CJ;)JCextnFb=um+Lu? 
z8y1hD$56Y{3;s)eW)9ukD=4~<PjXD!)etp^Y%EH8RPPb+KSkm{ zf{1k7gf=#>^IG8Q<~Ed`^q)ZRyqt>0$Xp~dRnBaXDI!wTf4b~%2%SJoNP;kR093UD zCq*wq*pA-{TL6`|fy2%qB=YoXFV67quoa*xo~!^>s`4gBFgoc`W41Kl#ZY z;ZJ2?8qK7i>4Ez<-5rC?>4vXqK%@ z7{=r~A_a9zjXT5W0(S2xnapBdA2%c_Cj!YBmG5#)8n)1?i;&om+XD^dZI-xI4B|a4 zC)nGK3`%-&D%vAb_5y=eEgvl@fk-RFaw82mO%fcX*j$so!vR}eDfY0cHC0X^Ta5&F z%QX}GxpKd(_iGyQ_~wCOzvmUNNII~D$P{(Sm3??To*Tt>_{tzT8ytlJX$q86qasyw7w}zARs`1)P~?6{K^Xu@>Ds4U-SuKQL8tAOO_1&j(5=x zsm$`fLZWCEiQpfi+|0Bk2o8^2tBSw{%ANFSbpY*=p}21z`ZAyec+z${4x~Xx1}Z>_ zq_yIz=NOMR#1KyzKzLLNm!TSj#9bpaMneFQpQGF)kS2keG4zo~p+86E*yjd1`%|4f zRPZ#idoGto zmT765m)M4&G%?(qO0Q|Lt%}yE9 zLaXh@pJveSh6AQ#&G-Q5lG1}#%t7bIqdz&+wGuB^uzhY^E|U! z!JDfI<;fMqQsn>c`W*W~^e@sh!K4;b1B1Te000rMF)3rG(!NdSzA6O^_jMxN*esf|~|BKx18 z5vA3MG-C~9&?dKjI7P&rr0GAdtbq971Tk-bFO{Cx_TyFkgd(2|7sz#O>!+uaorrC| zwmmd>C$kFaK~iaLw_u{=thlm)Ef(G-!6#As5Fme}P_4D7iIgBq(>nkO5(^d?ob^%w zjM94|c@o)^6|JgsiOeL9QgPERP)0?0j0PN-BN>OpU87heqS8F6G;P$v#K zU(Q9iHMM}U>`t_0$}!&7lVEu!S-qryk#U#Z{0h$J+otVK{cqREgiP2Zor33T zYu*Cu)vx_-=>@Xd6q>$1;QdWhh1v8qYLTAD#PE8fC{P+Sq)G!1@Oh5q=c-KfESppN z7pZJ%kVaAhs-XzpfV#{R*;@udiv06gyY6tfcx@VKF*r^Lafk`x#PVdfU1_yjNwKsz zOx#IvDXe6;ekXD$t&cmEpnI^TpPCeeKbE^7xqTtw5H$Xn)^MQ#uEj*GTCK|v9gn8_ z#sgy9O2KPE`gEdQ8pM4FGE0(W*%cz)P(6=U3MW`-o6afYaHK*ifL;LmI88tw32&Y4zr+HUZO#$cv%~;4sFE&{e z0XJFt(TE-?u3)*BuNf{*14{2NqxBY2@cgbmhAFqJ4eeeggW`xGg7HCAOh>m~5q&C( zWwlH+pvYMi7w%9voVptK&xd}#P7|shWQ}2E4cHFpAH!;ix+&uIm#G@E<57sKq zF3KU4=-M6{!+Dv4K9`x9J7z>x5O1^;*0#Nagt&dm!;y&KN+5;e&;XkZ$I4C^?-N;W<_K*l`=y}9e}1!6n&8r_HT9G z?=V3lVj3rAR#O-_-K?f%u6o}}|L(3dH>7Mb1e0xstqfN)mDn{hKrL7m_!n zCEInaKhLqVqT+-k2-F}G^KuHQl(nJD%L(a}oFzPI)iny5;$R}jnmB=E6?G|&&r1h^ z1mlx49_>?gNLr7vy+>Og)~5M|-TywG&#wZ0c89vdfT$$3o2f_vaW1%5_|2BNeFlxR zpOZRdq7{-IybL~rvH%fKX|FX!1v2cQ5l!0wO&)MF){~dnyJ%I@R8!(-#w8H=9G{z# zi0BvX>pDq({lLk`*H?VUgy~+=F}TB&5)onV#0ezkEO8W1Bf&ogmQ4#ZS^na@e-8eqUuSz!h}<6Ag%@)f!CG6Nv>udU zf-hIen)o*&rBL-|RfBu3-X|2p5yT~XqvP9WbM??9UIsh9&W zQ51(X-85tOIZJ7gq(PP(~l^MQ9*is&5_;V_qD?tYRUWNOfyH 
zKl95CQw@7Fgxwpj2EN^w=ZWxo%=JV3!%aKh9VTHvz{`l;;O6D_e&Hu|GQp#%p>W6d zmvTO^ zIZ+YRKOF|!F7H=U7u3)uPQ+ii(f&_B#mAf9pjqK(n>GT-6dl|bl7u@^1iDep31P}< zJ&+^*PZ#te-n|Lg^Xj(s6L*y+fkU|nyCyw zW@iLh=p(AZ3NohDb8bI%Ax2le^YnME z`d3DBIgObSIZ{X#!`h;3R2tx32%%jeRkZ}>g7WiJ`vJTPY2^WdZ{eBL`GD$oyo>xn zS%Vq~@BH=~L7T(cL5VmQT9uEm`kFB8%B*is2!!wl3vZ@=0HWYU*2D1_tq@ZR1m7)0 z53e;#E@N%fLq$me)Dc||l{ZwG^J^wWyvrmK39#)mg{CzS;c((q)8v4Kr@BxH3jB`b z%;89!fIPKcqljBe(gteMfiVY&)ew&YG93GHmv5ho?a*R`?>_-$=#%RQrWp@RwQ+>V z>W_r0$ql9~*E;;TeZX&2{&wbr{P{KgA+sM%604UZ&!WE6Z^y~?@;HcrywODE#}HI) zXTwY~f=eNy#Cj@XYchx|Orkp?``)PeZodJYrSe|jY^+{@lo{kI{1{Jqomb<9m^hZu zp6ElUcMG+Ms@bhA+pd*{}U(Oae2 zE^uhRu1S2(b3gz{uZb-z?!c4_V1`SRZCb}jH9EbmhhpYgY8Q~g)kwC2eZN2JISs3l z{{Vd>^;}X~Nh2EmX73?Q3bpE^rX6jR&GzO24X!X;;UJD2(~#zw&`j(g zVhq6z)Xh`NG6rddil+m3!Ym6O0w%(vzjX~~UTp_<*$uG$Z|I%OQ%PyCkL)%)Hv#yC zRo^~XP54$xp%!PkdeT@rsO|d)#)1;WSll(}NqzHxL0{=jt&v*_83a9ETA-|4QSD4? zQQ%cqEgkjk_kUjb^Cjc(%LVM}5yK$$8&+cYDa;_^trrlpy^$7hi=tg7>ZhH)`zE0k z_@+TLj}vyEwrW)NE|i)S}n|PUw=j)O97-i&@(A^)65*IPy-1DX-(+4 z$q@&KXyn>06W@OiJ(`Z@*%`aj&8%+AUXDYB+3@Wv4jEw$$TJeukmsat9s%7z8Hx|8 zqiL?1psgVv|Hp?QoVX1w*lkRmee;O&j=@aHL3KD=kmQfY3Dl>L>)(HeoN!=|kQgr! 
zE4pcjbEBOXBLxiao(?$y1)&6eMnU_U}LJuonrWS11euNlG5UTvzg{3IV$(|2a5a^&fFjIrME&<)?Ty%Hp1d$a<>nAy5bNqM~H7PXnp?3&N; zv#VW3ZkjTR*I_vBZxrP{03<3r>C)MW11_MMYJMD-@59sv zKN7!2aW$G&Gk-jSLq9#2!r%6y=~80==>F7X)*Bjd0T6qUl>!P+N*>@q`ZebEff5n( z)$!Y7doDyj(;ui{l)ev_rjOOPKROexN*Hw(dfe2MJb)+a^dPWP8h?}C1RP}6Kq0WJ zCIB(0fP4sj6vb}S7zq$4VVQm)*TpWQ9aF^Mm!;{vCpv4xgw6O7)#46^@qax_=WHfO zks4obTaf3HG!(}HuAGcQcYIpUa8U!=f1Qux&QXB=DlulV^6Cb{9~rxhjpH}`Wk)gd zO(OV@P1C>v({b0_Eo>YyaPK$B{(W%~BG_{Qa`=q1040@m9wOtu#R@iG>^$+11B~@{ zg79kmnDNS`FG|O`QQKieI*U?RmZzI z;PQ8)ohDq&bqyeF)DwI6M*FwF*wq6t6^9>2pwJV4Zgd9Gn$Eseg(%=&vUQUqc(3}| z|2W??Q;5QZmSjzl79an-K|8(u9pY(r5SIWU2z&f@AeERc4OwW*Z%Q6w|Ko;1U$G}C zncseW*HS#_N^#8c-cFE6i$?Z^){a2iY0&DMN30?I3YmrfV7T~2UlhvWk(Fb9x|aBL zT|hs{czH{UIBmX2$%A8(zK7}ezh@wR&q%Vxh$GhXT^J-iD@{yqu#(R~KHzZzgrP|M zTtBt%78#hiBM$AP6%;gV@Xf>JpygV$?t;&zl-3KuHpdN|kORF*Bn1YPlBRay-*X0h zN>`7g1ZLM+fkRnaFp2qD`yJGornvh;wGXu?1F40fO`?2Ewap-mTO^v zmLnj)h)wMgFe8>&B?Zr|HlvmUS>g;1T6cO5m#nwhnmKjC7$*^oMttdCTU=_@A?ZZ)))gL+%d3sAMJfc>Y2&N$nPV@YV3F(v$#O{LkKRUwuM0_~k~TtP-P7 zAEveactM5H&b!F%4wudYLkdB1rMA&V!1?DNv)cI$XSdVHb(!SYW+KYcUK;$2LNH3# zL(M|$(xZUC)8r8>{{4^ZQtxj{82mT!Er=Tb{2~7T(En1;en)krNc#V-de)Vn2@TAD z*iNOkvc7M@WeBk1pat@pamHX9@C?(sH_f;m>ZgY6cGc&HvunCwTEe3b<~yk{6aJc@ zpidqdB;+J?mPGEbZBuENMC?>4B#x5#v z)cnF;R5RNdYgVm#jjF6>N{*hYsSZp28Y2wq8fza1=GbJI^$hM6q*!2$pIAXnH1( zw$1$Kp7;c}-`ki(!jzrvAlRt4$;qQcbq8GxH8+==1bKgbJ zb)l)*TmX8+IRwe=dPI#^fcB|3LqSZCofMKJcw_m+lS;>rAqp;(5?qYNVAcIIT+j9R zQe&eqI(!Y@;cD`XRtVVrsI&!qL~;ncO>!$|Q0EyK-sJxjJztCq+!0%TOxGg7FjSX$ zA00ZN;`}t&N{(zT>IC$|Paqw#Rua?dA~!cYQx@x-Xktm)>tgkia=kP|AS)sN7nT+^ zo8QO9D%NyF=tY*%rsT6@U^?oPV?BgN#@VY}R}tx&w5__BmAko(Rk+Nogv!c*HV@%- zoS7Cwas(|z@EmSGu`no56GMWuRV>vrmfFxX+RXol?l_#I0$Sl#brB#kiGMX?-BQm& z?#bHbjbe4{R$4oBo7B^29Po(`UHr-`^g_CiG}W{}k1h#qN+QEj(^TTt6D^I!z-&)M zB&9k;g6?Am=1MIfooX#&H)@Azf(M8U{Jr51N|I3S(}a_zr}0^zqp1~S5|z< zQIs(?hV~p0G!X!_E+@%74WnvxYB$Izo?m@oCZGR^kfAtOK`7fh)=#b#GZ+!dERlRN zMT8DI?{du;6X}D5TCViVY%}>DUC77m@?uLvoGcHH=q? 
zbrkC^9zvf|CuRPEM)kx?mnEw|Pt;C^iCe305~YO}ozVf`y7e4Jd#oMOVDNR|_q)fz zJM-fT*QI|VZL!q5I`t?X-PvB=O7L7h$5#Ci83uVis;#b zi@GvwY)-5SiQ|en>QOF!@#$t0eohw~t>rml7K@oPUz4zdMENDG4GCybLLr}Jx$lB- za3 zo{@33`}-uXIY~v5Mz84FKTSce#ZGJh`dkMV%<*My)IbIZk8nbxN+vB-vDfh}-po*h z=^i-)iH3HN1Qd%>8Kf>JCB*hq3pvstzUP7ysvOe=q9>xuP=JZTBV$Dg5(zI~PP<+y z0h32d8!Da$q&qf#m8-jc+M7l<9I z(Y98Uo5a)53-_9Abs?|M-WA2>xc;78vb=jnXk%sf0l3l?i!w;kh^)?j+C?8tMk-4& zK9HOZQu+LbN5R$aCAnA9R1QqOn222HL0;o0g{59qGukxIAjqlorhc2#5l|j))LYk1 zWK`TJevYz$fpOhRQZBiU7 z!SEiE6?Y~fA)bc}DmWrPnx+t|_#C)kv47Qqf*D`Q^m~Nv7YdQ*g@XnI50`d_q1@it zwsH&U8pg=-69vH9 z{o5AWyTRf!B5&}L$sXx_Z4}5(fXK9^LX6kI0yL|n@KPwuDTj_Fn3HW1X(4v4@&?0# zoG>U8v|ZghjFOfUZHEKdp<9w~0>}Zp8snz7yIace2RMeeW~jd97a!7x+!jbdE`!?*3)BFhc5!MpbaMCD=($vDwN?W zBn?RZ$vOAUQHojQBFXXzxR~(~cIK{D#E15zGquAERu$t72zPa=l z6G7iLxy{!4i34B067>K`Xoa%qz3ik+FP;*rxUJg9$f!6Gt%6~~AAYq+fT;OCYJy89 zSC7M|3I??%IZa0*74(%Ti?U^+v%Si4$j3ukjZRW^A@(krnxT@Q zfJD+7B9c<;Oj{_d3LmB#;<&PO;8aY(J?|vA?X|$_SmYL*5JQOy`V$oIl(R^`Z3Z8= z#P@1i6!Lf-9~3D#q4HzJIwJGp<+EZSde?9`4x9v*n3`p4Pn=T{?tsE%PK^;vR#<=& zxsszFTm!1Gp2bX^1Zo?SynzL{8*R&SXmweOzHs@-($3JCdH5(1`Ou_(C zGz(1qH>A#@ZN(s99>rIKH~J5SjqWsYK_X*)Sopd-_HK}7B0$ts?OnXi0fM~Y%W{X< zwDoOmDTukOt(xG5QdvT^21559B8YT0WDcIT=MP2gm_s|SNRo#i_AXXageY2MK}?^? 
zFHiRfYm*LTEf9XTLiG>gDVnX{y%cT@zb3H)#V}b1#5>S_`n@z`cltRgjMp+^wMK+Y z3nh)-lgAwuj7ZdC^vwwV?g%o{aOA9#T=zT)EfYVg0d|@g5co;rF=|usf@Ba|$ymqK zP)Ea`1-W231987e^g+V}BAp^s5q+NyMghUy?dx>+4(+GSETkGCHz!(4Kz}0@5yU_z z<%_g6{W{dLkl%L;$|HA(dswn+@Q|Wvjl4aRCW?tIvCed+z6@o_h>+g!}~FKB;SBQ}!}n z-8@5n2wpBz+afE1UhWkjPCDhiHs?!tA_cTyr68IuiGPl(uo-lqvPU8ueNdseh`oeJ(2M zB8(D3uo*AEw0@cvrV$|K-ku9)yV?OgZoRflU2t~gXyQWm3rh#I+JaGR%c8*WLCbwm z6W#0rmkYCaNdc{}50xW>g<{ck$Shh(>#ms)+k8DW3Q38_l!BD$o@Ri}K8U{JQTW}Y z)4L(n@TOw0h?)|GK;g)S?p=C$5}A1UKH#)HnN}QYQ%8=f0`S*r;WDX}1abdjo%B01 z@v7BdI?=26;PJ-|NMiepKUs(s5i_l@`#2h4^BH7{gm?OYEE1*j>j|u521gz=I`s}c ztq<*bDI%1KxgV|41G2TUhhEnQaC}&we-c<3j8!B0k{;%)ew&QmQ*NGa=dX zAvH{k)4Sqy^`m*b-4^BO!Sf1JeV=R{R;z9bmvDFnXp`Pd@7PA$+AW&k&1Tljz^&!J zSPgZu@HNQ(wPyQD(+McS#=f@F4kY^RD|>U%eeWRea+0;-DV_zUs61 zfrp@C4gm4lLAp(0004h~CBDS~iPY3EmwX#cM*T>y!JL9`o-CQxwc?%2!<^Zh8lfT- z#@+;+zw7Y!8_y3F9UgR|%`HU;g!gHjxeoGnYP)xAlkrr5h9*y~dLt$fBsk^NbG*DqMTdDgO2719t##lb zVq#7FaZ~M<&!mzxF{RBGrZ!~?G$|y2MUCSPUoRiLQO5IUC-V(ZD72lnP@Vmm81z=C zw2gC|p~3b662H%UhFaiVt?5|_q+~eoM`HAv0yO<$Bz0;@MeXpA)gL&Yj1<=OrFP8+ zh@*eE*zEWwWr|UO*FM^?|ELKCDLqW#>$@VIQc;c2-k9R)WO?tfKV`58&+Wr8mIHC~9x%4>22t{hFCxiVpQwxOD4x2m`A+k; z1eo>t=IqwfoHOd@auYUoMOd`Qw`Yx6X|>G@r?$OKGi`z{vY8TV=Jt&UNhNxXH~Tx4 ze$MaF=hZXp(^s>R3KlU|vVsWU!DO1QkBeJA+&^S#9P6}+G9Q*h`}>X`*!e*>3m?>n zOBO?!j1(bKvt(9J{*}25-$nS?_6A2k(ek zPBO`3f4IoCbPAqu#~pr@njU&9!YTMcSIF?79?{jU0R3?xCkhT*Ocz%asgF+B;R+2v z+_)WoZwId)Ep4b#*RqHY9{Yd*c8p@8YG^=IZ0@f|r<-S|^@{hSf{63A-=P#7Kn)IB zT1B;S5p$e2VWQszN|#Mrs#_4WI{{+Lry-aP;WO)74bOl&Ns)sI!qI}#8JMDujnx?@sjHS^d`+@V$8aS6b| z*dL-yA1OXRQ!I=&_X^;Hs)^%Zfv2V!aZ{mAvmhE!u*P{Fir!ol3pF7HyKD6A$-xE()|Sedgr0glL7uDKdqS{hbnHUukirTfK#E~ZwZSf=@@IPl|D75 z1m|O6gb=p2)|{UnV2PCR@%nMd{T#^l3%cZum2lLA2CeY)GXZO=v!Ej8XRWBm$;xC< zej~jFEkTD19n?A0B;YURsrgbywpJumYnfy|BxE)mI*7;wK=^H;#t6`sI8_X^DVgPc z+0-L%xb$KUIsB$K=9XG9>QP4Lc7mGV8**-L zG(jd4f=Uc0$YPsD?5LZ_q8V?H1=NvcM0`!nHs>rC)@%#b2n9s%$2454x1aP<6s~Ed z0xdPuF-a&BMBC(vB9rB@iOT0Vv&qJs7t&FZ`13gVC8~2G4^bQ^h3|l)wIP|(profE 
zef5R%tt!;J(Nu#PAp8+PqLNhX$LkVOaQhHm)nu`Zj3(5Wy`uGQm~-bJNN ztB2@6k)24?s2}Mrv9T_&Y7|dZojJwU9YjvTt{!hQEL*B3FmP9Wlu4$qDS3B(=N1_T zion3wN@W6=JZdjaQ}ePUat#CS$xbpzrXgVg0?Zl1VIX!R}hxN@i0xg$wZMF2Efh*Av6tW8whg|h)IU!X8b@B0AL!Z$~!$e zgEQlSJ6QmraRYUUC=LrrOG4cvZFvQlAn&PzBRYjeP#)+Z3vtAAm{*-g$f%#v}F~Y=?0d$*OEkn zEy;68x7#KSNNU@SJiNTf+!GmDld5?Ays4q|4Yb;vE)3HUd7a>sZAc<(XpOU*8x3** zow9m%=%({qYcWe?pf_Vj1?Y}a#W-aIb*R?oFTWD{MS%;mhRD`0oha>=lRYWMs9Uyf zE%e;TLG~`ZmptTa&>g)&3o$P9f~ zP6kxfGEU_cGa0@{giHg6eJb&UdOX_UQ;RvtW5_v$$b|=?jSyl2>wWu|9UMAAt31gj z!#JlMp>QJ=Y^Pla^hVk5K^9o}?$tQqyD|@whh55K8QMDz>Ut&6oZN%ab7MB8p2RAU8{D;Eer9^sUfBSq% zl)`VD?p?-lDnxKeR{N}@vLocOGILsB>p4(JNINVIe^Hh^6);7B6mVLYyP<$xgEh1! zz;CXG+_;{k*+JrCv_m{~HDtI#a&)?AgySISkR>k3qY7zQAZ!Z&h=qD1X^vtXy7v=I z?s>)!XaZtm9X=#;B0P~79zi9n6{oppyT}`j9H^YZH1W%x!6B?Qc^dN1nCu9cbu3tn z#-+5^nbb&N^Uy6;2~`n5TTBzhTGR$2O)ee3(X&Ph(yJl_iB?JajqLrw^%Qm)9Pb7- zr!48j4-~rasu*|!T0Dbz(7RBGC?|FBCqNq~^pkk&y_61_z_Ibyi)foX?X`<(eC2Nl zdQu`{2m`IyiI3vduZggVJD|{|_11L~$U8X@x2>ns#o8^|O*o(W|xe5iJrWL zrcabe+ls?5vflz?)Nj4vc~XBf2RuP6ZK+q6eOEo0~6t$%G&Yb>aM%S zUW?8gOdMe3sTPKL5e!g<5MiNqJ|31-5l%-mP{LPd6snGw=*@VmO;Yh(U0w1T&r>D`Io)zh|%Kk}> zjjG(_vti6`xJjvrHAy+emei+E8%d;9Lo>9&-k3)EX7-_mRr}1~nt{)NEN+qN=Kw$! 
ztw@o_F|yFIa)feIr7&UYZmgOK zrNX`?tLA3$*reKhabEpdZ!XicTUj^_Zwu2dHeOAgyjOMc6_99d0GClGrrT7T7+(QV zM!e6))PoDXnyqv<-WH!d8bxoEM+pP=%7z*>Ht{*tf?+|_uNR@{NG(o;EZIdF*({~w z5S2SgP^S9smicWvV3=$YBFCg73r5g@Hf4v___#bOYaenE>%xhuLU|eiR74ix z(;Mh97b<4cDu9NqNOQ6<9m*Mr*SLu*em}M2OXNYlJX)ede!>d0B8!Cs(r|O4kkhP}0R6d~S?KE(gYewV<4Teuz5rB7J%0Wfw zeFDjkvZ zaW}D7YkPRN9&`G@R&Qu%xrrAU*n(2u8)EVJcZ$~Bb3Wdge;6oxmF{_3Rx^M&Y-r^N ztiv0P4L2@{1@nud=CwP;KiwoBPKBT^K*5e_AZS6u0?QbYFdBZsk*q-dpxs zsNWorytxc9v~3KP^>(ViZ&a7Jre42uy2j?g$k@u|MF}`t1|ZNFva}V+x| z3c>haqpf=!;I~OaU|aF+WzO~osV^p9fg-$c;hD@+H)%<1CqMYw-gQjd26b!$1F)S`}Z#$Sp(-PN*k<=lSS%8tiwdOryDxwwZ!#W z)ME5jKLS%Zls zz~X!~>rqI=gBJWEx)*pa`8K_XbppI++p)*;(~&kHPu0Uo#uf31)j=tZIrb`$m4jU1 zhwhBq#bs8bB?p6xVt>If*?N>k=JNX=Vz_j4%a$#P@g2Rrak%F@QQdR*;!@Ng)^8|& zNpbX{gq4mr(A|V$KR&IE+kW|N{shtxtfiu#r6DeUMsU4+IyCXNyZbwYV*M($h-1r% ze^f0Klm86q;&Xj^f^0oB7W$5t8^IMS@(U68vu4|-L01$}=KVPlmd|W4E2>qA2^B!)t zn*B}_DMkHX@chtw@xq?vVrjAPgivDmro&Q&>xcAvI%1s_6z0yGC#qlgV+?0~2o|#e zmA5nM1&HKfQxDNs7mDAL3AwqsqOcia=RNi2vo?0D#ly)W$nheM^&_-F8XM<@p2hD~ zj80Lu9^NQ>Ac^ovTeOU`6YcMNd>lol%h<<7e*BtMZT3vQiPYKwTA z`;OK?K6L@OiYVpfAq_4>>-5Wora5P3W=fU zuzK=nVy@r_KH{yS?zsyV*Iu@0JqlJq4VocECT{q_T^i31VK|qRp@aG;#kTi!q^ys$ zH+AOOBNhpCbB2Vz;S$ z@V>;xe)lvF=j1n+i=~!;WsCD(jnLV~vrIiu4q)^0^n^`Jy30vq8esg^F*V2UZN7GPi{cPRV7c(+ z&J#X0xVcrp9Zao*VUo@EhMjUgqvdhz7m9pQYPkT=dFWrJpyx-LM1Xw1bC@~FfvDgiBz-) znyd{-I*~Um8^%0GK%m=gQgUz0^^ZrV0#Fi_>M!BaIg4I8%d$oKovDR|MUYof2OP*u zeU{{_$7@#tT-_DFY1;7~ZhOq#*P)a65Fxr;7cM`l>#SY$f%E?T`xhFsQj7jz0-ZSp zrnRNMno#(rl_C>DvxlX)jsCfXI3laMJ-)2LxcxBBqnHWZ0{43GO-|tg?KLrN%E^@T z7cd`o4lB98?N;53(2OaGSK?FC)5EYAeaLxH}IKmG`QT|AaNfe03o>5 zB%!nBC*>y!T~8Fwwf|s`uc(RsUNXbuK%t}rJWCDLfFJlS_6{8Whi=@u z)eR)`6xz@5JAsW+zMjl5h#Ub+$J{-3C^_ync=NOBvlB1- zdV7b5x>T^As5+vo>E(yX(JwU!K#I)Yu3x7r<1T}77qK;*2!*K@GG@(rjXhW2U~>8% zw3-@J%h1FD`uO!bn|}UdXH^Ln1;4no}hYGYd&UQj!G04O4QU#vOY-^G?2qV)q-lV z0cRZ)wmpvXu!aR-&^d{kCyE?FG6iTz>U*BNK`vS<)$O?tX``-eq%)bl{u2oXD2L}$$y_PS} zuQqnLV;bJvgQyT)=YFqz#k8Y0B6U#38t-=*mioz>OR$Ewclnyg?Iu8xjE?h|pd2Z? 
zo`KdN0gl%tfl>A;ogLSa6?dYUwJj)y)Ud8J@Im<^FkaunGEsqv-B#v7i&sX@8jS)_ z3skIs&5ats$Xq&EaNZ>^kvGc1+sd?3}(B`fwpzxhO(%^0h}*F2@sYOTKxxUQ$vr zS~>C#s89~(&3QVbeT{G725W?9;ekZ@*KyL5J-%x)IeWL4*a112^nYf zcSWJw&bfW4OE|3sSFwFsv7nEQ&FkzJ{WN~_?r28ox&j+6JDIg*cUDeUKCc$enxkJ4 zdmzozY}w+)!hwlO6JOfh;`U}H8k)ECo7{;^;>Qa4}-!kGi z@FRT3X=-8c*6V3}>d?feo^$c5rtK^*Uv?O3E~xsF=rDRQS!VEkf?jjH6Eiy`{MW;W z%a~@n{^c_h)Yg5Jf?PvJ;@i9D6SOnWjXe8`I}pSkt+fp`$5k}(ip;*|`~;vk`+dWc zA>Js6?KPZqEJ#K0J&z;HZSEOd!_{4}D0FJ{j^t#xf#kL=Y@_?^2O_XtVcjqjs4=43 z(~p+jvYm|A?#`vNe|gbl5bW^@-3QYlO_4ggnH zPgmfGm0c)eTiUn@r?iMQ;GL#+Cz)BvvtR6P)2D7L_M3pB14V)6 zrGeL9hFAQC3iWF8FYl{K|Io|KKgcQN9E+(-KnmCk$~gJ91u;6=X_}7V4!=~ng!y$| zKi3<0=rL3fyzuTazg@U>;!G(%a2G~cN{x%ya(;Ad41smqd+eJx7E>O90K(SpnTLSH zj$`*;pllO*c%!B%XrbpF$+?Ya*ZX8Vi~;i@aaCi2u&dBZ5hb$Umct8!Mnm{Dg?%39 ze)w=E``nQ=^@=9jFkMKoZ*S_jvqtb@b((dD(yfb^WsFb7>L1@CuYj8JN07%$odLVSl1Gony#+7>w9ugrXXf5|~K6%Tix{jE;Y-ScI}>gmC@?rzgj`?{uz zwh|+}D5HYYDD*^vUKXsbw0OoS82lKJC$X+|SR>cJ~GT(NX^E9@Ehd}a%f zA5>AeGz9qBbTTxgrEWUAr*<;-6>3hem495(U>w)EpfGXcb?LAUIE|OZJINOw-|T+3 zLPWa-;3b(Vq`1t^JWB(}925ta*ysLFDqzF7ZMd(ozICD-CzKXG&|AclRX=vj?IVAb zWl^L3{%!|Xkx#V~I(X76*9#lCfi85mX7t{g%;W!u<0JfWI`FhYH8kV z2j;*iHq-f@927Byfv^TFzN=JeMbiOnlN%8Z(E*6Eu^bp#ErS4SkJBUG`aeL*S|{NAIkXLzPuDN{hQV zB%Lbid^V#le-&@;vHOP=Y*V-9+OfH60bzOFwxF*(ctOumX_23@yZ>|=;s{0fQ9%LT z`fv)o7PVLVXd9A0tJ;P{5{b>0xIn0cZB;S5T*FF()i2G(BP#tOH}ozBJ; z+D?P*icbqg&J8(e=U!Ds`ESFN_S~sMreAK<-rpvJG$6;_(TXm? z6hjl*GrNN8*dO=q{p^SJ?@+oa-&o9UCJ-vT-@moQzari-a<*Kxy64r#$1Zu`_MIEG z7f0!*v@2lblyt{+jF%4#%g(J1UFPWIRA%%wMUBorO4H;eZ$<`&MZd($gvri7)PTzM z_v!?_O~d;!Q<^~g*cfns_O6l6QYrHB?+;Q|Hq5h8n zZ&4{^-H_p%x0=oKf|oNMV*M5Km8Ks!` zo=c4<%h&(dS9+}+jqR!jjFMO&MbcA6NWqI=Hg=b)Acy zx=7Zuvt6SeieN9W(!NR6>#uVwKpIFzvG21)0e3?MG*)W$wNy{7oYvkhZks2ZRp)@o zVRnnS%20*>wyzK0)HtiO)i=T1F zWa}~1dgSWLw%5>rV?yXbrPrfP`nx8CJS~l+3Xf@Tyx!k7_+t&w5-&viNK8%wccF=S z3JQG7&Daft@YU@`>y47w|LmNvh;kGfFor%><+4Bs*-8!5SSkr|gn64f#)X~3I%^Zd zA*Jbh7clbUJL@$QjcgyZaY|GG)R%Iz$@&X~fb8=KW38<13GnCd2B2Pp>6s!E#ZFJ! 
zTsu2CA8wC&eS7BXMJT#$y6xDEmoBs`crli-^U~gj3Lj7$DK*|o_4#3+!`vTacCTH@ zHq+A7v0yHnuBP-hJ&*l@)&E+yY;TZFY194G>fN7f_YGRAq`A$k<&`mx+sm9$b$I8u z?aNv192e;ue+q81o1xokJlm*Kc0SB=)gg?P6aiGLVrChe73eVb^(=s;&C!1yOj{Sz z^6B8HueGXnuzw53@%@{wR5kMngelE?u^+)sSX?Yf)bt4k+Szc)K(Wv)F`dmmy_Qkw z)xUxDh_Oa9?5qRo@K28I%En#H)R><(?E3?isZ{mc2;@N%y!Q!Unts51Xi!{k#Nntp za5){9u>N_*K}5L;K*6GryT$3{>q0bJiB^6Xt@=@E>4#JMx_>b0`!%0~Ew}?EM>KW; zd2SFtR}~7+b8JER75$Q}iqR~{V^Ny*D3u!T-JTS;e!S2@zu^7rMb%cLt?alrGf6+W zk4DJeljxd0x0@T0llEv`ybn@RG8V2LbWp=(Oc$@lYcG|K0n@$l(bTzfLsz1JmUO+m zzR1#fj>JT1o@bM*5Oc;bx64RSrs-A2m05QID~YQ^Zyp6MZFt#9H~AJONS_9rIHnaj zC9a$qUBGYGFt+C*`>V=zr797}9w9<>0UW8s=S)3Uuo%-2C3F#$xFt)|30T9aNp&2z z0<5NkHfE1@MU}%v!Q+zZM=M^jceITab~=8=2olAItw3}7j@evyPZQ(v_<1jELF^Z{ z&qZ$##HnX?S=$RHy1>qaO!!cc$Q#*4Zd#ddKVip*%Mre*MSR5$B{Si}mO#R6+KSot z8qGhRjI&Qj-OUnc@MtL!akwpm{j7l<$hgc-r^AePFQiZ>fCNt*r|0q8XUSs(m1XzkVs%rp@o`!acysoPdye~cU)@#HE+_7+;^}#~ zi=9o^Cf%Z04UMo3KtG?6RXob)!A{^k@rP@W@MIV`ZIX zN@|IKjsJyYk$U#k!sK6$K6>=%0!mi=Ov8OE<%jA-xEST?()CY{-T$-iO8F{OPH#H4 z2@4PS1vpjvlrKU*r2yc4L2da3B zA^lG`cQ{$x?_^}i!^~pU`t|FFAFjh59l@V8F1^FEBaGd4*OJjXt(5@Db$)D3Rf zVHZFO5G$}>w}>Zq=fv#hi3YYa*?4f#;AjClNtbmvbLJ16h$mZ*-pSw*u|`$2m*}B_K$P#f{nroIV3@`CYHekOYL&~Hr#E9W49gzD&_B&fIqPq=Dty770MnPsAL6pEK07*vuAc~wF*w4?e z0IJm0uDjRuU@H}YDntcGwGzxmx}m8S%}r)vJt(sY7PweE&{p*JJ=(8}155@(h6WSj z`pW*L+grxW|Wzqck=aJNJj{eqIq?v5+%Cd6D z(W~!wNM5Qwb4h+)OE0_nyq1F|!gG#a&428&5EDAx=xZEoANW^cEm*@R%-Qy4TRyiR zG|FB)E@oDHrrPLK#zzH2J~2~l?0%%Oq&{|e?!8%LVrH_jmrf!c&{M>oz!ZCC!P5*7 z{}&{WzC2wi_(`_hWtqGmuVFPt?fakhZ4;QkXi-pEMlN3FKAv{biJ-O9y|I>vXX&ei zBaYqw73i<9iFHIq!D`^Nr*NTC5gwD5@L;OBn_o#}{pA#W#!uPSwRuk2nn6#46CQW; z<305@X4k}Pj5rJEiJ=hjO;0ze*%M!GS}w5(lXpf`$Mepk#`vu14mZ`GcoE zJ62t-;(x{Vp$j4CDQJ}!(06e60!Mj^STl^7YbK9KiEeQ2bSAdUWNlfwyZ>4g{_Wwg zc@J)%=AuG}DucP9L(WIT=HGr9z^1FpdQDqV`1J_o5Q>ATo7o*}Wa&8FTxP^BFz{)T z^h&%i0$HIrKqy>R>&lf3=)yazrlr-ND6C6PLl0WZ-i=;6m3JoQ3?KwCiUL32<}8 zb~%G8F^3Ve>~ooeM#kvVicG^97qXQ>%N~Mn^TU*bj$?ma8tF#p;+65-z-^ASlxDi{ 
zCN9kdfT!H%c$})33Y&YjEtd8QNFNfA}VSST!*A9d;a-U@VdrS;Z6y zA3{LfSZ)Z*Dy+8(kkta;Y6z82NqxN*LCpu!%fqCa&4R|B;FucakK$rB_$d6S`#>V3 z4O4gSgz#leNzzD`I?V?qZd4k~AIhHdV*luuW_1i(QgC2>M!+khp~TG^?Z#e|B1YFI z=gph9H0)pVYxFb?r;8Ou4h#=gubTZ>(# z5=o0zrjo6qeYGVNl}5!Np7)s-+n>+7@5ArGcaobS$|ab>q#)Ow5DG6ynj@ubolLM8m1~duTadab6DqmZODyV%U=1l+UFtK`^lEbCpyVhC8qLp!3*BuXk=B zLL2k*s<8Bsc7D;ZD+{xZ*iRAW#A+Ts&=-D+rRI!Iuybs>y|$^WJzwwm5=Y<8M7G6>CciPHV zDD{tB4Jyvr{NdI^0DCKnFP_ma7&g-?eRPLcw`EYpDdD zcboi0#OTztkpaL;lM#X}5lU`|&7I za{v|$r<>m>END;lZYoSw)(Ki{?(z%G(BelgRZc*n@QaqCq@AvR^N-3EFG! zpCdMud=Jy`PiVPvWxVT)qPwSu^zJ>80G2}?XInOkKj0VENT|PqvQ&BdNN&~TZuNiu z30vfI(r)pFT3)-DKIqX}_2}8s89WvOQ;VoDHIB&ts+5o z@%)SRd61N@QnI|tRaw9<>n9TTHCLdoqq*-zyUcE)*M)jHpw1F` zE+?t!Oo1a6TGmJC#?np|oY<2C<6Bd*3HXG!Y+;Uamo6dD0o@{O=*x`SwG)-X3_uX> z=lGI{v!dw2iQ@GsKpYaFpW}@LONMG??i|dq6Cs3O%jjmJN;uz`&|7og0gmhQWoRrPySl|Iy_rwFSyjMD^<^7@ni#TJZj^H z5;bDbFGZK^eNf}=^P42ha@0=k?Dh=F80o*6o2uUW@zm*$N05QcQg;*rwL+qC0SO)0goS}{y<^18+F+a^iX`oBdtfjalgY1% zV`;Fy&yzFnt-tivXelJa=qVgPpzG&|!^Z?vdE4odo;!S+2skf(w{6v`)u`;>o`+Do z%LUqvzS*ZwpGM-_AXsxbxhQ|%_oZwWSd#N9+N&+gU4%-ss$Vd9@-Lh&^d(Gm_Tp`t zW5-&v%*D-lyMSKldxJa{^*25_dH;t&z9HzSyMO)h&`=w5`RA#>d8}V=kCnl$bMJt3 zfIM!W^Ti#%2N#DDV(oE{Ydn0DYI?eYfGGT4_poW*PUgZ+`&fU~j|N7f+ z!&)>yB2f#iNYBh+UGtZhw{=hrC6F0UnZ_l~V0v6wZetfR^yu#4&LB*7>MX49zvp)z z!Rm^Dk2%XKZ;3UXH$xOSX;rLXt>(d6?0T9j+!a->$Z|TZ^pLla z?G^T(RPBg-c0%>lz((7#6ULACraZNK)1{G6yqwb7`2G*Jf1(R=`iup!$b|Msd@ffk z+dhU2S~SXX3FC#OlzyQevFS*(eeSjJb7{wo7YGHTa?d`FL8UQP=Oke~>eF{!;h=yZe2r+`sf* z3gR&1kD1A_^mSSSuPmsL=f~6$t?j8m+*avYrX%=loboruyVZnZJ1#nKs_)&8O|Y;d5|qDEIN#=ekwq(oH228NgLan& zzcoDkOhWvqK8Jo6C=D4d!u5?kDYV~&jNkkPLo8!WNsdczA2uHCRCb3JW=-Y@r?#!( zIYzHImSv7nlVWJDAhCqA8*9t7z388Li#BK`TSLucDwxX{??+`_-FY!;0iSf%y*v&S zIp4OkdeMPYdcrTkVgZnPg~|b`)%R0UM2(cyC{2n`Hfsqea+ox4R_# zxxE7uB+p9$&xopcuXiyLF&ORLsQTUrA zPCYKV>{jDP)fPfD3*k9qVDKdYpMu|e&Q6S9^K`FN-o)({RDtH5ha`oNZtOaD+Sab? 
zNsdGwK*XH!d+%S14L?X@GQPw;ixS1tMqIX=62fK6j0;mdfyjo&mseNL;X#WZY>a#D zGk9Aa=Lg;rYe7$?V{@%oIE~oto{?#W+pFqrOhj-yAkUYXNWuAJ2$*Se1Se-ZFQ|L|*%1PvtHtpK^LdzkqpZZ|L zoad1qDa7%v&tG)Id6h?~5mwBni4->A=btCDiitFv;$OwWVyQyY zaxZb*1>M(-;5Sc$L%74^I#ohJdXimg&m?f6_elR@$DN~>4YCv)IE3w`3B~UqMGYng% zng;T|UW@SoOgV#Bnnyfrw=bjUl2iFyqU*j5J$m+x;E9A0dwKCbluYWR3Li(x<7Ha@ z_isheUpCPA5gLHUF&1G=xeY+CQjPL%G!D1QO#dtsp)dAocv3d)TgN zdSQiPn{zsoRLA=>Mq*rCHj3QnH_j^_qreH2)hW@e+Id3QK~*{4>>I(PnTe#H**NJ; zkB@If8k|XlwBXv50A+A<01E?>MFFo&@!PxP)&&5k-m^wCYcxk(RZV(wKAXhidko1@ z8oI!mdc^pt_yCrJ+Kr#Tee-5Huifs;)+zL8C@Dx-6|>UDy8AW=0i)*~WTuFCewC{J_$IsmxOI`M z-grLRBGHPB2MxQq*zMuNhuy|`#>rUob))OA(nN3yOgJs+UYyG*fl#j$8bz5!Z0|Q~ zc>t9qoNE{z%Bhcrk6f0eBFm~;XA#nBP4)Sbl@$GLTDKm4ImiRpAcm>x@`PAf52LK6 zt;LQ~lDIyK3dqJVP)B%YGjNb?Sr9>l<4=FR!2#uv)jYhBo^PC^m*4%vIYAH|hC|~* z^MJ;UGX6uzEPLBw{k$!@itmO%cm2tu%PgsDab*WHtPk+?<;~)NH*|8IHhJ=LvR~x5 zFK%I0fkm+Z7@fNp93?N2cP&7rood@=*gD6e3s)oVCcruDYTP=5`ymMoltl&YpkW2s zt{JB4b9AW9IBc7f)#jYOpq>5Z4{T@{Aq&7j5!9$N-mbcmj_d7QNiq>0tbP+I2kx2zF{8R}#o4tb5tosb$ISF4t$+Oa4p}pgO^%T?%i%Bb zhpq0@qLtLW;#s#^l)-)CGkx5p?YNvE!!Y81m`%z?^&6SpNQ=hQwMGG2KjaI)u&vD` zV)`i>x|cV-C^lc!k)_H-sj(d+Bc!*SgUV)-3L}fPH<5_Vbywfz2?-1X)}G569)W3A zGI2|SQ(RcJNyGKpX;9Q#I5Sb4ErP0CAZVq!@xlx|7yk9eMfQSYdAu1ahwF{xU_<{P z+>^_BqWx+l8=`4Z!7XL>{thQwTEfZu{>e?YWoM{g zMdyU^TskLYus#bWpZIbL(@{p%JKPJW@Y05m(flq=n>w|cTxmBtwQu^{(_0Q5AXv*k z(z;vzVwG^lK41LMhYq4$%%D~DZ|VwNfPFvv#o5-}y$Jy-eCO|<>g2hYzK8E9#p+RW zFRAp@$KWX}BDSfWS9wvV!IraGS!;@YCZ6;_cRWPP-5i{GcX>TCOSrYOyy*0}a|9un zSo_AOsaPx#pg@wgjyls)5f$xCdMbH}YEXN#T_|k^sZy_C`v2uCg6%~pU zQR^gDWGTVLv_S-8#L&5r3D?KCy%?r4Gk{}j5zy|0_Fiux)##%7m~#&6JLvwPYt$HPad4SUp!| z5kMqLDXNYudwBoW+^!>*+@U@cp2p`!`#S7mEXtM4;}!vo5O|6lHfX?r>CDN>A>+%4 zI|_6IiR*+B$9u1_*PaHPh@N5W$ObTmz7dBuP{KxXJIKM6v9)4VmQ+X&!>Iz*VGF%Y zYUfQ&ndaMjN*gv|#=|&;mlscp6Z!|ax-*9U=2DR-wqB;sMKsdoupw>vi;?y@%eg^` zkkC6%@9c5F1Rxp}w`t7xpDXD{LYtekA)2!Pq*&w|2fXO~x)8>)8K_Ek=f%-A>-rFK z=ca5-5-(n(Z8j=j{uzVLG9=e%l{a?S2_%9!1S)}cSZYg^GT1qAhjZ{H*v>D6uBA0_ 
zHjzzp>CH|FO`YfCof+x&!jfk=1Ty6S3L!Mj%T@(Z0M$K2{;&xklWB(MwBe5gH zp`-=$5kdv76_{370}6#W6|C7$V@pO52~>=ozPMF8+daL^K9()r%s_>1oRnhv`%zt3 ziWQ9xtFu+l%J~i5l+RktiLln@5#Cb!sE6X2+2*(i5Zfxg7(bKjui9X{c&c8%4-K|s z4JEjR%>K!hQk@riVa#5UGxI~wfGIQLooLLsL?1E1bq4HAtjDqXDbyDykIq;v5jI9l zKc<^6pPRM0`d(5(@s)5q6;hpzO1GzSqUf<5aa@*?ux-o+cs@L1lDta0#Mo`@IhczG znxN8jDo*x+7uWq2yCHxv7{f;K+FCGN3rs?DE@}Kq(|sP8?8f@uaCdgIY!wvV&w@K& z^iCEr;AGq4Z@>AbB+(Ey2<`7Q7^yhXky&`+^5CJ*oHx zyV9jjJ6~Sy=XgJ|da#h3<&}p2$Hk<~Bubc`baX zD;Ez;oy7(<<`HqW@XvaBdI_5jA%WQNp}BldBZJ`v@FC`o;zW~4@uDwF{{zmdtZFa8 zQP+R_N%%ymbtdol(aL%>;aOZ=5?_TdtoDkV11&uBUNEr29*=;z;~4s40=3sn<(#7y z>Z&okFFcMpP(hPzfsy21(4(fOH0TA9YpQXw&jx7Pj&&n03#^rPRW8s&ZD;8*;=lpc zWf&fR%Qjxqs}fQeCNkF&RdExCA5atgR^W0xu-iA%;a|YzS2-f zB3Ovj;utThi=%{7J&QkB4OiE-y2yGnNIVgR#es9}59LjpHbpy@nnQMsIHFrVSKK20 z>BES*B8k*=8z5*dDtHPLD{OeCQQ8N?Ag@{$lXt5xg<7Cd$TE69TR#u`nWPWeJ)GCy zeRFGnaDROgpzLAD%M6`3`V2=Bm;Lwl)U7V)e^zJU+m0njmqA3+IJNEyvdHkv^% z2`rV)h4QLieos59GMy&t$;RChB1Soe0B0&!@qZOW%nt*wm0a3KjCe*h>2_10 zl2TtDTR%r!2M^4iD%m$!b58NtE4;)@=z@WIn-&P8PEzoN@m0d_$<|%%zG0M<)$OT3 zL6p_pvE*g3KT-tJb)|+jInXrZw@IV(_MmG?4KIFa8F0X^96*B`OUj)6OY!lQ&|YSh0MW{gE&Ocfnib_F#C%AJC`7eyyr08J2#Zml$1#aMua8BQvrK#?UUPG zrK+MJ-@YLmcy>nM5ctYy=;1i{g&2q8lK}2&0!|wQ5eSn5O#JxumtWHS{rBL9$U`a= zC{mv6HII9cVxG|fnz4Gy`MI<_Oa@2EdUuuE5fW-C+AHWAf|J*S2G2j11#Yl0&jz z!B7aRB7JZo7&184OQADoN8F*&N8+6n_riqHxbx@J?DNR(DHmtmT77W)_U|MfAy4M< zsFk{P3#Gi2MnQQQ1XVKz(#7fxd4!7N697FLx=(iK#Q=7?tz;ZM;_QU*NO}y- zs7EL1|1t7e$}Y`Kljf=DZZKt_OQ491 zgDfH}t%qXElh_4FmQt@c@fg+W3Q8>F^N>B&!)FDx<|VRta4}bf#Gs{yVcvGjpvmge z+t+pHc?N?uj@9Vk5 zMEH$?_u5gTY9ZvMecJ%WaH#@z6%h4KF2GVgTiEvQ8eb?B4TPtGIxXN%T|c~E&zc+) zs`ooOjqH4iGHC`d`LpOFT)1H}Px}lnOiDOnj$;R@;-3C&YBI-> zC(A@>$lSb&`?;mDt0Xf+bNV?^rJ!6%mZ!cm;gKJ}xLtYu_tdkq)aaS{ML+^Lq>2?W zgXSJ4mz-c_4g!rBmG1jlm(Dj_8hJ#ClO-Gv9>U%pzZB;l2dog ztFTehz;S8;uV?{p&CO<_LbzM~qbZ~jwQ5)DtUL}1#I>lP@crmdmq9Q2$(^Nr&%&N} ziseMBeZH(EnN;1+k?ikgGHQEUp}x0#>^r;_zInYixIY!V*Y#qJ_uFU1e5pv5Y#oF=eyQLb>2}fWK;2cz#5T$9%hj!eSh0-GL|?)s5f) 
z!7E&t1-tVg*{)k-{OZb^LTSr$%bu`#3_tPxECS5}FwCwEx)c~bZF?(>&4v@x2V7X|M@iam5pN3XiKdjrvZ6JmANo~~Se+HHu8mUhPHSeQNbZ#Ixt zLBQkA4K=hN0;%J8g)avu*BZ%V^ zC=2A=F7Nq_%j*oEHnzHP*Q_%W-hbbVJIg8~;ClJtwa95ykL-e7*ec)6M^1fBpa2M~)u}IQ$JHS3C$O z%Tv44(s#|%5-gv-ew=p{s=_pZ>nn$1m7~!pP%AqlP#^%z{o!Z}hHy~p?tJynxsfZy z_0{s#D@$-JQS>emcz*JEY?hZjRj=H#16a(Jo<$ERwE@l<29BtzqET6SFk@%IjW66! zy`I?8FexANa{1?HvfTuZZUwhMAYt8H0`T0!BFL=}>jh&(c(Dd6z zY2azovvbtIzyLWRN+rd?F02HGm*B}1H5(L~c6Zh1x1nIslC9(-#s~bx=}T*|_3pYg zY2w5NHG*SL3+ z59Q1I(DvAbj-UQp;pK`VBqnikxU4@m&g&AZsmrcX+d1@=D)t;}i3oa!3m3Q3dXF`& zJN^Ar3J;e4ApoM~>roooU#G5J?c>7n9?9t#fidUd@WNnabwElj0|Vmz`AG%mAuX9n z{v1~Z5Y@y3nYt8gY7Un<|KfdK{UC@g^&S5%`KJ|sCI0>M)=sST4R2nacz6*xcL31S zCE$n8pFQ@6&r!_U38bieNL)ZJhg<*q9Y@Z8zW47xiZK7^B2`uTqJBr~q)Q2gj?WcZ zr3wr$Hb6So4p5K)u_p>@!@%SVboO5iv%7-EKff{V5l~@gsIIC&&*I9)z!St>JZEyd zfIPEPQx(^HXiVFwfzxOL!&3^8v0S0fLtg8R9BC>w*MGih?MvIE$g^xv0aa4e14)^o zeyg^5>#x67adG_|W!QrCYnV--?t%PB=osJOwo?<$)BAh|V#ihGMZ5EisZ^0h7 zU1#HJCUxOm6Pk|}5chY>JPA^1?PO@p$crs9#K5%qGGzgh1AG;tij$-pT z!qPx`yGGU3O%VnL&gRqt8xB`-PU1(48y5UL{kQR~C3B zcJutwfEagfLo^c+Og#=WQ&MvW=gV9(OaT!lb8J4x^pH ziKD5P<8!d!G*C_8C~?tQ{djx1i)io!kx*n{u{L>ndbTT+G1ssZD_?J1ytrn_CFCRN ztDc+e3E5C`2EE4rwb;R=jg_Sxov^$lNBg#MWZGEl46DG7)>(e_Mmfj$kRKBd$gT8Pu(72sUUDF${PzP$UYt@h3^uhi|*#c)- zSk)vBJSFL^OEJY=qsYBQz|ZVbxCNr zzkV^Di0Bf)t=wu zrW9-N(Q6cwEKa~0tb}tU1URpbS?7dRhYIj~a&yzXZIls=Ls=v8s*DK99sViQdLRCr zMz2)Dz}dSLlSdv6>zMr`s+7=i%vy;`IP5|Q7A>EhyE5hlotYA_CPj7M1m`LoSb1qm z=+u2ZYZ|V07s`XVednamIO}3Fl_hA#0(Hgazsu+#laa(dE0(q60r#cgxiYSBpx|@V*_XJHDE>x zzP7c2(i=0ioqAX6r)Q6uw7h=rL;sZ@)NxNJA7Zhqx7Vm2%pY84oT#Qok?u{*eOfLP zSwuO&ewDo+Nxh=qw9nXFft#LVIBo2Cw6#X*jOI#wUNyUthN_XkX z=6-vu)+!)C$;6RuN83Db;J*tw&1drF^Br+a*g&%{?C0KJm|#szyi^S>m#Q%6P5hXD zP(i&7ivJMxd30Am0iKiz#fzraa$ewcfX&np5Isuolfs-js z0bg4070c=%-}6~Gw$}YAXT*CZYZzfbqIx`APqF~r1ol7z<=8+f&2h~QCe1t}qg&cg z61=qn>i3x?jNUsbTnR$TO^yK(?HOvZiqgLXn*c^m;j_D5^Q8J^Qg|Qf_po1IA|P9u zHHRq2dO#8ai{;kvx%g7&1G#Teb<)h%Mo}@J>u)irl*Z1I97tTOcoXX!K`|^p#%KV3 
ztw)|qZDr*dNQLKIO9_!)kSay_+}$lbhYZ%EVzVjht2!%i?D{z(wWU{b1db?`;KUH& z_ux|geEs^+zJ2?!^0)#g1uMX~=hIDbiIE3~#?^mX4G+wwea8QX29XbW?h*=9D{+?O z;k3#_6U5w;=Hlj3$BU2+sXq@|Oibkxowk;gr92BZab)M=<(0bghe8VlV28pjY4-nC~FVYHo4}l@-qKxA(x{6=GqXU{{!yr^@JJH3Q^ zRD6w`yRwggB&ro}S*^p@>;9ea9g<)YmkVS0bWs>0e;69OtmX7qZ+3}?O+SZq*b*$r zN?V`HuV=p+v(6UpSr3uyQz#NYOFXmSd1-9jAd8XrV^`VeZhT+lEYq*fnumT06TdyKsr~2h?6ghy?%gv^_nZocF3C-l6&be2lx3A|#py|q zJ3d`^ru_A!yBzpeaPm5R<$JnnOeOXWx86vd0pc%O(o}_aLXa)}=d;42&L0B!QWlz` z9x5J6xTrNUxgIG4eiZ5NCrNRvt*FqrW^Gh4{N$!&pNaoQmzPHgG`Yd*& z?P^Y-SW6GAOea*{ReEfYO>$yWZFzUwMe7(jF}VHp$oyUDEnQyV=LW_>rM+D*qKT|h zZ0Zb(x}Vh*tFg)IwP{3(cBKjEtJ0QbTBfcq^A3cr0hoz3QO=L|WxCX|)73vxy5&-h zxI!9Bldic^Y>Jozg;QSiQ+&hyj~SAjx}HtG$Vik>_@3_S?>Gmsfz@8hM>oIz7{`0{ zL+8#qsv9Q03N}dA^v<82zexWrU*lEaB@=$^H?vEHj*QUYvJZgG$ssf7$-F~ZxRxu? zx;dg&uHr2Us6&YzMZk<>A;wQE(e@P1ood47jCR)B*A|(yR^n8__!d!D{b3t>XtUuuF7guEanhLWRBnt*N`L$H; z9N4=fwLX_(q1Bh)6pcOnH-T{QGx+Fbw1h$k+3Vtpau_TqcSE7=2};3I2cTF8W&)a? zgbjFxjI5qj)F^r}zq{l~jf^;hddP^shkCf&VzVpSp5&@j3gRJA+V{d_s+G~M0}y_O z0a|yRy|0796x;5u=x&gngQHqrwPAWqHnX-Y#V`SURXoH~uG=~#vPsHY2xql~*v7_2 z&6OPpany-D3^Sd`JDLuw=yzC@T)#(S-x~dHm_^zs4_;o-Q(4*0CCD=^E9dDY0D?=v z-{oldlz$?*w^lB(PJn%cBN{4$QtSlIK>uHaB{n-T@oQN$QXZ}8xTgjvG2|9J$(VvK)PpC>= zTHmbY-gjK^r!6p4^>byPc9xwOLg;Rvfh%l>glk^5xI#8?BjWpQ0el99gYZiV27TS6 zz54NN(9K}=I{Z`Rk$ulnj&7~#6qhiI8d|Y5)}i>vj3a61aF%wDp*f(q@vC2+jr;h1 z4N62;b%b-j)cU?vSdX1;-_b@;aG^Zs#gqDUsbw+;-FqF6^o53XVZ2wCwEj=l--{%-43 zv$XEDRT{XYXd~HRn{DYJRL`w7FhaX4WP?6s21Yl0_5u0eZY zFOKOnX@7dF2`r%Y88H?V^opgNQhthepXM^Eej4jfv9#hvo?wE8yDaC;8Bi7*sKJDxhgaIDeVY8)B$v&cse zi*hz;wC?V*Y(h<#IuX;(5e3Kf=HQB>&lpKUN5M#IIsaPQQI7@xXVa+Ur56zzw=Eu1 zVR`5@xi8htz2`oB?U7pJtE7K~P2>t&s+Cn;xomBKfnfhfT0U=10C2JBj9ke<7k4Dd zfcQk{BwQ-DJ!cywO;#uX6G0>VPkJp--DIkyIpD@tH>ZyI;enLsCoxD&f4k~nK77-1AcAnQf|9`) z^ORPyFolQ^f$FBOS8woZA)x_*H6?Oi&{H2-a}y)2E|73NEJ(B>#aV$dyC6U98LRu zIdn=~bK+11@?j9T7dNwTDJA}CkjqMxOu+~SAJpp`wf40or{=RW*7B@UD?woSY-%Y} zIiE$1ur|;p*)#9yNHU8{!0?u3aV2ce=LOy~8}l8|!h(`2AJ>C^4(|U3Hl@APw2Z0Q 
zT?%*my86zEEtD|bJ6By3QGKYZr)7WKu!7<6*d0e6NLXuA?|NtRnIpb|n%h(T4)4u+ zqH=#<1~F|ptb#OMVZ2a&{#F!vZUVh+o&Fr)y%{i`D>jjGI{iqsZZUYIfulm7k=@nH zXL;&oRZJu($PEDn48`8yBDt5REn`&WSh=6|krgb!Gr|jou%_!j#Gz{O&g)$~Je&BZ)Fw7!aNVsrGYzyK zY!aptvmoD;_QY|hO|orKMx4 zG_i(2EM*Nt8E5nNxzyUHA!jf5GWc|f(?3)D>u!i$Q}hQApmN0q?Abd`?S2`Wz}!r! z%YZTQV-6fzAoe>CB4a@EP-1dw6N0#omrcDfh*;(=?CCMa9O@}@&^1a1I-*UbD5k#F z+4LdNCu;3c$SlFU?^mtA(P^OR^l#g%+(8*WPMuS%R3VipV#@)sjp2fn_7|V1Pk`S0 zU#41N87E$E5n`9psk?fLRLHcCP*i0&E?##A-%T|Kv)*CHF18*@72i^8vmY?Vm{CTX zq2})IXUL&;HdKaPoM_ZFgxI~Ibk;+>Y^hB!0^E_X%MLPQH}|!Y4epsR!~pR#LIMSG z>Z$?BKbOkeVXsj3GS^4^?9_qQzn!!FrbEwgUQ~LV5x+~Y7_Lg8@yYFhbC?Rm*11Vl zot%6vwn|V3WmTb){iHbr7%{PW(IO^{w2+`IK-k~BAT8+t0PxG$Ljgq09#p^iDdaxL z|16ApuR8@9=|_`)da)_q1td#r!lso_i4R9Ga8@3xY%Cat6&9Wbzt+{85nUBa9fwSg z-g@Tn!PVYNDVroDN*-m+i&+D-Q$UE9!8Ukd%0smX4yND`>C+T$G2Ln&M`G;~yFhv+ zc+eQG+6?FXt|>31tJwKxT}w5n3~7HA#zp++_mg$2*TfV5%ac3`L$!d5W)ySLuFuWI z;*f6j2?i&2OO!c^pT4vVVC^VRS6NvzpuJM-_BSpM*%7ryS4DDZSIXS5AiB_`PtvV( ztieUCb$ya&1nNY$m_=Ew2E&I`Dm)igA6*h}5tC$p6UYo@OT)ZHFzt0vL94H%`;3Y4 z!7S`+#@8a@a_;a9Rrb zn8_opRKld}<_?@YRIgi=Xt(Fhx}&c#r7(O# zYKD~n46&jRzkaA_S3_=*w(^Q$pF)TCn_ZtHdi@Oek-_Rz=6h0S_R^<&NYlaH{6tKD z#kshpw=D7?982rfwu)mqt~VcY6kg%p8CJ1W}2oN793qCYU;N9ngP+VN_H z8Xc_Z#TR06tz2pbR7y+b)?28Vk1sV}kuB9odziFnJp2>R=@}u3DO%bF8S8OA2!qBw z!=B3604*cy4F|GbF^xZu5NlVzLxnrsSs|tpz8>}ew_X@TijRpK%vM6J`x(jf%7tdg z9its7E)`RQ(_FSvm2{0kPRUoR546&Fsm!Ry?MH*brHmDuV`*urvh33a{Ql!- zWZi24-?lG(DRqF$q3_6l0-vF1j-^I2U@ny6Q%fboYOvBCor)W+qz42@ptARsH5_TX zQdQjzk1u&?8L&wJ#pmV)<8d9MqQ_yqiz5+6D~BXoIPYP|=`M}`+>dkVQ&2O%x!A4J zMO`0&w04GyTJ7F77Du1LhfBL9zo!_>6NEf#oY&a$_`Kij_;)!@Wnhi;t-JiiQA*0& zipTF~-`W)=btTgr-yS>&}V{b6iVJ?*WSq0l_P~^yv<%e3?RyW1hV^q2=ib4F> zFX+;GheEej>jPxR;>cbMH@wz9V_OY^*}|gQx;oY8F+9kGKeYg0XJK%jI|n>{RIL@e z_Ne?tYUH%tT^>5Ov$)XE^+5Q$ze}uMcAvAZ^JPfN6>!Yz_p@oe9;_^8Q2IM7SFdkB z>;N;PQ;QD+(O50t5G!>0a#);dKmQx{q7NRxgxcN0*=`LP=VD(wXcg46vPg z_N<-lw7;^#6Sye;i}Du30*4%5udUgWmhZ9vL44P{;^>u!%3Y+F7>9F7Qqmv*$?Xfe 
z)DXNKtN+Imx>6}4g5B?}p9acgmclOUi~D$mVqv-%_nw^Xd}HGe8fvY(7#8vP z``eD&&@i2Tb?Hb{xt(=+aOUDf_JLzr+}()7vS8`;kxAFn=4klP`IXwlBSq5seUYAe%}w%!`E_Sta|mw!d_3~5INvFK<%0a_!I+7}#E zpRaQUL|;%N^&OLB{h~P#+yZ{bT`Ps+w|myTW(rE_$8bycWXu^Z_Y+8A+I~k{8)W>o z`O9xxw3(Vby7W)iu`;VzhdFF*O0%fN=?rP?BcEs&{KlsZzx{ zpSqMX>8>b#m}#wg)S>OH$T__uz4$~fVkS?r^uY75B^12U6(z(JFH2ix6^lpt+kt}i z%+u-QH$mrBJOmq^9+bV~2e2a*WtMX>Hp@%k2>(aoWEP_tRD#OQThm!WBIlwAU49xZRcVbOaSdL|3~yzPlHH|TB+dsJLUKwu2V1?9-TVpM5NhgbQo(`+tob% z+p97B)Mth=0EOZh8zLA7vJB%AJt*^62VmCR%d5}OuNuKDSPM|#g}7R-kp_W*-$rds zf!#4IBORhi2v+nUbjXN?7-MJVjDiDG9Gq^yD zkDN_uZ^jxT?Xj58neK(oK%%O2cb>~a~bQvZ;^IS zYI|Fg-Go!=#h}{R|8RU?vRSlKc}ev;@>9&BcT`WlSO!J!_M@d#Y~pOeZX07dz`hPo zKC`%1P6|Py7ITwrkGPH$)kqxkSZG2r8y;Lr-amN%&W3~cc42cA+qSeR$W~(s3cskO~?3cp#(qWZ%^pDfBfJjK14Xmd{)WHl0cWELN1a6Timj0b0!m$^LXQq zJJDYav0jLtG0ZdP*OZhL3hYPD5iMCk;kTAS+@CUpMIk!RNZ50hry&E5#FoKe!3Z&} z!zoA)qcA?$x?yLlS=J)hBN_#R!H;yhV)Z9%ryQ%{^0M^GkE}(!p=pn1%4y?s@Nt=; zsUYSWi0%%opjsFrefr{XA=2I6`JiKeqi`az35T=Vsk~P%sN(n>;itiKQAXJ5UDxrVsHRuOm!WON-^WzaDo?T`Hm+ zrO@I%tizUO0=2)=g6K5|bsy&RHI39tGKMjj&zH<13Kcw&F}ynZQ?Tgq&@>d5ej-Fv zY?MH?)3jzSXiLmbqLC3G&^%kqIWUwrW*Bo3Ie%s=<7z`wD$gM@2wa6x$a|^r(Hh#{ z4D1>9iuTe2R1nLwCjCXtBzlGn0@$HxMB~;X4-|Aw&ag=RqfV4-p0FqutnWTDP#RIg z6u2f$0P~YQtz_;2Bv89!)pr1t@D0KanZFN;WH}|#Z?*JILI9pOXrn!vQ>lDhsY|;Q z3W%E<<6vL!IgD+E?xfFhnT`JH%i#CEOnyX>EEKHG6K0p*e(|26iQ~ipTaAJizJ6{C z8Arg7YE2|%$*q+3Qld)(KhIHk7e>Gy=YLhdO^JLPoW3?%j#H6wFT}WI zE`fZsJ;xEdrJtuwe`9l_g-32`Te0V{0j(PI{&Sc%M{}6~VOV;a=>S5FVf9uiiS5fa z>ZZHl65-NG>+qEQ(&Pmez8e(B1L~%|`n1We%W%4h5!A8sMA=A{aa;Pi(y< ztOjysezomjLch4j2r|!Zk2Itxl5uW@P%8dd1QRB;4D28 zS{ed+mL3oS50@B&{4U38iLZ%bX)mm#+RJV?{ETIR&2OKiwPkKW5T6avwntJtsQ z%V_dH`iH`ymALSq0CstQY7_zL6fO-!0pj?jS9j%gOO>8LycqB=s^oQ^u(nWxdn`X%t}b5|h%l zM~u~;NbWabDF^xlfGkRe5uif{M0keMi6{i5?(oG@#!ZN0n=XFCZ{ddAh0iZa9M=aB* zg$(R?aqFP3zW&-C=0ZN#@ax~uE`#AXS~G37jZ*KPQ@B*Mc_A~@d0^C*X{Up5Qzw^dsMXNp!Yu6wzV zarNV6f(#4Em)E=P+kNy#SRm<`Q6Yny_iKNV=NY3QFR1TI{O+2z$sd40DY`Lw6L 
z>V4|g+pNJxURDy787FdwP_dRF8S@j@84GYDUkOIQ6flsXb9{^`1eht3WL9@|XHq`y z0evy5NOv5zF8ML^AgQ4+4k>;Ay_6fIof#riVgV3TDDyXJjqd*>v_6?lz_5^Ra7$_j@!!8(_tobN`0ro#Z0X(l_s{ZQ z-qVi%gV+E4hX+6W???XoH2(Kl{`)Kx6#jP+{nyj@-`(?H4}*~;A37X6d0p4~%N|DZ P_xe+eCPz$M_@Dm+xVMqi literal 0 HcmV?d00001 diff --git a/post_processing_grids/post_processing_grid.py b/post_processing_grids/post_processing_grid.py index 17d0aee5a..e5c87ea1f 100644 --- a/post_processing_grids/post_processing_grid.py +++ b/post_processing_grids/post_processing_grid.py @@ -292,7 +292,7 @@ def extract_solidification_time(cases_data, phi_crit): # Paths grid_path = '/home2/p315557/outputs_Norma2/good_grids/escape_grid_4_params_Pxuv_a_epsilon_fO2/' - plots_path = '/home2/p315557/PROTEUS/post_processing_grids/plots' + plots_path = '/home2/p315557/PROTEUS/post_processing_grids/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/' # Plots : True or False plot=True From cc4cac98732e02956dc64db043d624837185fc65 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 18 Apr 2025 20:34:27 +0200 Subject: [PATCH 008/105] Move post_processing_grid folder to tools --- .../simulations_status_summary.png | Bin .../post_processing_grid}/post_processing_grid.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename {post_processing_grids => tools/post_processing_grid}/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/simulations_status_summary.png (100%) rename {post_processing_grids => tools/post_processing_grid}/post_processing_grid.py (100%) diff --git a/post_processing_grids/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/simulations_status_summary.png b/tools/post_processing_grid/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/simulations_status_summary.png similarity index 100% rename from post_processing_grids/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/simulations_status_summary.png rename to tools/post_processing_grid/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/simulations_status_summary.png diff --git 
a/post_processing_grids/post_processing_grid.py b/tools/post_processing_grid/post_processing_grid.py similarity index 100% rename from post_processing_grids/post_processing_grid.py rename to tools/post_processing_grid/post_processing_grid.py From e6c59ed25f67c2d5d4b018ee89ac0c4643ce702a Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 18 Apr 2025 20:47:52 +0200 Subject: [PATCH 009/105] try to do plot function : fail --- ...us_summary.png => grid_status_summary.png} | Bin .../post_processing_grid.py | 92 +++++++++++++++++- 2 files changed, 89 insertions(+), 3 deletions(-) rename tools/post_processing_grid/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/{simulations_status_summary.png => grid_status_summary.png} (100%) diff --git a/tools/post_processing_grid/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/simulations_status_summary.png b/tools/post_processing_grid/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/grid_status_summary.png similarity index 100% rename from tools/post_processing_grid/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/simulations_status_summary.png rename to tools/post_processing_grid/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/grid_status_summary.png diff --git a/tools/post_processing_grid/post_processing_grid.py b/tools/post_processing_grid/post_processing_grid.py index e5c87ea1f..66ca80aa8 100644 --- a/tools/post_processing_grid/post_processing_grid.py +++ b/tools/post_processing_grid/post_processing_grid.py @@ -5,6 +5,11 @@ import seaborn as sns import matplotlib.pyplot as plt import re +import numpy as np +import matplotlib.colors as mcolors +import matplotlib.cm as cm +from matplotlib.ticker import FuncFormatter + def load_grid_cases(grid_dir: Path): """ @@ -140,10 +145,14 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): plt.yticks(fontsize=12) plt.xticks(fontsize=12) plt.tight_layout() - output_path = plot_dir+'simulations_status_summary.png' + output_path = plot_dir+'grid_status_summary.png' 
plt.savefig(output_path, dpi=300) plt.close() + print('-----------------------------------------------------------') + print(f"Plot grid_status_summary.png saved to {output_path}") + print('-----------------------------------------------------------') + def get_grid_parameters(grid_dir: str): """ Extracts grid parameters names and values from the manager.log file @@ -286,13 +295,80 @@ def extract_solidification_time(cases_data, phi_crit): return solidification_times +def plot_cumulative_distributions_by_grid(parameters_to_extract, grid_parameters, data, xlabel, ylabel, color_map, plot_path): + """ + Function to plot cumulative distribution for multiple parameters. + + parameters_to_extract : list of str + List of the parameters you want to extract and plot (e.g., ['esc_rate_total', 'Phi_global', 'P_surf', 'atm_kg_per_mol']) + + grid_parameters : list of dicts + Each dictionary in the list will define grid parameters like 'sma_values', 'eps_values', etc. + + data : dict + Dictionary containing data for each parameter. Format: {parameter_name: data} + + xlabel : str + Label for the x-axis. + + ylabel : str + Label for the y-axis. + + color_map : list of matplotlib colormaps + List of colormap objects, each corresponding to different grid parameters. + + plot_path : str + Path where the resulting plot should be saved. 
+ """ + + # Create subplots based on number of parameters + fig, axes = plt.subplots(len(parameters_to_extract), len(grid_parameters), figsize=(14, 10), sharex='col') + + # For each parameter to extract, plot a column of cumulative distributions + for param_idx, param in enumerate(parameters_to_extract): + for grid_idx, grid_param in enumerate(grid_parameters): + # Extract grid values and corresponding color map + grid_values = grid_param['values'] + cmap = color_map[grid_idx] + norm = mcolors.Normalize(vmin=min(grid_values), vmax=max(grid_values)) + + # Create a list of colors for the grid values + colors = [cmap(norm(value)) for value in grid_values] + + # Plot for each value in grid_parameters + for i, grid_value in enumerate(grid_values): + parameter_data = data[param].get(grid_value, []) + if parameter_data: + sns.kdeplot(parameter_data, cumulative=True, color=colors[i], ax=axes[param_idx, grid_idx], bw_adjust=0.3, common_grid=True) + + # Set labels and grids for the plot + axes[param_idx, grid_idx].set_xlabel(xlabel, fontsize=12) + axes[param_idx, grid_idx].set_ylabel(ylabel, fontsize=12) + axes[param_idx, grid_idx].set_ylim(0, 1.05) + axes[param_idx, grid_idx].grid(alpha=0.2) + + # Colorbars + for grid_idx, grid_param in enumerate(grid_parameters): + # Create a color map for each grid parameter + sm = plt.cm.ScalarMappable(cmap=color_map[grid_idx], norm=mcolors.Normalize(vmin=min(grid_param['values']), vmax=max(grid_param['values']))) + sm.set_array([]) + cbar = fig.colorbar(sm, ax=axes[:, grid_idx], orientation='vertical') + cbar.set_label(grid_param['label'], fontsize=10) + cbar.set_ticks(grid_param['values']) + cbar.formatter = FuncFormatter(lambda x, _: f'{x:.1f}') + cbar.update_ticks() + + fig.text(0.075, 0.5, 'Normalized cumulative fraction of simulations', va='center', ha='center', rotation='vertical', fontsize=16) + + # Save the plot + plt.savefig(plot_path+'test_figure.png', dpi=300) if __name__ == '__main__': # Paths grid_path = 
'/home2/p315557/outputs_Norma2/good_grids/escape_grid_4_params_Pxuv_a_epsilon_fO2/' - plots_path = '/home2/p315557/PROTEUS/post_processing_grids/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/' + plots_path = '/home2/p315557/PROTEUS/tools/post_processing_grid/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/' # Plots : True or False plot=True @@ -311,6 +387,16 @@ def extract_solidification_time(cases_data, phi_crit): output_to_extract = ['esc_rate_total', 'Phi_global', 'P_surf', 'atm_kg_per_mol'] for param in output_to_extract: extracted_values = extract_grid_output(cases_data, param) + + print('FIX SOLIDIFICATION TIME FUNCTION AND PLOT FUNCTION') # Extract the solidification time phi_crit = 0.005 # Critical melt fraction for solidification - solidification_times = extract_solidification_time(cases_data, phi_crit) \ No newline at end of file + solidification_times = extract_solidification_time(cases_data, phi_crit) + + # Plot cumulative distributions for the extracted parameters + xlabel = 'X-axis Label' # Example label for the x-axis + ylabel = 'Normalized cumulative fraction of simulations' # Example label for the y-axis + color_map = cm.viridis # Example colormap for plotting + + # Call the function to plot cumulative distributions for the extracted parameters + plot_cumulative_distributions_by_grid(cases_data, output_to_extract, xlabel, ylabel, color_map, plots_path) From 860794215309c16a140754a1b98da10046941e68 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 21 Apr 2025 12:14:34 +0200 Subject: [PATCH 010/105] Try to save extracted data in a csv file : still working on it --- .../post_processing_grid.py | 229 ++++++++++-------- 1 file changed, 127 insertions(+), 102 deletions(-) diff --git a/tools/post_processing_grid/post_processing_grid.py b/tools/post_processing_grid/post_processing_grid.py index 66ca80aa8..b471aa1fe 100644 --- a/tools/post_processing_grid/post_processing_grid.py +++ b/tools/post_processing_grid/post_processing_grid.py @@ -9,6 +9,7 @@ 
import matplotlib.colors as mcolors import matplotlib.cm as cm from matplotlib.ticker import FuncFormatter +import csv def load_grid_cases(grid_dir: Path): @@ -149,9 +150,7 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): plt.savefig(output_path, dpi=300) plt.close() - print('-----------------------------------------------------------') print(f"Plot grid_status_summary.png saved to {output_path}") - print('-----------------------------------------------------------') def get_grid_parameters(grid_dir: str): """ @@ -243,15 +242,13 @@ def extract_grid_output(cases_data, parameter_name): columns_printed = True # Print the extracted values - print('-----------------------------------------------------------') print(f"Extracted output (at last time step) : {parameter_name} ") - print('-----------------------------------------------------------') return parameter_values def extract_solidification_time(cases_data, phi_crit): """ - Extract the solidification time for planet that reach Phi_global < phi_crit. + Extract the solidification time for planets that reach Phi_global < phi_crit. Parameters ---------- @@ -265,7 +262,7 @@ def extract_solidification_time(cases_data, phi_crit): ------- solidification_times : list A list containing the solidification times for all solidified planets of the grid. - If a planet never solidifies, it will not be included in the list. + If a planet never solidifies, it will have NaN in the list. 
""" solidification_times = [] columns_printed = False @@ -279,124 +276,152 @@ def extract_solidification_time(cases_data, phi_crit): first_index = condition.idxmax() # gives the first True index solid_time = df.loc[first_index, 'Time'] solidification_times.append(solid_time) - print(f"[Case {i}] Solidification at index {first_index} → Time = {solid_time}, Phi = {df.loc[first_index, 'Phi_global']}") - + else: + solidification_times.append(np.nan) # Append NaN if condition is not met else: if not columns_printed: print("Warning: 'Phi_global' and/or 'Time' columns not found in some cases.") print(f"Available columns: {', '.join(df.columns)}") columns_printed = True + solidification_times.append(np.nan) # Append NaN if columns are missing + + # Count the number of cases labeled as 10 Completed (solidified) + status_10_cases = [case for case in cases_data if (case.get('status') or '').strip() == '10 Completed (solidified)'] + completed_count = len(status_10_cases) + + # Count only valid solidification times (non-NaN) + valid_solidification_times = [time for time in solidification_times if not np.isnan(time)] + valid_solidified_count = len(valid_solidification_times) print('-----------------------------------------------------------') - print(f"Extracted solidification times ") - print('at timesteps when Phi_global < {phi_crit}') - print('-----------------------------------------------------------') - print(len(solidification_times)) + print(f"Extracted solidification times (Phi_global < {phi_crit})") + print(f"→ Found {valid_solidified_count} valid solidified cases based on Phi_global") + print(f"→ Found {completed_count} cases with status '10 Completed (solidified)' ") + if valid_solidified_count != completed_count: + print("WARNING: The number of valid solidified planets does not match the number of planets with status: '10 Completed (solidified)'") + print("\nChecking final Phi_global values for all status '10 Completed (solidified)' cases:") + for i, case in 
enumerate(status_10_cases): + df = case['output_values'] + if 'Phi_global' in df.columns: + final_phi = df['Phi_global'].iloc[-1] + #print(f"[Status Case {i}] Final Phi_global = {final_phi}") + else: + print(f"[Status Case {i}] Phi_global column missing.") + else: + print("Solidified planets count matches the number of planets with status: '10 Completed (solidified)'.") + print('-----------------------------------------------------------') + return solidification_times -def plot_cumulative_distributions_by_grid(parameters_to_extract, grid_parameters, data, xlabel, ylabel, color_map, plot_path): +def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, extracted_value, output_to_extract, phi_crit, output_dir: Path): """ - Function to plot cumulative distribution for multiple parameters. + Save all simulation information (status, grid parameters, output values, solidification times) + into CSV files for later analysis (doing plots). - parameters_to_extract : list of str - List of the parameters you want to extract and plot (e.g., ['esc_rate_total', 'Phi_global', 'P_surf', 'atm_kg_per_mol']) - - grid_parameters : list of dicts - Each dictionary in the list will define grid parameters like 'sma_values', 'eps_values', etc. - - data : dict - Dictionary containing data for each parameter. Format: {parameter_name: data} + Parameters + ---------- + grid_name : str + Name of the grid. - xlabel : str - Label for the x-axis. + cases_data : list + List of dictionaries containing the status of all the simulations cases in the grid. - ylabel : str - Label for the y-axis. + grid_parameters : dict + Dictionary containing the grid parameters. - color_map : list of matplotlib colormaps - List of colormap objects, each corresponding to different grid parameters. + extracted_value : dict + Dictionary containing the extracted output values for each parameter. - plot_path : str - Path where the resulting plot should be saved. 
+ output_to_extract : list + List of output values extracted from each simulation in the grid. + + phi_crit : float + Critical melt fraction value to determine if the planet solidifies. + + output_dir : Path + Directory where the CSV file will be saved. """ + # Ensure the output directory exists + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + # Prepare the header + header = [ + "#############################################################################################################", + f"Grid name: {grid_name}", + f"Total number of cases: {len(cases_data)}", + f"phi_crit: {phi_crit}", + "----------------------------------------------------------", + " Grid Parameters", + "----------------------------------------------------------" + ] - # Create subplots based on number of parameters - fig, axes = plt.subplots(len(parameters_to_extract), len(grid_parameters), figsize=(14, 10), sharex='col') - - # For each parameter to extract, plot a column of cumulative distributions - for param_idx, param in enumerate(parameters_to_extract): - for grid_idx, grid_param in enumerate(grid_parameters): - # Extract grid values and corresponding color map - grid_values = grid_param['values'] - cmap = color_map[grid_idx] - norm = mcolors.Normalize(vmin=min(grid_values), vmax=max(grid_values)) - - # Create a list of colors for the grid values - colors = [cmap(norm(value)) for value in grid_values] - - # Plot for each value in grid_parameters - for i, grid_value in enumerate(grid_values): - parameter_data = data[param].get(grid_value, []) - if parameter_data: - sns.kdeplot(parameter_data, cumulative=True, color=colors[i], ax=axes[param_idx, grid_idx], bw_adjust=0.3, common_grid=True) - - # Set labels and grids for the plot - axes[param_idx, grid_idx].set_xlabel(xlabel, fontsize=12) - axes[param_idx, grid_idx].set_ylabel(ylabel, fontsize=12) - axes[param_idx, grid_idx].set_ylim(0, 1.05) - axes[param_idx, grid_idx].grid(alpha=0.2) + max_label_length 
= max(len(param) for param in grid_parameters.keys()) + for param, values in grid_parameters.items(): + aligned_param = f"{param: <{max_label_length}}" + values_str = f"[{', '.join(map(str, values))}]" + header.append(f"{aligned_param}: {values_str}") - # Colorbars - for grid_idx, grid_param in enumerate(grid_parameters): - # Create a color map for each grid parameter - sm = plt.cm.ScalarMappable(cmap=color_map[grid_idx], norm=mcolors.Normalize(vmin=min(grid_param['values']), vmax=max(grid_param['values']))) - sm.set_array([]) - cbar = fig.colorbar(sm, ax=axes[:, grid_idx], orientation='vertical') - cbar.set_label(grid_param['label'], fontsize=10) - cbar.set_ticks(grid_param['values']) - cbar.formatter = FuncFormatter(lambda x, _: f'{x:.1f}') - cbar.update_ticks() - - fig.text(0.075, 0.5, 'Normalized cumulative fraction of simulations', va='center', ha='center', rotation='vertical', fontsize=16) - - # Save the plot - plt.savefig(plot_path+'test_figure.png', dpi=300) + header.extend([ + "----------------------------------------------------------", + "This file contains the following columns:", + f"| Case number | {' | '.join(extracted_value.keys())} |", + "#############################################################################################################" + ]) + + # Prepare the CSV file path + csv_file = output_dir / f"{grid_name}_simulation_data.csv" + + # Define the column headers for the CSV file + csv_headers = ["Case number"] + list(extracted_value.keys()) + + # Open the CSV file for writing + with open(csv_file, 'w', newline='') as csvfile: + writer = csv.writer(csvfile) + + # Write the header as comments at the top of the file + for line in header: + writer.writerow([f"#{line}"]) # Add a '#' to make it a comment in the CSV file + + # Write the header for the data section + writer.writerow(csv_headers) + + # Write the data for each case + for case_index, case_data in enumerate(cases_data): + row = [case_index + 1] # Start with the case number + for 
param in extracted_value.keys(): + row.append(extracted_value[param][case_index]) # Extract the value for the current case + writer.writerow(row) + print(f"Data has been successfully saved to {csv_file}.") if __name__ == '__main__': - # Paths - grid_path = '/home2/p315557/outputs_Norma2/good_grids/escape_grid_4_params_Pxuv_a_epsilon_fO2/' - plots_path = '/home2/p315557/PROTEUS/tools/post_processing_grid/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/' - - # Plots : True or False - plot=True - - # Load simulation cases - cases_data = load_grid_cases(grid_path) - - # Plot = True or False, if True, it will plot the status of all grid simulations + # Paths to the grid and the plot folder + grid_name = 'escape_grid_4_params_Pxuv_a_epsilon_fO2' + grid_path = '/home2/p315557/outputs_Norma2/good_grids/' + grid_name + '/' + plots_path = '/home2/p315557/PROTEUS/tools/post_processing_grid/plots/' + grid_name + '/' + data_dir = '/home2/p315557/PROTEUS/tools/post_processing_grid/processed_data/' + grid_name + '/' + + # User choose the parameters to post-process the grid + plot=True # True or False + output_to_extract = ['esc_rate_total', # List of output values to extract from the runtime_helpfile + 'Phi_global', + 'P_surf', + 'atm_kg_per_mol'] + phi_crit = 0.005 # Critical melt fraction for solidification + extracted_value = {} + + # Post-processing the grid + cases_data = load_grid_cases(grid_path) # Load all simulation cases if plot: - plot_grid_status(cases_data, plots_path) - - # Extract grid parameters - grid_parameters = get_grid_parameters(grid_path) - - # List of output to extract - output_to_extract = ['esc_rate_total', 'Phi_global', 'P_surf', 'atm_kg_per_mol'] + plot_grid_status(cases_data, plots_path) # Plot the summary histogram of the grid status + grid_parameters = get_grid_parameters(grid_path) # Extract grid parameters for param in output_to_extract: - extracted_values = extract_grid_output(cases_data, param) - - print('FIX SOLIDIFICATION TIME FUNCTION AND PLOT 
FUNCTION') - # Extract the solidification time - phi_crit = 0.005 # Critical melt fraction for solidification - solidification_times = extract_solidification_time(cases_data, phi_crit) - - # Plot cumulative distributions for the extracted parameters - xlabel = 'X-axis Label' # Example label for the x-axis - ylabel = 'Normalized cumulative fraction of simulations' # Example label for the y-axis - color_map = cm.viridis # Example colormap for plotting - - # Call the function to plot cumulative distributions for the extracted parameters - plot_cumulative_distributions_by_grid(cases_data, output_to_extract, xlabel, ylabel, color_map, plots_path) + extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values + solidification_times = extract_solidification_time(cases_data, phi_crit) # Extract the solidification time + extracted_value['solidification_time'] = solidification_times + + # Save all the extracted data to a CSV file + save_grid_data_to_csv(grid_name, cases_data, grid_parameters, extracted_value, solidification_times, phi_crit, data_dir) From c944fcb01b4194621d4fa752f06e67fabadb7825 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 21 Apr 2025 12:48:52 +0200 Subject: [PATCH 011/105] Save into csv ok now need to clean script --- .../post_processing_grid.py | 74 ++- ...rams_Pxuv_a_epsilon_fO2_extracted_data.csv | 541 ++++++++++++++++++ 2 files changed, 574 insertions(+), 41 deletions(-) create mode 100644 tools/post_processing_grid/processed_data/escape_grid_4_params_Pxuv_a_epsilon_fO2/escape_grid_4_params_Pxuv_a_epsilon_fO2_extracted_data.csv diff --git a/tools/post_processing_grid/post_processing_grid.py b/tools/post_processing_grid/post_processing_grid.py index b471aa1fe..7684e4aa3 100644 --- a/tools/post_processing_grid/post_processing_grid.py +++ b/tools/post_processing_grid/post_processing_grid.py @@ -346,55 +346,42 @@ def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, extracted_valu output_dir = 
Path(output_dir) output_dir.mkdir(parents=True, exist_ok=True) - # Prepare the header - header = [ - "#############################################################################################################", - f"Grid name: {grid_name}", - f"Total number of cases: {len(cases_data)}", - f"phi_crit: {phi_crit}", - "----------------------------------------------------------", - " Grid Parameters", - "----------------------------------------------------------" - ] - - max_label_length = max(len(param) for param in grid_parameters.keys()) - for param, values in grid_parameters.items(): - aligned_param = f"{param: <{max_label_length}}" - values_str = f"[{', '.join(map(str, values))}]" - header.append(f"{aligned_param}: {values_str}") - - header.extend([ - "----------------------------------------------------------", - "This file contains the following columns:", - f"| Case number | {' | '.join(extracted_value.keys())} |", - "#############################################################################################################" - ]) - - # Prepare the CSV file path - csv_file = output_dir / f"{grid_name}_simulation_data.csv" + # Path and name to save the CSV file + csv_file = output_dir / f"{grid_name}_extracted_data.csv" - # Define the column headers for the CSV file - csv_headers = ["Case number"] + list(extracted_value.keys()) - - # Open the CSV file for writing with open(csv_file, 'w', newline='') as csvfile: writer = csv.writer(csvfile) - - # Write the header as comments at the top of the file - for line in header: - writer.writerow([f"#{line}"]) # Add a '#' to make it a comment in the CSV file - - # Write the header for the data section - writer.writerow(csv_headers) - - # Write the data for each case + + # Write the header + writer.writerow(["#############################################################################################################"]) + writer.writerow([f"Grid name: {grid_name}"]) + writer.writerow([f"Total number of cases: 
{len(cases_data)}"]) + writer.writerow([f"phi_crit: {phi_crit}"]) + writer.writerow(["----------------------------------------------------------"]) + writer.writerow([" Grid Parameters"]) + writer.writerow(["----------------------------------------------------------"]) + max_label_length = max(len(param) for param in grid_parameters.keys()) + for param, values in grid_parameters.items(): + aligned_param = f"{param: <{max_label_length}}" + values_str = f"[{', '.join(map(str, values))}]" + writer.writerow([f"{aligned_param}: {values_str}"]) + writer.writerow(["----------------------------------------------------------"]) + writer.writerow(["This file contains the following columns:"]) + writer.writerow([f"| Case number | Status | {' | '.join(extracted_value.keys())} |"]) + writer.writerow(["#############################################################################################################"]) + writer.writerow([]) + + # Write the data for each case of the grid + statuses = [case.get('status', 'unknown') or 'unknown' for case in cases_data] for case_index, case_data in enumerate(cases_data): - row = [case_index + 1] # Start with the case number + row = [case_index, f"'{statuses[case_index]}'"] for param in extracted_value.keys(): - row.append(extracted_value[param][case_index]) # Extract the value for the current case + row.append(extracted_value[param][case_index]) writer.writerow(row) print(f"Data has been successfully saved to {csv_file}.") + print('-----------------------------------------------------------') + if __name__ == '__main__': @@ -425,3 +412,8 @@ def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, extracted_valu # Save all the extracted data to a CSV file save_grid_data_to_csv(grid_name, cases_data, grid_parameters, extracted_value, solidification_times, phi_crit, data_dir) + + print('-----------------------------------------------------------') + print("Post-processing completed. 
Let's do some plots !") + print('(Please check for any warning messages above before going further.)') + print('-----------------------------------------------------------') \ No newline at end of file diff --git a/tools/post_processing_grid/processed_data/escape_grid_4_params_Pxuv_a_epsilon_fO2/escape_grid_4_params_Pxuv_a_epsilon_fO2_extracted_data.csv b/tools/post_processing_grid/processed_data/escape_grid_4_params_Pxuv_a_epsilon_fO2/escape_grid_4_params_Pxuv_a_epsilon_fO2_extracted_data.csv new file mode 100644 index 000000000..2df4557f2 --- /dev/null +++ b/tools/post_processing_grid/processed_data/escape_grid_4_params_Pxuv_a_epsilon_fO2/escape_grid_4_params_Pxuv_a_epsilon_fO2_extracted_data.csv @@ -0,0 +1,541 @@ +############################################################################################################# +Grid name: escape_grid_4_params_Pxuv_a_epsilon_fO2 +Total number of cases: 525 +phi_crit: 0.005 +---------------------------------------------------------- + Grid Parameters +---------------------------------------------------------- +"Redox state : [-6, -4, -2, 0, 2, 4, 6]" +"Pxuv : [1e-05, 0.0001, 0.001, 0.01, 0.1]" +"efficiency : [0.1, 0.3, 0.5, 0.7, 1.0]" +"semimajoraxis: [0.1, 1.0, 1.5]" +---------------------------------------------------------- +This file contains the following columns: +| Case number | Status | esc_rate_total | Phi_global | P_surf | atm_kg_per_mol | solidification_time | +############################################################################################################# + +0,'10 Completed (solidified)',67703.72,0.004906388,498.4496,0.004871988,360243.0 +1,'25 Error (died)',10861850.0,0.9488592,472.4397,0.005472131,nan +2,'10 Completed (solidified)',29194.38,0.004841147,498.4835,0.004871882,336653.0 +3,'10 Completed (solidified)',203138.9,0.004995163,498.2177,0.004871985,360018.0 +4,'25 Error (died)',72574700.0,0.929459,353.3335,0.005614977,nan +5,'10 Completed 
(solidified)',145825.2,0.001995689,498.6418,0.00487306,337353.0 +6,'10 Completed (solidified)',338514.5,0.004965492,497.9852,0.004871975,359911.0 +7,'10 Completed (solidified)',204370.6,0.004841626,498.2091,0.004871895,336598.0 +8,'10 Completed (solidified)',87634.38,0.004907217,498.4127,0.00487195,336537.0 +9,'10 Completed (solidified)',676116.1,0.00204669,497.7406,0.004873107,360814.0 +10,'25 Error (died)',51757670.0,0.9287383,352.2479,0.005615245,nan +11,'10 Completed (solidified)',291979.3,0.004854271,498.0726,0.004871904,336516.0 +12,'25 Error (died)',31047200.0,0.9301765,360.4474,0.005602393,nan +13,'10 Completed (solidified)',473930.3,0.004911005,497.7644,0.004872003,360223.0 +14,'10 Completed (solidified)',28384.19,0.004892344,498.5124,0.004871967,336554.0 +15,'25 Error (died)',103600100.0,0.9295238,350.6526,0.005620765,nan +16,'10 Completed (solidified)',195985.4,0.004977072,498.2225,0.004871963,359873.0 +17,'25 Error (died)',45889640.0,0.9273917,347.1285,0.005622346,nan +18,'10 Completed (solidified)',326606.8,0.004938631,498.001,0.004871962,359834.0 +19,'25 Error (died)',9639414.0,0.9504914,486.1506,0.005459048,nan +20,'25 Error (died)',64554400.0,0.9295432,354.4608,0.005612868,nan +21,'10 Completed (solidified)',65334.27,0.004977864,498.4477,0.004871976,360257.0 +22,'25 Error (died)',92478120.0,0.9312607,359.3701,0.005607573,nan +23,'25 Error (died)',27585030.0,0.9301442,360.6329,0.005601931,nan +24,'10 Completed (solidified)',142114.3,0.004984716,498.2326,0.004871669,336325.0 +25,'10 Completed (solidified)',85016.01,0.002000602,498.7392,0.004873061,337298.0 +26,'25 Error (died)',8591862.0,0.9514668,494.8517,0.005450983,nan +27,'10 Completed (solidified)',27577.01,0.004861797,498.4816,0.004871814,336639.0 +28,'10 Completed (solidified)',457597.7,0.004984443,497.8171,0.004872081,359926.0 +29,'10 Completed (solidified)',198417.5,0.002036447,498.565,0.004873078,337419.0 +30,'10 Completed (solidified)',283490.0,0.002053718,498.4335,0.004873089,337300.0 
+31,'25 Error (died)',24619290.0,0.9279063,351.2857,0.005614975,nan +32,'10 Completed (solidified)',653519.0,0.00491445,497.4775,0.004872056,360122.0 +33,'10 Completed (solidified)',63086.14,0.004989442,498.452,0.004871975,360061.0 +34,'10 Completed (solidified)',189369.4,0.004981859,498.2731,0.004872085,360006.0 +35,'25 Error (died)',41199100.0,0.9293393,355.839,0.005609464,nan +36,'25 Error (died)',57651310.0,0.9277621,347.5743,0.005622412,nan +37,'10 Completed (solidified)',82716.26,0.004841894,498.3983,0.004871881,336651.0 +38,'10 Completed (solidified)',315341.7,0.00499652,498.0089,0.004871932,360229.0 +39,'10 Completed (solidified)',137839.9,0.004825349,498.3056,0.004871866,336473.0 +40,'10 Completed (solidified)',440898.4,0.004835041,497.7451,0.004871772,359952.0 +41,'10 Completed (solidified)',193097.9,0.004891533,498.2431,0.004871946,336480.0 +42,'10 Completed (solidified)',631391.1,0.004997609,497.54,0.004872141,360081.0 +43,'25 Error (died)',7725990.0,0.9520145,499.8614,0.005446439,nan +44,'10 Completed (solidified)',61023.32,0.004967416,498.4514,0.004871958,360135.0 +45,'10 Completed (solidified)',26836.69,0.004827417,498.483,0.00487187,336573.0 +46,'10 Completed (solidified)',80567.01,0.004910122,498.4264,0.004871967,336551.0 +47,'25 Error (died)',82541550.0,0.9291519,351.0675,0.005618861,nan +48,'10 Completed (solidified)',275596.2,0.004784428,498.077,0.004871839,336537.0 +49,'10 Completed (solidified)',183225.5,0.00499333,498.2917,0.004872124,359917.0 +50,'25 Error (died)',51965660.0,0.9294277,355.1927,0.005611039,nan +51,'10 Completed (solidified)',427317.1,0.004922584,497.8596,0.004872055,359951.0 +52,'25 Error (died)',22166160.0,0.9295618,358.585,0.005604468,nan +53,'10 Completed (solidified)',610043.1,0.004980424,497.5049,0.004871925,360104.0 +54,'25 Error (died)',37118360.0,0.9301077,359.5539,0.005604008,nan +55,'10 Completed (solidified)',305132.1,0.004983641,498.0408,0.004871976,359874.0 +56,'25 Error 
(died)',74314770.0,0.929005,351.2263,0.005618121,nan +57,'10 Completed (solidified)',26103.85,0.004817895,498.481,0.004871858,336557.0 +58,'10 Completed (solidified)',187700.5,0.001995293,498.5761,0.004873067,337336.0 +59,'10 Completed (solidified)',134417.3,0.004902077,498.3981,0.004872136,336759.0 +60,'25 Error (died)',20189460.0,0.9349988,384.0395,0.005570524,nan +61,'10 Completed (solidified)',268564.6,0.004878108,498.1333,0.004871977,336434.0 +62,'25 Error (died)',6959020.0,0.9527038,506.2141,0.005440801,nan +63,'25 Error (died)',33380220.0,0.9283625,352.3951,0.005613908,nan +64,'10 Completed (solidified)',58981.55,0.004946749,498.45,0.004871947,360448.0 +65,'25 Error (died)',47069170.0,0.9297849,357.1815,0.005607939,nan +66,'10 Completed (solidified)',176873.7,0.004955614,498.2265,0.004871879,360321.0 +67,'10 Completed (solidified)',78333.43,0.004852214,498.4092,0.004871897,336637.0 +68,'10 Completed (solidified)',295102.3,0.004976322,498.0847,0.004872062,360265.0 +69,'10 Completed (solidified)',261227.4,0.004898281,498.1373,0.004871956,336429.0 +70,'10 Completed (solidified)',182842.3,0.004893463,498.2569,0.004871941,336549.0 +71,'25 Error (died)',7687754.0,0.99306,297.7589,0.008192613,nan +72,'10 Completed (solidified)',413206.4,0.004979314,497.8924,0.00487208,359960.0 +73,'25 Error (died)',67322020.0,0.9298588,355.5548,0.005611483,nan +74,'10 Completed (solidified)',130466.9,0.002053214,498.675,0.004873081,337532.0 +75,'10 Completed (solidified)',66245.88,0.004940888,518.5295,0.005025867,2510533.0 +76,'10 Completed (solidified)',589982.1,0.004924449,497.5703,0.004872022,360187.0 +77,'25 Error (died)',27361000.0,0.588702,25.48924,0.01492393,nan +78,'10 Completed (solidified)',28768.78,0.004999425,519.2182,0.00502629,1643457.0 +79,'25 Error (died)',17138840.0,0.8138609,51.71766,0.01368148,nan +80,'10 Completed (solidified)',198848.4,0.004777559,516.8097,0.005025238,2506708.0 +81,'10 Completed (solidified)',463659.6,0.004909083,513.9949,0.005026299,2490477.0 
+82,'10 Completed (solidified)',86169.24,0.004923146,518.9528,0.005026685,1646321.0 +83,'25 Error (died)',54745240.0,0.67268,28.95581,0.01547419,nan +84,'10 Completed (solidified)',331048.6,0.004811223,515.3144,0.005025484,2497540.0 +85,'25 Error (died)',38287730.0,0.6234554,26.75746,0.01520114,nan +86,'10 Completed (solidified)',143627.9,0.004781931,517.9856,0.005025047,1642937.0 +87,'25 Error (died)',7361420.0,0.9956705,347.6857,0.007926599,nan +88,'10 Completed (solidified)',201285.4,0.004760575,517.6623,0.005025363,1644929.0 +89,'25 Error (died)',17300130.0,0.9056885,78.74175,0.01212733,nan +90,'10 Completed (solidified)',662338.3,0.004972194,511.9385,0.005027177,2483646.0 +91,'10 Completed (solidified)',287598.9,0.004965348,517.2125,0.005026285,1640685.0 +92,'10 Completed (solidified)',63975.53,0.004932037,519.2157,0.005027694,2518724.0 +93,'10 Completed (solidified)',83907.79,0.004929336,518.627,0.005025754,1645508.0 +94,'25 Error (died)',37069020.0,0.6220283,26.78364,0.01517222,nan +95,'10 Completed (solidified)',139802.9,0.004919013,519.249,0.005028701,1647730.0 +96,'25 Error (died)',26460280.0,0.5797975,24.98092,0.01489373,nan +97,'10 Completed (solidified)',27923.66,0.004922845,519.3944,0.005026643,1646244.0 +98,'10 Completed (solidified)',191995.9,0.004922239,517.0608,0.005025934,2499800.0 +99,'10 Completed (solidified)',195622.2,0.004922123,517.7767,0.005025824,1645032.0 +100,'10 Completed (solidified)',319775.6,0.004912448,515.5957,0.005026079,2494834.0 +101,'10 Completed (solidified)',279188.4,0.004916893,517.4807,0.005026813,1643526.0 +102,'25 Error (died)',52935430.0,0.645002,26.93298,0.01550197,nan +103,'10 Completed (solidified)',639325.0,0.004934669,512.4593,0.005028068,2499867.0 +104,'10 Completed (solidified)',447685.0,0.004960772,514.2615,0.005026704,2497176.0 +105,'25 Error (died)',6897345.0,0.9963418,364.8108,0.007849838,nan +106,'10 Completed (solidified)',61868.34,0.004920619,519.053,0.005027163,2521499.0 +107,'25 Error 
(died)',16013430.0,0.8297413,55.24699,0.01343596,nan +108,'10 Completed (solidified)',185578.3,0.004857488,516.9909,0.005025465,2500237.0 +109,'10 Completed (solidified)',27177.56,0.004922003,519.3047,0.005026381,1647717.0 +110,'25 Error (died)',25649530.0,0.5948271,26.11658,0.01487996,nan +111,'10 Completed (solidified)',136075.4,0.004902185,519.1891,0.005028432,1646688.0 +112,'10 Completed (solidified)',81713.31,0.00495737,518.7098,0.00502597,1646206.0 +113,'10 Completed (solidified)',309366.6,0.004764858,515.578,0.005025519,2502079.0 +114,'25 Error (died)',35836060.0,0.6016311,25.46377,0.01514743,nan +115,'10 Completed (solidified)',190521.7,0.004856817,517.678,0.005025357,1644973.0 +116,'25 Error (died)',51271660.0,0.6456139,27.11219,0.01547038,nan +117,'10 Completed (solidified)',433139.3,0.00487856,514.2132,0.005026031,2498875.0 +118,'10 Completed (solidified)',272113.4,0.00491518,518.1378,0.00502852,1645396.0 +119,'25 Error (died)',6475586.0,0.9969941,384.3886,0.007769235,nan +120,'10 Completed (solidified)',59801.57,0.004850063,518.4074,0.005025206,2505176.0 +121,'10 Completed (solidified)',619260.7,0.004926559,512.2339,0.00502672,2489488.0 +122,'10 Completed (solidified)',26486.14,0.004914318,519.0187,0.005025572,1646116.0 +123,'10 Completed (solidified)',179368.0,0.004810605,516.9916,0.005025229,2504555.0 +124,'25 Error (died)',15957080.0,0.9089323,80.449,0.01204281,nan +125,'10 Completed (solidified)',79336.75,0.00478303,518.4546,0.005024975,1643316.0 +126,'10 Completed (solidified)',185059.7,0.004916501,518.1162,0.005026554,1644506.0 +127,'10 Completed (solidified)',132203.3,0.004921325,518.5798,0.005026685,1646051.0 +128,'25 Error (died)',34686270.0,0.599228,25.41568,0.0151188,nan +129,'25 Error (died)',24801250.0,0.5441033,22.83689,0.0147991,nan +130,'10 Completed (solidified)',418442.4,0.004801294,514.2764,0.005025613,2488914.0 +131,'25 Error (died)',49732510.0,0.6484737,27.48453,0.0154303,nan +132,'10 Completed 
(solidified)',299032.6,0.004901987,515.7688,0.005025969,2501291.0 +133,'10 Completed (solidified)',597881.7,0.004841836,512.2841,0.005026088,2483515.0 +134,'10 Completed (solidified)',264584.0,0.004843391,517.0946,0.005025364,1644180.0 +135,'10 Completed (solidified)',57877.69,0.004796431,518.3761,0.005024988,2512241.0 +136,'10 Completed (solidified)',173418.9,0.004913228,517.4552,0.005026502,2510491.0 +137,'25 Error (died)',6081025.0,0.9975954,406.8123,0.007685172,nan +138,'10 Completed (solidified)',128884.3,0.004925041,518.27,0.005025773,1646205.0 +139,'10 Completed (solidified)',25758.27,0.004926754,519.0519,0.005025668,1645437.0 +140,'25 Error (died)',24314510.0,0.7533137,39.87145,0.01462918,nan +141,'25 Error (died)',15599180.0,0.9340547,96.40169,0.01133938,nan +142,'25 Error (died)',33627120.0,0.5949858,25.23905,0.01509008,nan +143,'10 Completed (solidified)',292053.7,0.02410516,522.4857,0.005075257,nan +144,'10 Completed (solidified)',77240.91,0.00489782,519.2741,0.00502737,1646995.0 +145,'10 Completed (solidified)',407769.2,0.01564291,519.1554,0.005056173,nan +146,'25 Error (died)',55821450.0,0.9789547,176.2164,0.009350899,nan +147,'10 Completed (solidified)',582761.0,0.01755161,517.5473,0.005060446,nan +148,'14 Completed (net flux is small)',6281748.0,1.0,636.8511,0.01322981,nan +149,'10 Completed (solidified)',180291.2,0.006135706,518.6227,0.005029766,nan +150,'10 Completed (solidified)',257541.1,0.006126987,518.0101,0.005029759,nan +151,'14 Completed (net flux is small)',18845240.0,1.0,636.8509,0.01322981,nan +152,'10 Completed (solidified)',27041.72,0.01037476,686.702,0.006289473,nan +153,'10 Completed (solidified)',55685.81,0.38437,644.5419,0.00889913,nan +154,'14 Completed (net flux is small)',31408740.0,1.0,636.8508,0.01322981,nan +155,'10 Completed (solidified)',81048.36,0.005861142,682.7729,0.006263289,nan +156,'10 Completed (solidified)',280813.5,0.3475405,637.5932,0.00861631,nan +157,'10 Completed 
(solidified)',166864.2,0.3969359,638.5655,0.009033908,nan +158,'14 Completed (net flux is small)',62817480.0,1.0,636.8504,0.01322982,nan +159,'10 Completed (solidified)',135192.7,0.01036147,684.415,0.006290697,nan +160,'14 Completed (net flux is small)',43972230.0,1.0,636.8506,0.01322981,nan +161,'14 Completed (net flux is small)',6042665.0,1.0,636.8511,0.01322981,nan +162,'10 Completed (solidified)',189040.1,0.005055634,678.6399,0.006258278,nan +163,'14 Completed (net flux is small)',18127990.0,1.0,636.8509,0.01322981,nan +164,'10 Completed (solidified)',395637.8,0.332262,633.5866,0.008507002,nan +165,'10 Completed (solidified)',554388.0,0.3913497,622.0559,0.009056996,nan +166,'10 Completed (solidified)',79104.78,0.01319483,688.5267,0.006309024,nan +167,'10 Completed (solidified)',270083.2,0.004921809,675.4297,0.006256703,4290414.0 +168,'10 Completed (solidified)',272855.3,0.4007522,633.9868,0.009088741,nan +169,'10 Completed (solidified)',26369.98,0.01323288,689.7768,0.006308739,nan +170,'10 Completed (solidified)',54692.88,0.3845082,644.5896,0.008900205,nan +171,'10 Completed (solidified)',184542.0,0.01037768,683.4735,0.006291594,nan +172,'10 Completed (solidified)',164123.2,0.3825995,640.1841,0.008903312,nan +173,'14 Completed (net flux is small)',30213320.0,1.0,636.8508,0.01322981,nan +174,'14 Completed (net flux is small)',60426640.0,1.0,636.8504,0.01322982,nan +175,'10 Completed (solidified)',547575.7,0.3681032,623.6001,0.008846591,nan +176,'10 Completed (solidified)',263480.7,0.01252051,687.5644,0.006311978,nan +177,'10 Completed (solidified)',53725.34,0.3846744,644.6369,0.00890154,nan +178,'14 Completed (net flux is small)',5808209.0,1.0,636.8511,0.01322981,nan +179,'10 Completed (solidified)',25692.14,0.005025768,681.6201,0.006255753,nan +180,'10 Completed (solidified)',131773.3,0.004987555,678.9673,0.006256275,4288583.0 +181,'10 Completed (solidified)',77095.74,0.005801611,682.2432,0.006262239,nan +182,'14 Completed (net flux is 
small)',42298650.0,1.0,636.8506,0.01322981,nan +183,'14 Completed (net flux is small)',29041040.0,1.0,636.8508,0.01322981,nan +184,'10 Completed (solidified)',380679.6,0.4032842,628.9052,0.009133277,nan +185,'10 Completed (solidified)',128646.4,0.0134422,690.7813,0.006315699,nan +186,'14 Completed (net flux is small)',17424630.0,1.0,636.851,0.01322981,nan +187,'14 Completed (net flux is small)',40657460.0,1.0,636.8507,0.01322981,nan +188,'10 Completed (solidified)',161000.1,0.3991163,638.6232,0.009052969,nan +189,'10 Completed (solidified)',266613.7,0.4271393,632.1903,0.009335179,nan +190,'10 Completed (solidified)',373988.4,0.4080462,628.7416,0.009176319,nan +191,'14 Completed (net flux is small)',58082080.0,1.0,636.8504,0.01322982,nan +192,'10 Completed (solidified)',179958.5,0.004987525,681.2592,0.00626107,4299265.0 +193,'10 Completed (solidified)',556326.2,0.1823797,640.8081,0.007397363,nan +194,'14 Completed (net flux is small)',5595403.0,1.0,636.8511,0.01322981,nan +195,'10 Completed (solidified)',256968.7,0.004873672,675.1354,0.00625585,4289756.0 +196,'14 Completed (net flux is small)',16786210.0,1.0,636.851,0.01322981,nan +197,'14 Completed (net flux is small)',27977010.0,1.0,636.8508,0.01322981,nan +198,'10 Completed (solidified)',25074.75,0.004976974,684.2236,0.00625874,4294860.0 +199,'10 Completed (solidified)',52367.77,0.434725,640.1217,0.009366958,nan +200,'10 Completed (solidified)',125479.9,0.0115051,688.8879,0.00630266,nan +201,'10 Completed (solidified)',158169.5,0.3850745,639.9072,0.008924851,nan +202,'10 Completed (solidified)',75186.66,0.005767616,681.7204,0.006261382,nan +203,'14 Completed (net flux is small)',39167820.0,1.0,636.8507,0.01322981,nan +204,'10 Completed (solidified)',262141.8,0.4241077,632.3915,0.009305825,nan +205,'14 Completed (net flux is small)',55954020.0,1.0,636.8505,0.01322982,nan +206,'10 Completed (solidified)',366153.2,0.4303609,627.9795,0.009384989,nan +207,'10 Completed 
(solidified)',250565.6,0.005067891,677.3555,0.006259366,nan +208,'14 Completed (net flux is small)',16182240.0,1.0,636.851,0.01322981,nan +209,'14 Completed (net flux is small)',5394078.0,1.0,636.8511,0.01322981,nan +210,'10 Completed (solidified)',73440.89,0.00497839,682.449,0.006258441,4303131.0 +211,'10 Completed (solidified)',175412.3,0.005079951,679.1054,0.006258687,nan +212,'10 Completed (solidified)',51834.59,0.3845291,644.6932,0.008899971,nan +213,'10 Completed (solidified)',155430.6,0.3852281,640.0167,0.008925816,nan +214,'10 Completed (solidified)',122392.8,0.004965597,681.2767,0.006258839,4297686.0 +215,'14 Completed (net flux is small)',26970390.0,1.0,636.8508,0.01322981,nan +216,'10 Completed (solidified)',530225.3,0.3445829,625.6743,0.008637771,nan +217,'10 Completed (solidified)',259066.5,0.3856322,635.4155,0.008948891,nan +218,'10 Completed (solidified)',361189.0,0.4027936,629.564,0.009125634,nan +219,'10 Completed (solidified)',171339.4,0.005821539,680.374,0.006263793,nan +220,'10 Completed (solidified)',24471.09,0.005058587,682.0418,0.006256397,nan +221,'14 Completed (net flux is small)',37758550.0,1.0,636.8507,0.01322981,nan +222,'10 Completed (solidified)',524649.4,0.2922852,629.895,0.008203519,nan +223,'14 Completed (net flux is small)',15721650.0,1.0,582.7718,0.02587928,nan +224,'10 Completed (solidified)',156203.1,0.05088213,1401.212,0.01309585,nan +225,'10 Completed (solidified)',52043.62,0.04162354,1447.181,0.01294707,nan +226,'10 Completed (solidified)',69723.02,0.004951518,1575.044,0.01255496,3633089.0 +227,'14 Completed (net flux is small)',53940780.0,1.0,636.8505,0.01322982,nan +228,'10 Completed (solidified)',261741.6,0.02323777,1493.604,0.01269353,nan +229,'10 Completed (solidified)',244681.8,0.005070807,677.4687,0.006259382,nan +230,'14 Completed (net flux is small)',5240551.0,1.0,582.7719,0.02587928,nan +231,'10 Completed (solidified)',23226.79,0.00501055,1569.826,0.01256699,nan +232,'14 Completed (net flux is 
small)',26202750.0,1.0,582.7716,0.02587929,nan +233,'10 Completed (solidified)',116160.5,0.00485124,1563.958,0.01257545,3635707.0 +234,'14 Completed (net flux is small)',36683860.0,1.0,582.7715,0.02587929,nan +235,'14 Completed (net flux is small)',52405510.0,1.0,582.7714,0.02587929,nan +236,'10 Completed (solidified)',162725.2,0.004973301,1563.589,0.01257149,3632383.0 +237,'10 Completed (solidified)',22959.32,0.01200644,1559.69,0.01256556,nan +238,'10 Completed (solidified)',51705.73,0.02638209,1503.474,0.01273755,nan +239,'10 Completed (solidified)',366921.4,0.02297497,1481.911,0.01270009,nan +240,'10 Completed (solidified)',153902.0,0.05059475,1404.908,0.01306375,nan +241,'10 Completed (solidified)',520848.7,0.04461905,1385.468,0.01300435,nan +242,'14 Completed (net flux is small)',5146293.0,1.0,582.7719,0.02587928,nan +243,'10 Completed (solidified)',114630.3,0.004948322,1571.296,0.01255976,3640022.0 +244,'10 Completed (solidified)',232420.1,0.004943558,1568.48,0.01255806,3634708.0 +245,'14 Completed (net flux is small)',36024050.0,1.0,582.7715,0.02587929,nan +246,'14 Completed (net flux is small)',15438880.0,1.0,582.7718,0.02587928,nan +247,'14 Completed (net flux is small)',25731460.0,1.0,582.7717,0.02587929,nan +248,'10 Completed (solidified)',257551.2,0.0421414,1422.653,0.01296354,nan +249,'10 Completed (solidified)',354774.1,0.08814272,1249.201,0.01377559,nan +250,'10 Completed (solidified)',68755.91,0.006081422,1564.007,0.01256302,nan +251,'14 Completed (net flux is small)',51462930.0,1.0,582.7714,0.02587929,nan +252,'10 Completed (solidified)',229344.7,0.004968161,1560.732,0.01257309,3636471.0 +253,'14 Completed (net flux is small)',5050936.0,1.0,582.7719,0.02587928,nan +254,'10 Completed (solidified)',511876.7,0.05052878,1364.796,0.0130893,nan +255,'10 Completed (solidified)',22597.26,0.007453225,1563.109,0.01256143,nan +256,'14 Completed (net flux is small)',15152810.0,1.0,582.7718,0.02587928,nan +257,'10 Completed 
(solidified)',160620.2,0.006012471,1570.154,0.01254376,nan +258,'10 Completed (solidified)',50904.05,0.02656554,1503.327,0.01273731,nan +259,'10 Completed (solidified)',112948.8,0.00613185,1562.824,0.01256223,nan +260,'10 Completed (solidified)',152620.3,0.02935881,1480.806,0.01278314,nan +261,'14 Completed (net flux is small)',35356550.0,1.0,582.7715,0.02587929,nan +262,'10 Completed (solidified)',67782.53,0.004953192,1573.52,0.01255811,3639848.0 +263,'14 Completed (net flux is small)',25254680.0,1.0,582.7717,0.02587929,nan +264,'10 Completed (solidified)',252486.2,0.05704363,1369.792,0.01317493,nan +265,'10 Completed (solidified)',356575.6,0.02308456,1482.762,0.01269699,nan +266,'10 Completed (solidified)',158177.4,0.004936716,1563.117,0.01257309,3634941.0 +267,'10 Completed (solidified)',225916.1,0.007409788,1553.335,0.01256891,nan +268,'14 Completed (net flux is small)',50509360.0,1.0,582.7714,0.02587929,nan +269,'10 Completed (solidified)',150709.3,0.02749152,1490.478,0.01274194,nan +270,'14 Completed (net flux is small)',4958205.0,1.0,582.7719,0.02587928,nan +271,'10 Completed (solidified)',66794.62,0.006168411,1565.098,0.0125602,nan +272,'10 Completed (solidified)',50066.37,0.04227327,1445.685,0.01295002,nan +273,'10 Completed (solidified)',250031.4,0.04562959,1410.568,0.01301204,nan +274,'10 Completed (solidified)',22268.19,0.004942285,1575.23,0.01255778,3638922.0 +275,'10 Completed (solidified)',351095.1,0.03019367,1456.184,0.01279586,nan +276,'14 Completed (net flux is small)',14874620.0,1.0,582.7718,0.02587928,nan +277,'14 Completed (net flux is small)',24791030.0,1.0,582.7717,0.02587929,nan +278,'10 Completed (solidified)',506222.0,0.05101607,1361.418,0.0131231,nan +279,'10 Completed (solidified)',111348.1,0.004941277,1572.752,0.012557,3636876.0 +280,'10 Completed (solidified)',155888.4,0.004933169,1563.109,0.01257322,3633856.0 +281,'14 Completed (net flux is small)',34707440.0,1.0,582.7715,0.02587929,nan +282,'10 Completed 
(solidified)',49072.86,0.07001014,1343.767,0.01340435,nan +283,'14 Completed (net flux is small)',4871428.0,1.0,582.7719,0.02587928,nan +284,'10 Completed (solidified)',222672.0,0.004884666,1559.734,0.01257654,3634339.0 +285,'14 Completed (net flux is small)',14614290.0,1.0,582.7718,0.02587928,nan +286,'10 Completed (solidified)',501450.2,0.02689962,1450.255,0.01276485,nan +287,'14 Completed (net flux is small)',24357140.0,1.0,582.7717,0.02587929,nan +288,'14 Completed (net flux is small)',49582050.0,1.0,582.7714,0.02587929,nan +289,'10 Completed (solidified)',65733.12,0.004933076,1572.239,0.01256112,3639198.0 +290,'10 Completed (solidified)',21911.22,0.00611567,1566.243,0.01256111,nan +291,'10 Completed (solidified)',148363.0,0.02612085,1492.264,0.01274395,nan +292,'10 Completed (solidified)',246130.9,0.05095506,1390.4,0.0131034,nan +293,'10 Completed (solidified)',109553.0,0.00491605,1570.225,0.01256265,3641196.0 +294,'14 Completed (net flux is small)',34100000.0,1.0,582.7716,0.02587929,nan +295,'10 Completed (solidified)',153414.4,0.004893737,1562.646,0.01257481,3635554.0 +296,'10 Completed (solidified)',346002.3,0.03098054,1455.257,0.01279515,nan +297,'14 Completed (net flux is small)',48714280.0,1.0,582.7714,0.02587929,nan +298,'10 Completed (solidified)',219140.2,0.004835085,1559.399,0.01257813,3634297.0 +299,'10 Completed (solidified)',14532890.0,0.6840844,450.5894,0.03673918,nan +300,'10 Completed (solidified)',48468.1,0.00493607,2928.445,0.02088377,5347064.0 +301,'10 Completed (solidified)',492741.9,0.04163534,1396.025,0.01297826,nan +302,'14 Completed (net flux is small)',4864114.0,0.7884512,584.9552,0.0362648,nan +303,'10 Completed (solidified)',64357.01,0.004984752,2926.467,0.02086076,2722727.0 +304,'10 Completed (solidified)',21480.17,0.006295242,2874.69,0.02074235,nan +305,'10 Completed (solidified)',145823.1,0.02112701,2490.276,0.02052224,nan +306,'10 Completed (solidified)',339600.2,0.01003618,2710.053,0.02058105,nan +307,'10 Completed 
(solidified)',24092710.0,0.5212082,264.3165,0.03738803,nan +308,'10 Completed (solidified)',242340.7,0.004949403,2899.974,0.02088001,5338054.0 +309,'10 Completed (solidified)',33742680.0,0.5076395,239.9249,0.03749317,nan +310,'10 Completed (solidified)',150181.1,0.004801359,2930.886,0.02090104,2720981.0 +311,'10 Completed (solidified)',214790.7,0.006278418,2861.521,0.0207438,nan +312,'14 Completed (net flux is small)',4804171.0,0.7956935,589.6805,0.03621893,nan +313,'10 Completed (solidified)',21323.22,0.004917958,2933.341,0.0208742,2724633.0 +314,'10 Completed (solidified)',48194.28,0.007740432,2819.885,0.02067354,nan +315,'10 Completed (solidified)',107246.5,0.004962106,2924.832,0.02086734,2724146.0 +316,'10 Completed (solidified)',23874110.0,0.6077424,351.7703,0.03705713,nan +317,'10 Completed (solidified)',48124760.0,0.4601165,155.9491,0.03785938,nan +318,'10 Completed (solidified)',144567.9,0.007740207,2806.497,0.02067288,nan +319,'10 Completed (solidified)',485143.8,0.007727039,2759.992,0.02067168,nan +320,'10 Completed (solidified)',14349150.0,0.6856179,452.6683,0.03673298,nan +321,'10 Completed (solidified)',337334.7,0.007727914,2780.227,0.02067263,nan +322,'10 Completed (solidified)',149333.1,0.004875993,2927.386,0.02089193,2721136.0 +323,'10 Completed (solidified)',47525460.0,0.4545378,152.2052,0.03787207,nan +324,'10 Completed (solidified)',33345260.0,0.5167122,248.8261,0.03745441,nan +325,'10 Completed (solidified)',213568.7,0.006324725,2859.488,0.0207376,nan +326,'14 Completed (net flux is small)',4742044.0,0.7953901,590.0417,0.03622015,nan +327,'10 Completed (solidified)',47834.09,0.0049704,2926.802,0.02087879,5344867.0 +328,'10 Completed (solidified)',63980.57,0.004957385,2928.081,0.02086688,2723757.0 +329,'10 Completed (solidified)',241329.5,0.01486394,2601.978,0.02048336,nan +330,'14 Completed (net flux is small)',14169230.0,0.6870766,454.6751,0.03672702,nan +331,'10 Completed (solidified)',106633.6,0.004963023,2924.511,0.02086447,2722529.0 
+332,'10 Completed (solidified)',23535890.0,0.5759003,319.855,0.03717244,nan +333,'10 Completed (solidified)',481467.9,0.00489658,2868.125,0.02088521,5334215.0 +334,'10 Completed (solidified)',106020.5,0.006391562,2867.624,0.02076026,nan +335,'10 Completed (solidified)',21213.13,0.007585733,2830.902,0.02068757,nan +336,'10 Completed (solidified)',32952970.0,0.5252379,257.2904,0.0374183,nan +337,'10 Completed (solidified)',143879.3,0.02114424,2490.076,0.02052214,nan +338,'10 Completed (solidified)',148515.0,0.006294882,2865.48,0.02074172,nan +339,'10 Completed (solidified)',63742.66,0.00884938,2783.344,0.02059398,nan +340,'10 Completed (solidified)',239734.0,0.02588945,2398.32,0.0206245,nan +341,'10 Completed (solidified)',334812.3,0.00495081,2886.39,0.02087891,5341123.0 +342,'14 Completed (net flux is small)',4683937.0,0.7978006,591.7588,0.03620011,nan +343,'10 Completed (solidified)',47527.02,0.007728008,2820.401,0.02067462,nan +344,'10 Completed (solidified)',46985320.0,0.4744931,171.2063,0.0377986,nan +345,'10 Completed (solidified)',142584.3,0.0100219,2736.98,0.02058276,nan +346,'10 Completed (solidified)',478179.1,0.0047717,2874.037,0.0208994,5342406.0 +347,'10 Completed (solidified)',23225450.0,0.5382402,282.0932,0.03731647,nan +348,'10 Completed (solidified)',212168.1,0.006291149,2861.025,0.02074182,nan +349,'14 Completed (net flux is small)',13992630.0,0.6913471,459.6805,0.03671188,nan +350,'10 Completed (solidified)',32498900.0,0.4976091,232.2157,0.03752706,nan +351,'10 Completed (solidified)',63198.73,0.00631423,2870.746,0.02073926,nan +352,'10 Completed (solidified)',105216.0,0.0049883,2923.131,0.02085921,2723891.0 +353,'10 Completed (solidified)',21064.1,0.004851198,2938.025,0.02089583,2721881.0 +354,'10 Completed (solidified)',333301.7,0.02121109,2465.133,0.02052373,nan +355,'10 Completed (solidified)',210619.8,0.007586838,2817.265,0.02068675,nan +356,'10 Completed (solidified)',47140.72,0.01004733,2749.131,0.02058125,nan +357,'10 Completed 
(solidified)',147441.0,0.007600613,2821.283,0.02068579,nan +358,'14 Completed (net flux is small)',13818550.0,0.691652,460.452,0.03670974,nan +359,'10 Completed (solidified)',46381290.0,0.4561991,154.7685,0.03786048,nan +360,'10 Completed (solidified)',141604.6,0.01488137,2614.326,0.02048306,nan +361,'10 Completed (solidified)',475254.7,0.007758182,2759.698,0.02066896,nan +362,'10 Completed (solidified)',20872.07,0.004968373,2930.534,0.02086407,2724138.0 +363,'10 Completed (solidified)',104400.1,0.004919521,2928.48,0.0208861,2721478.0 +364,'14 Completed (net flux is small)',4624273.0,0.7994033,593.0638,0.03618644,nan +365,'10 Completed (solidified)',237424.4,0.004885154,2903.541,0.02088914,5343722.0 +366,'10 Completed (solidified)',22939300.0,0.5492088,293.4287,0.03727222,nan +367,'10 Completed (solidified)',32120730.0,0.5173771,251.0662,0.03744469,nan +368,'10 Completed (solidified)',146113.8,0.004984913,2920.268,0.0208598,2723715.0 +369,'10 Completed (solidified)',471384.0,0.007719571,2761.447,0.0206724,nan +370,'10 Completed (solidified)',62617.4,0.004967652,2927.582,0.02086497,2722952.0 +371,'10 Completed (solidified)',236017.0,0.01473063,2605.362,0.02048411,nan +372,'10 Completed (solidified)',330383.5,0.01483217,2590.939,0.02048529,nan +373,'10 Completed (solidified)',45869560.0,0.4736628,172.2059,0.03779366,nan +374,'10 Completed (solidified)',21187.04,0.004921599,3338.838,0.0233587,2314112.0 +375,'10 Completed (solidified)',208943.4,0.006287675,2861.397,0.02074309,nan +376,'10 Completed (solidified)',24180060.0,0.4541279,696.8499,0.04055211,nan +377,'14 Completed (net flux is small)',4844080.0,0.4873124,822.8831,0.04042149,nan +378,'10 Completed (solidified)',47720.08,0.01675422,3040.147,0.02366792,nan +379,'10 Completed (solidified)',14517490.0,0.4861563,758.9068,0.04061354,nan +380,'10 Completed (solidified)',33821370.0,0.4345988,635.9952,0.04060198,nan +381,'10 Completed (solidified)',105984.5,0.006671249,3286.595,0.02339799,nan +382,'10 Completed 
(solidified)',143192.1,0.0165349,3034.078,0.02366246,nan +383,'10 Completed (solidified)',63590.41,0.00755527,3266.616,0.02342117,nan +384,'10 Completed (solidified)',238579.9,0.01696025,3013.321,0.02367612,nan +385,'10 Completed (solidified)',333959.0,0.02201467,2889.833,0.02383268,nan +386,'14 Completed (net flux is small)',4789306.0,0.4879734,823.0368,0.04042658,nan +387,'10 Completed (solidified)',477014.3,0.01970938,2923.675,0.02376149,nan +388,'10 Completed (solidified)',148374.1,0.004860757,3331.547,0.02335773,2313864.0 +389,'10 Completed (solidified)',48262480.0,0.3966001,532.4521,0.0406739,nan +390,'10 Completed (solidified)',211926.7,0.006666193,3279.341,0.02339818,nan +391,'10 Completed (solidified)',47431.95,0.02739904,2807.922,0.02400393,nan +392,'10 Completed (solidified)',237097.5,0.03091905,2716.407,0.0241295,nan +393,'10 Completed (solidified)',14354030.0,0.4848758,760.0076,0.04059962,nan +394,'10 Completed (solidified)',21097.77,0.01652854,3049.797,0.02366129,nan +395,'10 Completed (solidified)',142396.6,0.01943568,2967.914,0.02374857,nan +396,'10 Completed (solidified)',23901710.0,0.4575764,701.5774,0.04056599,nan +397,'10 Completed (solidified)',147640.8,0.00666169,3283.919,0.0233979,nan +398,'10 Completed (solidified)',47716040.0,0.4009292,540.9765,0.04067823,nan +399,'10 Completed (solidified)',105452.8,0.004850269,3334.828,0.02335746,2314660.0 +400,'10 Completed (solidified)',63246.47,0.006775808,3286.883,0.02340232,nan +401,'10 Completed (solidified)',33450960.0,0.4428366,646.5121,0.04063666,nan +402,'10 Completed (solidified)',332223.1,0.01682359,3005.411,0.02367307,nan +403,'14 Completed (net flux is small)',4735414.0,0.4878461,823.3765,0.04042443,nan +404,'10 Completed (solidified)',141614.2,0.01667604,3030.951,0.02366662,nan +405,'10 Completed (solidified)',474494.6,0.01953728,2927.643,0.02375605,nan +406,'10 Completed (solidified)',47209.01,0.01675372,3040.199,0.0236679,nan +407,'10 Completed 
(solidified)',210892.2,0.004985835,3323.604,0.02335842,2315656.0 +408,'10 Completed (solidified)',20975.28,0.004826134,3341.424,0.02335684,2316493.0 +409,'10 Completed (solidified)',104875.6,0.004901433,3333.468,0.02335845,2314943.0 +410,'10 Completed (solidified)',62974.42,0.01707151,3034.298,0.02367722,nan +411,'10 Completed (solidified)',23633260.0,0.4538077,699.7515,0.04053908,nan +412,'10 Completed (solidified)',14188050.0,0.4865574,760.6373,0.04061119,nan +413,'10 Completed (solidified)',235996.2,0.02195,2902.156,0.02382908,nan +414,'10 Completed (solidified)',146831.9,0.004839326,3332.182,0.0233573,2314911.0 +415,'14 Completed (net flux is small)',4680113.0,0.4880372,823.2763,0.04042637,nan +416,'10 Completed (solidified)',33075690.0,0.4368258,642.3609,0.04059799,nan +417,'10 Completed (solidified)',47192460.0,0.4016275,543.4469,0.04067392,nan +418,'10 Completed (solidified)',20854.41,0.004841639,3341.011,0.02335716,2316747.0 +419,'10 Completed (solidified)',329903.5,0.03912106,2551.272,0.02443077,nan +420,'10 Completed (solidified)',62562.1,0.004964929,3334.596,0.02335784,2315720.0 +421,'10 Completed (solidified)',209730.7,0.004865073,3327.042,0.02335791,2315226.0 +422,'10 Completed (solidified)',46909.84,0.02730718,2809.814,0.02400071,nan +423,'10 Completed (solidified)',14030460.0,0.4865509,761.2649,0.04060913,nan +424,'10 Completed (solidified)',140768.9,0.01982866,2959.354,0.0237608,nan +425,'10 Completed (solidified)',23373240.0,0.4559052,703.1239,0.04054573,nan +426,'10 Completed (solidified)',472055.5,0.01699228,2985.355,0.02367968,nan +427,'10 Completed (solidified)',104267.3,0.004961295,3331.752,0.02335799,2316395.0 +428,'10 Completed (solidified)',328487.0,0.01687811,3004.472,0.02367468,nan +429,'10 Completed (solidified)',146003.6,0.006659145,3284.051,0.02339788,nan +430,'10 Completed (solidified)',234664.5,0.01700483,3012.598,0.02367743,nan +431,'10 Completed (solidified)',32699940.0,0.4406824,647.5958,0.04061316,nan +432,'10 Completed 
(solidified)',46645820.0,0.3946382,533.0068,0.04065122,nan +433,'10 Completed (solidified)',469220.5,0.01987327,2920.674,0.0237666,nan +434,'10 Completed (solidified)',20708.39,0.004946114,3338.193,0.02335915,2314708.0 +435,'10 Completed (solidified)',13875620.0,0.4867995,762.1117,0.04060841,nan +436,'10 Completed (solidified)',208560.6,0.006661259,3279.622,0.0233982,nan +437,'10 Completed (solidified)',46601.02,0.027608,2803.759,0.02401124,nan +438,'14 Completed (net flux is small)',4628427.0,0.4875049,824.3976,0.0404184,nan +439,'10 Completed (solidified)',62142.88,0.00665846,3289.94,0.02339763,nan +440,'10 Completed (solidified)',103627.7,0.01197159,3152.977,0.02353358,nan +441,'10 Completed (solidified)',326290.1,0.0168145,3006.089,0.02367277,nan +442,'10 Completed (solidified)',23105610.0,0.4561201,704.4975,0.04054274,nan +443,'10 Completed (solidified)',233057.2,0.01996282,2945.745,0.02376626,nan +444,'10 Completed (solidified)',139843.0,0.01694099,3025.057,0.02367448,nan +445,'10 Completed (solidified)',145004.7,0.01866726,2992.243,0.02372498,nan +446,'10 Completed (solidified)',47679.4,0.00873239,3299.324,0.02383681,nan +447,'10 Completed (solidified)',207150.6,0.006665511,3279.575,0.02339822,nan +448,'10 Completed (solidified)',32340440.0,0.4396562,647.655,0.04060352,nan +449,'10 Completed (solidified)',46148360.0,0.4003576,545.2065,0.04065335,nan +450,'10 Completed (solidified)',466101.6,0.02003548,2917.369,0.02377172,nan +451,'10 Completed (solidified)',21141.59,0.004822528,3390.522,0.02365002,3149218.0 +452,'10 Completed (solidified)',238370.6,0.008707927,3270.149,0.02383755,nan +453,'10 Completed (solidified)',24003040.0,0.3675011,1058.69,0.04423333,nan +454,'10 Completed (solidified)',14404990.0,0.3790924,1117.791,0.0442381,nan +455,'14 Completed (net flux is small)',4803248.0,0.3828601,1176.859,0.0441251,nan +456,'10 Completed (solidified)',148079.2,0.004907715,3376.478,0.0236544,3149895.0 +457,'10 Completed 
(solidified)',143021.3,0.008694152,3285.269,0.02383593,nan +458,'10 Completed (solidified)',105754.7,0.006259765,3350.581,0.02371791,nan +459,'10 Completed (solidified)',211548.3,0.006279695,3340.067,0.02371928,nan +460,'10 Completed (solidified)',33578400.0,0.3654014,1002.995,0.04437669,nan +461,'10 Completed (solidified)',63428.58,0.004923662,3384.22,0.02365487,3147178.0 +462,'10 Completed (solidified)',47947360.0,0.34641,907.9769,0.0443877,nan +463,'10 Completed (solidified)',476753.0,0.007655478,3255.57,0.02378863,nan +464,'14 Completed (net flux is small)',4752958.0,0.3828743,1177.163,0.04412445,nan +465,'10 Completed (solidified)',142200.5,0.01324471,3189.579,0.02406176,nan +466,'10 Completed (solidified)',333506.5,0.01523243,3120.053,0.0241665,nan +467,'10 Completed (solidified)',237127.4,0.004921308,3352.664,0.02365585,5222404.0 +468,'10 Completed (solidified)',47417.2,0.01342845,3200.245,0.02406954,nan +469,'10 Completed (solidified)',21051.15,0.004950212,3387.655,0.02365577,3146448.0 +470,'10 Completed (solidified)',23753140.0,0.3688087,1060.277,0.04425027,nan +471,'10 Completed (solidified)',63136.32,0.004868617,3385.466,0.02365231,3146571.0 +472,'10 Completed (solidified)',14254930.0,0.3791044,1118.687,0.04423563,nan +473,'10 Completed (solidified)',331928.7,0.007691312,3277.208,0.02378907,nan +474,'10 Completed (solidified)',105218.9,0.004929378,3380.069,0.02365527,3148950.0 +475,'10 Completed (solidified)',473790.7,0.01756286,3052.445,0.02429109,nan +476,'10 Completed (solidified)',33231250.0,0.3655402,1004.745,0.04437333,nan +477,'10 Completed (solidified)',147277.3,0.007550437,3318.406,0.0237802,nan +478,'14 Completed (net flux is small)',4703430.0,0.3831222,1177.318,0.04412807,nan +479,'10 Completed (solidified)',210507.3,0.0049124,3370.327,0.02365465,3151154.0 +480,'10 Completed (solidified)',141489.8,0.008686349,3285.612,0.02383554,nan +481,'10 Completed (solidified)',47453870.0,0.3467873,910.6069,0.0443849,nan +482,'10 Completed 
(solidified)',62799.03,0.004877834,3385.275,0.02365274,3146811.0 +483,'10 Completed (solidified)',47166.7,0.004899471,3383.3,0.02365379,5232110.0 +484,'10 Completed (solidified)',20932.29,0.004933644,3388.05,0.02365519,3147529.0 +485,'10 Completed (solidified)',104680.2,0.01032782,3262.73,0.02391516,nan +486,'10 Completed (solidified)',14107170.0,0.3792016,1119.517,0.04423475,nan +487,'10 Completed (solidified)',146535.1,0.007589329,3317.592,0.02378206,nan +488,'10 Completed (solidified)',23507820.0,0.3664666,1060.851,0.04421021,nan +489,'10 Completed (solidified)',235861.6,0.004956563,3352.018,0.02365749,5219676.0 +490,'10 Completed (solidified)',330183.6,0.004969667,3336.658,0.02365863,5225622.0 +491,'10 Completed (solidified)',46899.71,0.007677762,3322.209,0.02378591,nan +492,'10 Completed (solidified)',46968230.0,0.3540134,921.0528,0.04446798,nan +493,'10 Completed (solidified)',32903150.0,0.3668937,1006.74,0.04438909,nan +494,'10 Completed (solidified)',140699.1,0.004812783,3370.296,0.02365025,5221471.0 +495,'10 Completed (solidified)',471692.0,0.004798292,3318.109,0.02365137,5211433.0 +496,'14 Completed (net flux is small)',4654696.0,0.3833212,1177.511,0.04413078,nan +497,'10 Completed (solidified)',209348.3,0.004923127,3370.173,0.02365533,3148973.0 +498,'10 Completed (solidified)',234408.1,0.01260851,3188.435,0.02403099,nan +499,'10 Completed (solidified)',104108.2,0.006267687,3350.485,0.02371829,nan +500,'10 Completed (solidified)',20822.84,0.004819431,3390.608,0.02364988,3147459.0 +501,'10 Completed (solidified)',328330.4,0.004943317,3337.527,0.02365739,5213876.0 +502,'10 Completed (solidified)',13961810.0,0.377148,1120.683,0.04419877,nan +503,'10 Completed (solidified)',62448.42,0.004897034,3384.859,0.02365363,3148790.0 +504,'10 Completed (solidified)',145755.3,0.004944177,3375.735,0.02365589,3153403.0 +505,'10 Completed (solidified)',23265610.0,0.3778076,1065.303,0.04437592,nan +506,'10 Completed (solidified)',32566500.0,0.3587405,1004.02,0.04426482,nan 
+507,'14 Completed (net flux is small)',4608635.0,0.3864842,1176.152,0.04418389,nan +508,'10 Completed (solidified)',469025.2,0.007692249,3255.536,0.02379036,nan +509,'10 Completed (solidified)',208153.0,0.004920105,3370.297,0.02365518,3148415.0 +510,'10 Completed (solidified)',46490860.0,0.3502183,920.2849,0.04440759,nan +511,'10 Completed (solidified)',62020.04,0.007612334,3325.092,0.02378272,nan +512,'10 Completed (solidified)',232900.8,0.007650887,3293.302,0.02378628,nan +513,'10 Completed (solidified)',46585.03,0.004914878,3383.014,0.02365451,5225288.0 +514,'10 Completed (solidified)',103361.7,0.007569635,3322.06,0.02378089,nan +515,'10 Completed (solidified)',20671.62,0.007591161,3329.516,0.02378149,nan +516,'10 Completed (solidified)',139793.6,0.004936675,3367.602,0.02365582,5218234.0 +517,'10 Completed (solidified)',23027390.0,0.3667052,1063.736,0.04420518,nan +518,'10 Completed (solidified)',326092.6,0.007677765,3278.048,0.02378839,nan +519,'10 Completed (solidified)',465944.0,0.004942834,3315.133,0.02365799,5228914.0 +520,'10 Completed (solidified)',206809.5,0.004833624,3372.284,0.02365115,3147951.0 +521,'10 Completed (solidified)',13818380.0,0.378271,1121.13,0.04421587,nan +522,'10 Completed (solidified)',32233840.0,0.3596164,1005.862,0.04427376,nan +523,'10 Completed (solidified)',144699.7,0.007571091,3318.073,0.02378118,nan +524,'10 Completed (solidified)',46018950.0,0.3476971,918.7744,0.04437178,nan From c453f468db969fbda0d13bfbf872a74a0f6589f3 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 21 Apr 2025 19:17:32 +0200 Subject: [PATCH 012/105] post processing grid : generate csv file and then single plots. 
still need to work on log scale fro x axis --- grouped_data.pkl | Bin 0 -> 84644 bytes tools/post_processing_grid/plot_grid.py | 461 +++++++++++++++ .../grid_status_summary.png | Bin 153237 -> 0 bytes .../post_processing_grid.py | 214 +++---- ...rams_Pxuv_a_epsilon_fO2_extracted_data.csv | 541 ------------------ 5 files changed, 536 insertions(+), 680 deletions(-) create mode 100644 grouped_data.pkl create mode 100644 tools/post_processing_grid/plot_grid.py delete mode 100644 tools/post_processing_grid/plots/escape_grid_4_params_Pxuv_a_epsilon_fO2/grid_status_summary.png delete mode 100644 tools/post_processing_grid/processed_data/escape_grid_4_params_Pxuv_a_epsilon_fO2/escape_grid_4_params_Pxuv_a_epsilon_fO2_extracted_data.csv diff --git a/grouped_data.pkl b/grouped_data.pkl new file mode 100644 index 0000000000000000000000000000000000000000..22beb4ddd13234f93b080925a6d32c51406356f9 GIT binary patch literal 84644 zcmd44c_3Ba_cz`(6J<=wRE9K3r9yJgjXIh|MI{+hLYXd7 zG?GLU3iU>V-`Zy%-kzr?zwh(>zR&M-`{V8PUejKC?X}n5_gvQD@Fnq$V2nWj{1P>y zrCdDO+iW~Ex2~GC*~4M`R`%v4=81?g%HG3nv%3x3ely#JZR1SQWOjP)aP`r&b8)xV z+~MiW-frXWZsU^}E#P5i<80%en2;FFPl>?KEo>iGdqm;cu9FxM9+qgDsIiPUnxUV# za^=dU{qT?a&$1EhL|a3lK16_jiJ>63DN%zYzJ%=wQ7kqkSz9h<6W}aKybMfT{V7C= zWC>Nt&W3*^D{e)45n{0z_k|A}IFLrNqzje;0VqP9iB(lq_etWLy}I=7H^wAO^@s-6 z3O^~OMaf81PsbF$^d_t&-!_?|5HE^FHNIVsAz7(snX#0x;O8hT9P;J{K~bbS=3|Pj z?bULMvcGaV`~+7!S}L>aL>7>&WKqW~inV)XmNf+!>m|3sKa%La#=%2a+ZmK27V*5D zz*^(mzEjS#gmj{T1JjXM5srcP?zk+9qVn|+MIqYn-`xF%JV~+y74Bhwf^ugxQZi9~ zL9Pf8-7|0Ev80%DvE>m@{IT%34|M1R<5yvdiqTaJHtqVrq14;%eO%z0T?Zb{nMT0tF^FYflXCF0H4Dvyc#JmPP&GgGRsq?0JA0@{EmkRI4fFC$w+1v zVVSs#kBkw8c;@#VQv^QbV671jk(lDak{~E8@hzs9vLWgdmhpe;U5;49=c>~Q%ZX_#m+|6>z1!?Ug*r`C)Jv4gIyV?QF0tlAc(M))cfQykW2y0-WQ~2h1@FJeL#if}@Px{0_zCLf-$O7E z;E_YF(wY1Ff6iRiXjctv!>#8n2dNUz9d=Uw5}&k6aOV1Ts}mHf>rMzH(`2YkDi?qN zLQ(XQ8$!>p8@`X0b0|vm08l>PK`)4O6J?P2 zIKL2QwdvJONPjz@i8QgP54t~N|vigcxrr!bm+zLDU z&}eIl6=WxoMFHM?f_8pUEe2zr#9-!g7i-+G-INcIBC+ 
zOi^~9G@}4f?^^@}@cBbr54E9*-$&@J$-5F)VWM;66r3P|C-kh5Fl%&wJ02pEw!<2; zUKKkwDtQ_PJ74)ymRV~O@;OM0_}V@SZy^5dxEc{3R+-|^@$_m_TEwq6(OBK5IL;br zv6MA!vGb!dYk&ZhF(VT(;QL5#@w#`na3TdVW}*3j7Ed*{v*TwnK~Z=mED(jxTx0&lM8sGAB78}R7;s^9moqjd_If3rx7n|fa?mD;+LLE+YcGnjJnt zAPVv0C%r#KH!EXSz;y3eN_dBRF#H6qU6Er63XFF3!3JgWEhwLfFLw;EtKpGjvDV0b zek@~Vv>&T08Awu=iCPI0T;H8ro??T%={A%w@%{w^gC7aYu^T*x>1xG!@spY<@g>>; zZ@XPGr#X~AqpIoBFLPrczgCUJ^=o88BeoMhAk1T5h~sSfp`RL^__DHqDjvjp_hon| zjSAU=_wmy*`l2PX<0aN|w(7@&dgu6CQ===sLstwG!x=0*WnRG`JUS=3j6<^Oma6o<3 z$Q0~qUm21G3TNQ~40uWr2QH#Aic2sE8?Y==J^{PxRIEaPG@ZFTND==1o4Kup^i&k3 zWU+?net8avz%9}_i^^r9rcVU#G)7eq4z_eDT~oaJc&w2yODu`bLC%=Ia*D#UmY%MN z&K%mZSv7_oXdiFhWA#WEdAPZf(-e}?!N z7Eu(Iw2K>_&g$111kxf}wHxtv3EdV%QCN&^*RdUi8POPcsV~k#!YttyI`WQ1>&hw0 z-rp+-1c>$*byPhdda0q1hw%bfUD}*3{le#8Qxu|=FGXa5YYu#N=?@2OuzHW;c< zVM0+DOX#|+z`$`PYHay%?dkQy`G9g?I`Sq{4HxB zAXgKn;q?-INRKW(#eib5LK;?;Q!+J0P?j~m)0xYMxbSZJ^fn5?PY`D8E~i`|Iz;c&OQe*ES4P@a9?O_68pWZ6 z6%^KE7LR-pc07#N5wnC}pT~L8s_`4oQuOO`tnOrezZ|I(HEC}6O2x2g#_C@8tQ!%F z#lwFaYlW6yASmJ9t000tI<~)gh?mH}SbFhKENm*igF{gmOWbgt%75#lSVXIs zGR{ijt2WrNnmvzkap_Gf!Ibv0`|x>61hPA#ww$u#H2OM00nrCzW}J7boA zlwuYo6UwQwMu4cfP4`G9+w;MTc+F;#*>+XAs(->cl4-r(aPz_BDqyWUGrn)MCK7%mvuMvspva{N z?J}JLGBRak{)D-EJRK__YwiXz-t(1Oa_0l(#?|ZV^-}@dfA11(BMB^F)#h1SeF5y+ z?6Dn1qur}j{`pie$hNqUNboEG;aeY?bbFD9@<(c;D^9==mojdA3DE^sYOQ>ZJer_K z)1zCHKOr|7G&0^ff0Oe|romsRCP_}Nc)0Tq8ZM)&7tHCS<%J!14*AvZv zQtEIzjRh0*xYSl>E?R-6W3;*RL;pB4kG@lhST zyK4bVS^AZ?I+|p@3r(FP69q1KoNKS+pJqui-*#DC(eh3KO7gs#@iETe=F4y!+ppVz z;{QbPyZs81`T56I_a9kGU}sawr5ztxl%fAeOl9Y~E@D~6ommQUT zA%4x-FXsQSKx56dR*K5F0$_F7*pY3&z{4h?Sv3yXK$(=2_O1w)=;OWxS!{o_N8hdX*w839xC$OeB$S2>u zfn}j~To@9y_RDN=qkP%NQUw%D>G2BbtN4KRRETZ4Zxn#qf(Er`2La?d ztA=Q!^i*m-U~pt9c=&mRmiC@mK$%8nIa@>>R7M2KL0Dok8&Ip+mTT5+rO?SW=X%Qr-VjCQ3=RYkn2Ue6fy@k_p{S zGM_8#cqs1id5AE3M^*Hk=|Uv4!eZXT-Rkp(uspsyZp!rHA3_-%oo#!036D-5SgsC# z;Hvk7WQN83w0ZYH9p;rgIv5T$a6Ys0aCo`=5cLTu9x%10c+iZWg)4M?}J*kV!1A{ zrxQWuZbdk7B8J_%9ZbA%1G@bXo?1%{^Br_$nTxGnD49P5L 
z-7hz*wvA-w)vtUrDtqP-J8YxVk})UrzzyaZgI-J6E`yf;1i1aKW~irwmUm@^+weRl%zpn_L8a%+b=-HAEcbnu1-rC2Rkw)dv|J&)ju!` zosbL@HvoIrmsHD~+&n}$L;dmzqt1y#C_YA_I&+;RNoHxeZpF=2vL$I~a1vnY3gRqEREnG^#)=VpNj_ZwwDpEM6a%{LL zT)o&6pDU&3l@3v7k7S2@cAG;oGfOP%JruQuusrT}GpEJEx%IA@@1F}Hc68?c@tBOs zo>xCRd?8FFceUOOlSfdSKIr}Cd3aS026SwmF1Hvtbw@;FN+wW}j;ziRje&83_dO$d zGY^u?#uJwxpAfJD;YCjfle>;UF-V%ZR`duun38{w6YK-^i}w9R8BkL`c$v=Q^AQDM z)#uBtGRsM(|GAx8%GC>jVx~5uMjlR$x9(Y5(J{A4<}3GTpYK~$kj%{Aua^b@$g|V0 z@`UQ}?n`19>sSU*@>eRFGhqXH^gP=Zwg~FZD_OM*?(N#(M)-SyQFGDgt=+EvH($c& z9#6m6{DOmJ$?ID`&P&uNp%#_q+aN1qYu8%Rj@#U&y5ZYt4A`(gnAKGX zZ>VK%ml4F-Bg)TPRiz4!dtJjI(U=Wj`DU5NdeucFvqdSWTc)LxWH!`4sXe|V3s^6^ z=A_I=*N>eUN%t+`aPoNlEPC8>e{ihWa+1f~AQ(%^dbMjkl)vDF(+|d0L!kU8RP)VG zUk?T+@>&Z#dI~X|wdwqo%P|m^-Ac!RL0 zBR{TyA*RX}-7AIaxZiq>#^Kpex?Y?3-F~Vs2r?SSj{W3i(3#6e7u~^!4NRYF<64`l z?|{GJJH3xLKq+a^QeHp*1)MB#8hppz7}WqpVTQ@7?Bj4kJ@WR|*$nrTJ!Y$JvNK`B z$F_%V=wZW^YH#Oai~f7yc$}4>yjj;ll9?REySFr59@O=^D-$i=lgvcTGvnm=8zG|a z8$uG#gu^*v^Un5G1uX2Il|1_^emo?Z8Kd^P|8oFBl38G2x=h^#)P=MITC1kRSf5`+ zjmSwRnF%eO+gdurz-MD>cu-%mb{A~#KtC2H2q9nmigUYo$4TZHZ_uZpolP?_rt?I2O zeG(z#8uUlltbFJOG3@T1ZK&b|uAVcjaZ)b_nVKi@nJr!I%W1^zYkiW{iQ#*r1YU`?OdSTwU?KzWKETj6QuIgRcqlJ#MRWdc@gEP&ZxX zyLLV-N}TcDI}Ks65QN2tGT&bv2bAp>s^2*tg=lE2bp06B4{or^6N}XDgTeJj<6o4b z4ZYBMQ}^57FxI3JSuxu(P&cj}+a$6HR%g+o;GHp3fHj*e5B~}&v1d_BnZye4IXpNf zgLxSoi<|fO_m*!fz_FQQ6)_PKNkAUJ5_oF{)Igco4+4GdhHcy02)~4NmGY7J= zK18y&t^xwdRuWhnoD3k~uDT%aCJ}IT<_$}c>(wwsUqz}+g$l?>cwgG4vk$Fqkl`g@Mt8gPW@#K-REr+z_Eb$RW)@ei5 z)e^_nZSX$|kI4i~HhXxwZ$-bJ_LoOwg#V+mA#xEt7+wArOvsS8+>CRh06od=5Nr?x)B-vA<&H~b$@r-;%%bDqndzfo^zL}Q z$rZbSC5s-+!=V0?=5N^`e;j}+%tj~;##-PUWtaq^^DNO?JIlrwSn30gDXX&0_9G8f z-(G&QAr!!P+CzVp0m|3`%L^)(ZK~P=GLv*Vv}+F|P--=Y=?6d^eJGm`LwrR~QX#7T z69(NVyniUD8QhpHvpjXPAg~n5VGJXnY=;oS@8=QzW2GheFY^Fp*UQ^XX-S|c=PR_N z)qugSL!Zk|Zv@IDnD9D20F&pTxByV6C%9|G;k)HQNNeJReof;K0J5FCl_pFFg94^& ztPAadG6_BGoedPzRtQHjP*gIndR3kSpdO7p12ADjyPZ-bfQb;M@y`LM${SA0h48v( z3k5oD&oL*v2UYWU+38Nt!kr}I=!sk3=MnAi0)csl9b1d`zB 
zUTw%>wvfb<7cY$+t6u>{eYfFn%Xk2zzwMupYzAOn=GycHLL{Nzed_xYtwf-V3u;z7 z30ivB-nR$|f-jsIO@YKk4n(W82PF?srZ2bq%2Fl?vs1fjesx^{H~Z=I z+m0iDV`eEH%%FTZF3MS|WyoSEb1Hhq#OiCqG8?r>%=gtbPN84Uv86Bh%}QC~hI zXb*r%?e~7jz-C<|PZt=@>}-BB^|8RxLc4bp0PXCZnx}8VQaU;^)DE8T>vUD6`PsumqsvxHMX31xXloKvfYvOA;1x=a=*vuY=JS(0R%+Yq|i70m6DS zH<}w91E6bwW=IlS1SBF0yk->DOPpuLQ zMj8)tj$T_!5+R-p{01@Gu4nYsfmymYF?J6oNj}NgdQ3s`_ficqD@= zfI7(7!I!G&7c{`2Ae0~kXkS9+C_+}BqF8vWpek@1O(n?H9Tj*`52iCNWIUZ9K@Xdu zct-(iKU!cz z|1>ck^98`Qkjsqhod9y_j2m<;WGP+og9W@`a80B>5GXmijKXvVEz?V2a|aBKfhmvZ z1wUZ%L&C=4(=Mo=g`5V;3i?C@b=sdB6L9r}(W}1}{DiSGYoQ=OE{9C17q?yU1dTp8 z)_@ykDR@JN=+wRF4*Lazs?q5gJYwSPj8SEvzLZ`sP^TlmZwp?ppckfJ3x2eM<>Nt; zJ5%<7@It!egqT3R6Re~Lr9C`FZ-v0QP>fMINoqz@=baN9^GiDbQu<3ckhkwii;J&ROV!Kpl=ONaVn+ZN~G4wsg} zA(G4nhH!Slw*_F~_@#!^`#8mKB~GGLr)9VEI}U;KD!MpygZ`u@(`WjiKS|qQ@Fk@n zJQaRe&4F1sd!q~iu*jVHb&G!km9pdm96{Yf97|=R8wwn0$;nr!Z+VGikwqv{pq@cT zBe|R|zJoshJNhCB23m{M1mp$m$kZVd#H_@A2?z!pv7kjSt`&V{8Z?+(z)$_k3X;Kb z{&#U>yJ2`Z8dG=m;Nvr8HGR!XDuFL~;dh#d)bJMkjWVdy+2WWKAfWmR=5^(d)!CE^ zcSA>#@SThfj@fkP{_%*6$ZLFy?{P_w~MoUxC2-pPomun zK=2k0N!~NyI74Ce{As2xd+jPo*zHAy5w=3^=ihK`(ghHMM2uigOX*6TTg*nu19o21 zW7!GB_NS=AO~7TG%XXt}I>1`j`vQE0(OuVWeaJ$tzDk3${0@M|$v9JT%;@m@Izq{` z@B!8{e(b!*FZ5l#F;E-`wC@3IWBNks15lthhpRNo@`)tGM;&tSH?VBcSvnbfz6w!E z&m)N=m+4z=UUUWZ?LQH?7w(`8I1KWH?&GN1Z%yOzR6|p?q1E%)lDKTS9ri0T_OAlr z4WiWV3=qDhZ_WOB@C*Vj(8_!2BBW>D4nKT}%BwZRH7>847S3L1gg-0f@r2>gb9WEO zh{U2Y0#U7?Z_suYbg9j8&coBmjj_j-Chr}c=6TfbdcXt&!vhNvM!td3Pf6p_<-QK; z^Uo4k0$asZUYo(gyhjo9mfb!I^NMfAg(|OMGG6YS4RqnmS7g^|Y57hRC?^7LH zpOQqu2`#h`AaiaK+MqxQq34|+nS;wjUP2)**%8I?0mSYr%%E;P-W-L3WAO>a$vz05 z7p8&wAv%|HJL)kjSLYGl6Jh1}aTAgMBo(vrXFP#5lLenY!wm&pKML||;b0Mk%Ten? 
zRI_2;V)?XVCvM_J3)UWsw^aV_YB;-0V5XON@e1dkXrVK%VF`|Lez`p)#zV0D&m5Gf zU~n9DcmWWhOH%$hx;lisrtO5?lGC~>)&?^@j&c#XA+!mVa~P|14*HM;2d?B>rAT33 zaP>z5u`;IsS*CYMc02mS2WwF%29!O?M0x@Q6YGF zws1tk$r|>k`FhX1^Prw-Ko^hJU`VeuFr6FdzN-kVv*2&=n96J$i5``oY zV@?;2LRaJpqU5N@l(YKD+gl*Vh zp_VY-3WX9TxU>~?JK_67bfxn|2VqFh7*G1#FI+%7ErUH}0NT%2JuYFyPk?=IGJL1)KH$en)gI=ZO z0;|9DgPFp6pnSV6vmy;13VL)OsX2CaGXncI%DM zs!NZcDCVop=7GAUo`|r0K7e1r6`f_3h?Om8*A6mX|NP^)ItLy_dv%Jo3jUanSSCe* zkG2B%c6$T=oio5PyEUHO`2n%KGoIeHLZHmJ!wa6&dVIV4k?4YF@gCpqzn;<$-wnW& zH#c2{u{^qq<2xT|Auu@Q@{Nz?z;jTq-Y$V>YyHLZ3NL*VX{2CwI!{^P(S0Uvm}46d%!%Zdd`_xaD}y{{1^$>EF} z15xzcZ0$)zp(EzaE6`vmVu>h0&VIe7V>g$D&pub%YNvo4Qz+KAlhSES{b0?p~q=L5k)Vp1fqYJ5Z7=#GdXNg;?xY zvtB|vu#;cz{&@Tqe8*7Kp83Lb0+LA^^F)6OP$t~=S(Xmj%}!FS+};7%#F<|`{>~K` zi=E_?ezRmZd^J-$_e3ji2vAb2qg&>L0Z4Wmugv<0D4*86-tq@!3h=HoE&!9_l{$(0&53e%JB% zO#wS){xj`%uLMf!s*`!Rdn`EE+q-^6rzgGWez>*hIjzkeYkq zuSsWjA>qizS&FJKx{s}M=q31Kh@E=crS3FxHCaB|FMk<;q-LV((chI*ENvpgb2K_)X?;7Q1 zR3pmi3o7Xc5LlbN@!oa_T|rCgquX_eBEimxgl{<5$t5O*E2x>$i?aWF+D}&vi0b~8 zzY?XtZA8MCFJ|kTf*S`uJ@}Ha2WiQZx4a7x(ATv~2NtYOqwk0D2uOeZ{tfbpoqTK~ zaTwO!f46U*_qwTw)mnIM#90LH?V02KgkGvs^flpS}XCfc=MBqWiDbKILX7;I@srR)27C%sL(K4N4$WxzNb6bPj;k%x| z`7JBkt{fR+dC;Fg#mN?PcBFvt^W@WO7T!YYJwCfyAy5BxEc5XDvJWtpPnM?8YQYTv z@^=1MoC^6hC^HBG&v_S3PJyWVy!MOagu}uPvIaq5q-FcEDQ2)Til)J%zQcDbdYQf&~$#xxmNdjU#Xf@(;alMa* zHAN}Ey`BK|YZ_)Rl_w3MfH2J}lYX-78tlf1OFbV&C&F|i$Efjp2!sRR3DC{k-UQ&< zl=^Y63jiDuSe3i}+R(9z+dX2_cY=Dch%wJDD4K(o2mKkOL`G*dl_(g(IWgCIi(wV) zq=?d-5vyK~9HJFb_9?zG<;9R8A|J$#cS>Fj!X*z<&AOKiVMP>0{!>nPBcJ&m%AbYf zETUAmM{zT(XQZYm=r0VgY$|gnH=ye0*=?nkVwPPY&2-zD0iWvez0?~`K+-sq-Uh|MbR}kKavS0i;r(lZ@%0ziw+4_6_&}c+lE6wZ;ksM+$ zQq^dYTPXr~+MRG|HVLd!ck?-}KZdAB zlvnS_N`c!{q_+8Yn{iMb|223$R`8oM+%1nh+`W48hrd25VX-}v=)7W@j5Wk_pdADnba3-(0C9f19r#9KKxRv3zAlGLoGV;k*AX0~dmyzb&d zK(UD{w?u#TkUeW9f9EEMXwo(rF&?*X02aJ_ZX*Jhqa#sEGEdKeYL#R?@|o$F6#%BX zRoD~evka0;fluXwagY(%Me)pqvP6CDZ8YjVSDS!p5jfiFU0E|gav%U(2AxSa$#!o{y zUXyId_JB#SYmbCiPf&{743w#*hJstcVD^S}`;M)Iot-3=n`6)n_lKmddA|jwTn5&7 
z)$V}tuuMq{%=`2^cZ2%97P3nSj>hb{Wtug6;Rs36KR*3dBMP{s!dvO4O91RIZf{mv z1%PjwcAWWC0GjWY)+Hk6rQm7DQK%3}n)$D%w96oahkX2ue88ZsdrP$?OgXVP@a>XI zkoZYc44iw^pjIYr+9GVwh9Yk_#qCBM3~?k((Dh}drePbi-*(#v5zUbZ-=T`X5 z(NE^pD?bnXq5B_?${-Raw2$*aA4`ujk=r6{uDzKZ@lq`@k}$8te8L-IgkSZlMG+ zc(<)@=U1StX+Q9I0)UiHV@jSrM1ww=lX0LQ=AH8GG#RDY1u}`gJ}Zo01315Jt1d;p!{j&p-(_L6L{y43nWOY{^%cBnXve&W&vSq@8` zNMvV`;MbP706sqNFL#te6mq2ba#@fus9U!n2mHyo7IFX3$vOace{H{?BL=Lkd4AI# zyCALJ*M1Lm(O5T{WTQ&~+*!D{u&^1xlexxQy`~|VFFvc5L+B3h+2|C?B%mP_#~=C( z9;SSn_+YLk)TY#g99yN}S~NuQ(um6ppsXohc3wmWDAygnoLV~;C`@1H;?e3zOJmxB z78G!UX>T@jV6&!tJE)eV2icTr@NOrs1Jr|@d(5C-_A&sE^i!K+0)e$|QE2D=_ekA~ zpZOdTHs@~Egvwti2;a_Rsw$vBC2tc^BOw|&r;FJpvtgH{e2ca^m^K4gax)&++;Ii4 z;P~0|>hQXy@a7iyHSpYW zL*D?n9OdO%ul8}V0FV+}_-a3tfK;6qAF>ug>ZLlk&mf&(oYW-&{fg7CAn>XqqD29O zizBlFol%T+ez2z~+5)BI=jbADSgTaaksaP{u=Dd$cE50N;{{4~u8j(RB7m}SmPb+- z1JGTd<8l^7EBBOdaN{FHnfH8T9|STlzvp-#dk;`_R^^`+g&^hSiMg)*4L+w@PWDQF zUx0+iESE1qv1z!$j@VuWGR1ODYmCtnt*@OkbwdwOQo2ohvQS>=d@f39Y6Re5!8?J> z23EWYc>o*NZq?}!0hyZt53`+~0w|qw_RQTKz?ye! 
z`&9i~0CdFTGuOBPD|Hp?bly~?KF+i?C=5V)qH9besw+#<{k@-?1DJO_@sQvGB$E^8 zVwMG3hjlV7?im9`=jZ}uHi*SiaYpfKbWG&YA;-vE%gXsu$oHON?U zgtFiJAz^XW=ZFAcT~6`z<0}Tr27go6*tq~U?$o=jisr8KcG>pLu(wh-@CA|ZEhT_y z6|*w88P(L#I+>@6VfU@yN&2nQN#HxC_aXwV)R2{}cv9WC+lkK%l556j5 z<{cA#9a*{_SQ}(}T|15g<>I@BcKvhks7%Vk#d-Ud?OV6o*|4{}?A*-WzQcZ)UoB(F z{G|V+Q#x9k`bvo<(`$@@bc!lX`G@{XYUwBoniaVPgE9QY0~B#u_^0mxUn z28uZt7)mP40IL!Ko&VRegI5JK$O2dKw+6|dcmPIOe`hJG1$r> zFxWH?eu6O*+u1a5TwIPRTfT7s{O!XnYuUv{#3xzHF8`vz1%Ddcasm!wU84VPf7#VN z11#wQ)?S)&V-Za$TTFuzYZ{cU<^TdaHy+ZI3c8invdgCjWb}~??Ix9H@V|q9UBFsn zj~hR*jBY(af+TB=B6(dF0~v}Drz|x+;YWW&H9<; z6lKlkFEp^BTL&0b%3&F!o6~4enFO-P@(nu-*4XZ*Sryl4aN7v8So!4$&~xHL820ASm4hh-n+L!u8XSK|gTiJIL0Sd$o?{)KL$i)jo0uN?k2?y> zSl}^MH5O9A8pb?NF~#gNgJv-t20$(?Be0C7h|uvh=25^bvu|`_81n|xln>@u*sMd1 zrhJku2NKCLd$W|FfU&?j4hFn77?{1Kcd0S|58!~X@hCbijrp0hOgkMdv(|Q+(rJl- zF`qdC_MV>6($Lq_#?@YPx4o-_kGrRbrpYc(FY1ir#lJ%>63w%9NXJfxQU9xconIZ^ zhM(_gHW*wqF6q}veBqAfi`>ykkKm5BXaslkxZ;+EMO@K+29>}9L!s%0@}HXKj$;n) zV{U19%O%}Qxug{(m-H3nk|uFn((#Kc+Bebr_-Db_>J_#oL-sLubc|+kNBdtEcl2`Q zmd2c1(q)l5TIR91qW>AakM~HQB;V`~{&yb}+|hEKOZsQCxT6^|i%UA&az~p|Zs|G6 z6%7RGeVhir<9gsg$-n!U#TD-?;F1RPT+&UPOIkxi_egH&^9v1#xuIz*$t@i?9j4*u zVel#WkEdPS(sq+edMI*9Lp(0&ZpJ08s<@=@5LYyj`PgF#YXm7q>Ki=8`VHT+))2OZsbaNwY*Q>3qi(ZOs1mw2NChr*lgiZ0_i3 zJlOvAKii;|OS+A6N$W_i=mYq-r(N9f>H+v;P;TfW4}Sv44Nbv^bY1>Wc6f%rr{soq zyU*f3}A+mo((%lJ2TQJ4yZrTV--d-@`oa zXky0|9m)Row2NDspmRw_>k#f}PyBD&*Z*j*SuSZD$|YSOxuT`t-=206T=5nG=%UFD zE#aXdDmV1^rrI~-WB5PWteGUZqVq17v{8i~rrgmJ)4}2IPrIOlAXhZOF zr!JQ?Nad1lmt4}ikW2c|aYa+Hzdh}Odo@?|G3Sz|)LhbG7<#ATX21X0cClR2`;$u= zIdVl;zP~-~;*J*qz%NyCLsxgyewzzgbaP3+WiDx+3%y9Wp%W@JmgJ7MmJXqRf7W4SjXzs6l-~RS+fhF_b)Ry!A&jyi$jS`pry@8?8f1yPnYoIOP z|3*)|zkRx6Nhkkb?@Ma<-`yDV&qkBWZvD@FAC3OrNAZ8Fo8kXSE5W~A!CBJ(*E+Ns z{@?EvYVt=j&t?CG-kNLv-n5eS|Jo?>|6@B9W}P73oDtmBqL(j2B<3V?Jit+FmWE zSo|Mmagrsda1YA_<<4lNWW=kra5@NVs-lEh;T2w(<^R;X9LW%$t4`w~ zLfy)!QbD{NJCl0RJj>N$2?xorgckEsmWgg(eVm>FCj=TP7DK!a4=Rvvig&>CNIU9% 
znXFx2vRRZ2|2qPHf}Nc%Q4}D$tfDdK_cp?B$9y&^4hP9wOQR796Q8t7up55e>I6mU zx)VaZFOwynzX|*Etb8widF01)8R&JCBx}zZIgo|1Wc3xXKm88qC%n?M-WLTGEPsgW zp=Lq+KC+i$v3Pp5u^YeMMB|A06vtUpc1CwOV~W`8l~~y9S4lZ#=f}@2m?gSd8H0f7 z-Z+NuUodc1{zzDkSv-ervHGZxJ=poDWun+;nH?`ND=f!foU;5y6kds`SWFU6?H}Mb zHg`B@;TLe$^oig!XH@lI%Z%JIyaWmvn=x>*a-uwBDcE*kH+q|l!7$9d=AtoWhb7!X zV0Fi$by#@s?-c|k)9Z(wXRXd|rYJ1YA3@mX))hR|``TGfMTanpqnz{zX|bHF@0TM$ z)TFuLrDWJNQ$r9ndmiKI^rn?yf7;9LQ?Hk0IbMIrq0|{O1hMm8OE+T5>io`f#3I_w zUs8c0dY50KWttU%g9i0EMai&CO^;I)qIHrw6-=UL{~c;9mRJ6G0tpkXg0(cuO%lI# zzIQ*K7fbXYjt0@|cmvP0X6)xbSB~ee6v@lm$9W1Z=hsR$1F-__sT453Pkw@xgtPx&%BBALW(&TFVQ&Z8k``W zDg__{mM0$kjw8XJvI0l6OQ{y;g(OceX2~x>;SJ_=Mr#Y02vsFI^-Z}o(6h-kJ-BX@N-U0|U6$89>qbhL zhyOOV6Iy@yqxBiQxs{D z$=Dyq!%Ha{qJ6Izt~3nsXe{jc?NcKqEY&&-4NC801h;N&8M2R6k7(dM5q?rii_(y& zo{lMu^^!;$HtRJG-mKPk2IZ8@h$sG7X50sQmV)uCFeTuuD3+1TETYs|aTgyMQ(B|n z;%&zgSY3@-o6O=l6iZqg-z`~ztb4H(g%M5{dcij{@pPqv>61)CWHqJ)j0tX}gaujm zF(ur+%$lNj=lxcufLtUUt*{F(zaR?H`C>obC1c;xXItbURTD~h1m_mE6a3FP91U|Z z5rSex5A0gy^BvfEap@);Z1F|47>s!mht;D7PUzA7?N~VSq?s~hXUuw4EX?k_p($lSP5>UmDmA-dFT2Wb!R2gT$9R26z_Xkz->i;ZfCepk!{)f#O{? zl3Env#|##Bj8~983^>tc97>C`@BsXTgyktkvT!q#QCxxnXG~u?X04^K+#))2upR%Z zcFYoyWu$$#v_!v3e_lqrLvcaLL=^dv<*<(3vN>ORL}+@;SPFD>aTO4xtY1A+qbuGa7q z91}Lc=K#xbBZ~u+|KU9TGGrh7r8i*>`L@Yaw1^kQ`07PGZzu41JihHa_C-i18Vjo! 
zUB#i@wCe*hPZDkS@L@?j^ZSnHB=8{z3y*M!#1uK5^?1!=#dxUHBl@4y0kD*Qi-m>a zr((;l{)NiOGAm+1Ju4N9G8)o#T36S^&kqOchK^wxCTxQ?>S3RS2uK|`>lrx?Ib03i*lC>Bvu1l3LB zJIPW|SdUkkN4^M8FpQUeca8MopE%&A;yXB$I%A0&o<%69${JCKnoVigL+OcEfdE5< z8b~x!Kt4O3q7XH@WARc7FJa>$_SPNbP%@zm3j%|eUGNjs73ApMZ#p4{Ls1xg_=IMy z{z|_CNB9hV3kq^CSeGr}) zVcGIWYNIRQ^^t9tGH!ecfmi#reF{wTNpOTV8b2h|bgl`L%y!@C6FpTkKv@4>NKfWQ z08@`%*t-DUtJhSUn{cE7UWPbKFT3H|W4$`s;IFb1)TcMdC*OtF#jz&{&-8It8n%hY zFpVdN>18)edn}J(I_~_viN`RFCx_`}H%xo1FDN%4O_+an+!iEJX78H*ML?~H*LMh??>a+qFr!?ed5rsGcS_CCG^zVKc~e@*%II`gjsSQlZg zegF2~o^~Ak5zOA|+pR1F){B-HP6N40cIst$dC=ylax}(e&unWO!j>+m!~5 zj<==&&Z{jJ$y*LALy_tyg75~$w(=@NJqb2Yb}w%GG43kK{5VpbH?soXw(1dgL~sAJ z7+`5t?oV*(g_LQ{Z$AIeA)u&z^LK24*FJi@b7y1(9|g+fv5NWAyg^2&y6$Ktyo%A| zoN=e^tXvpErdO-}T_aFZtZEq@D|zksU|C4&s3CSz z?&VmNt}_GHEoHl7W)SB=f7nK+C1XzL0gHLYpw|-Km^sKwpWQ!qL@L^>3g^3=Jt|1% zA#r`XS%nbre9P;-F)LseYy-rQ<)KewnkzrI4^d}pp9^p@SO@B&R|}UCpF!BM zdAi(UM7blPF(p$D-W+^nb&hC^7Npg=nZI8zg*RsU=h^93c`k+R`z5i9bu0t4M!B4_ z-}HoJ?$#83ImO8tWF+UEaWawuJ2%Tb)~hZenJr2|-7+nm(EO(UN$v3^u#{|dzn34j zuOXQq5-;eLiq8TH4?8kH6W-1lr@?pZjZqBXEUrDtWd~i#1_u zuJ~Tx{2Es9)%1M~z9vY&hS_9!_*WQW&!Uzxi4~C8;lVK(%*!BL7oxJ(<&YvQoT2C~ z5o?gC=Tzj$TfKng=2(jsNkHtI@W79$avhQzP zQE>YoZr4Mec0m%I8Q(V=-1Mkj zReUBq9=7D#!3$L1iYOIGo`ph{dWyf*GYtlqPaO>o91A)r0GkP*Tovo;6=5A!XB@it4-zWhfoGtxe5EP z91MOlgvDM`wPE`Oh|i$RpP*2&f^dBe?*UFLbB^nchv^iKsT>YC;NGqc!Vk(W z*wtKyO`rF)ZD!{f$kWP#kV&})0A6;@NtvGxc6MeY-M55V=<)hl^tk2zB=b#z;@Y(t zQ1u)4Z*U5=hMBURMsMlRgV*-%F+Ul~&*kXf?i zZl5lS;nC_&&Lv-9jhx{TB`J$`?X^4&g|#FzdH3kpd=X1SSo-R;~YVYJef&}oF`T6UpG1S63ww76N z6Ix}a+rGq5h+)0T%uTKekeTdJANC8^Ri%`hR=c z#k9VuV|LOHPL8%>UE2wpjKQfUNsm#y4q(jjdC;tcayX&w;`nD4Kt$W!9Xa_MmV?ZV ztJl}-!<;BEUNj#QF7Y4yy)w=5AqB6594g)fLXvxS>`5U!KP(s9xTxr0;tQY9F3bV*zFVGik*` zCrbxv{QSt%+dFn^ZJ%#>G{1XxHua0i4*j0TkD;mrzrX?STTxYt!>ihXaF3 z(`X)CaQEvuMl@EwbJSnfFQjqB&uZM4Ue~(inE|&~PSrakh=3K*)z#_Ab`qSNWyW{BoB<%{FPhy{{@ekV@w8@=z zx)D$Njrgap*7iWKpG)c`>N|5^-s~2;?ufxw+gqot3#+o6{ziG!JpH#3Uun-)e&Ky) 
z$w7ynAMX5~NI~mMbNdJGG}sEq(iPYFy_sdHwT(y`cE`n>Cfz3;Ls|?dv01)`p}(sX z{!Tsj;P^kg!rM~S(wD}i+Q=mtSu;IPJ)n8i<>vd*zvy0gSC824`|t~m5Z))(bWA@( z2i5j?zi~MYfgy>*>btb@pjRNt9*s4x4j-hH`J8fCxd#^o!HnNZT}oIYn3e?^(J z+@j*H3(3@fR*ooa=J;s?C1Rk_!sPX4l$thKXV*Brq!WAi#A3heR?6d)PiEE~3u|p3 z44TnzV@n#tT+$}&a9=Zo8n@3ky4Mw&YVK!cwX&-{l^S>581eFG725OU18Fa&Y@w}M z*Z%aQ)E_Ew`mQfg_d3n%)x5*g9v7#qecCNBxh36G_r$~JU7OQjc6-d>WyzDe(y2DO zsk2L?<%O>K^&1x?`Zz0p{0Zq%paiuUUm&UC^IBBZbAausM`I}%TJHM!3odd-9t^B= zY$%mTPLulkrBWU@`SUi=zYR?zpVu9@T3p_9$9L)F60GP`Ty!o|@HkzTJC9XVRwCRy@30iWZ`}qQAp-+sg_4@0Z=Tn$m~UO<8G^dTt&i^o}D9 z<~LhHC38o*OqRFGbj>ikXF*p>=X79=RDimt%b~ZoN4Pej#!k~upYQORs!UpS9aeHa z1>>7LhXn7T%sxJ`!_VYv)aF|H(cQzkuFI(p0p>S!*W*Ui6-r`!1E$AsBGBCAvW zMcr@r!-*Q}+Kj3m7*0J`*Y{YZ;%|RZ;~xR@obRlslA7;B;?KXL{bY=?NIF5w3!O>T zYU_I!q&7E;=!PvTMZvB3$93;gcUYNSV*RNTWexhWd(GoTlWxhUy1(tz67Jy!RX6QY zZ@x9Ai*RS-qLJzE#~W1XLed=@lnbVQdiur0LQEDzusa@4dUL_7*7~Cm* zlfhQ;k6!w;*-C8|4o*rl-7(UjD!ks|?VUeTZ`%E)S%+QLrwpoe`9A~(-K1Fx=i_oI z%eHawc3KHNFZlI#0p`cfb zj3yVUqb;BLQ{(HqpSD^OQvKinXKEZbaqzdp)IA@KsMKrgh@(_eGp%5|^@C|$UCU^c z@zt9&?X?`f|H$DLwE3w14K}rUOO5+@oVnSaGE&!I>(-w}`DsRMx~EFzLNukhynO7p zX3tWZbVt7!b2{}4-AbF*MiY;EQLU7Xs7HjTGb3^G<3>}=@?;YLlJBy4gJH}FHTllv1pHtD3sKl>a_U%*} z26WYp2l$3kaBEBbYqx6BqoCXKn;nRKzmCr5z{<4~CzhthhZ|Vmds~xsxaj4GR@w4o zSij1q3Z)NHW23A^T@QOuRe^_gw_BUjex$7fhj?zGr(FV`%=NS@FL-j!t63%XguK#9 zp0(*A;^&iEY0jQf0}EDKO(l8pSji(SZ9U{R@)yH)wABatZJJ&n$qVtw1D^E!Z_6$( z9;;QSA(>-q5ZRIcvGJERT`91t*Q1%w{Jas5<|aFPm-N+2KIxaEw;Ufp)W)0*dzzFQ zNF_68&)@U=G?nt_CiLeyMu$d5Aky6*F(HvsTKDc;p37-i`l};T%Xe_orjMipP&? 
zrLhY~D%Uv=9xD%VCoja~zv0O|EW5mTtdcG2{X6%jR*L^^y2!ZBL#-6^De~0%m}hw+ z9(ll%d0BQ_t)0q@=|wJw?q25D>H%6QVRlf?lWHjsapyk~kNPU@AergDnT zq5Nyq`D*$2MikVb0^MP)v~bL%`+?hLQJebIIjeu7s`~B54KdqBL7fpBs}@;DRUMoI z?Lrn&NljEGT-xaKoT^%x_RxV5y#l&Droxfw#uUjemVQ&}B0P_1_NrzU>|bHfl=QRJ+sc z_6RwB>t|m0VY*){^*1{_WnuPxDrpr}$gy4}3R=p)jMIxVURG1O2buQNN_Ow)v6EKPFKbpM;;0)1i{w|A`sGv=p?;~A!aRE2t+2#`s-`-e ziF>z^N@}Z}D709YP3Kd5s486DDl|pCEZ%J_uAdxTo~nYgsOUQdbHCl73#^r5AG}kJ z6+35i&YM$Glx|t?$L+>Z$A9zN_3S=t3RcMZ+*vCv=)QaCppIKCC*AV$k@AvYLgR1BJ9G&iZRyUvm$xcxQJ_S3I*ly)UH-Knl^Dr4 z+5NB8wZYRa$+J4$M5#CBR&6JD>S+|zTc>n1&-x8h-D4>gT<5%g``zw01p)GPY)hkX zQu6^VM=y{Yb7t45qjOyTiAt)aP;0ueu9N8_(JPFSXC;$zg$~md^{gFnln-nUUA*U8~YD}LL&~ho1=24sQZADttH%MrGsJZ|`o7`K4 zbfJxMvcIpo#tKQIekS)g{nGRa^dIGD~eXT1tAF^=?qNi5;WnXzVl(WdM%7Wv6`7*eo@(XAiue zwd7}#R+=%!ytnH}O7YmM{?x1KV#aT8uDsqFx39jOl2qcS&J})d=;p{786S6xlC}ih z%8V;C&Vr<#FEpMn;$Jn_dRx-JO)u!T)+KFCM6V>r{iW+GFQF>(fo_aJ+b(3c}|`3Ur)Ov z&#E+o9H2$7s}c380GhWvYc`}gmh#54c4BgBnF|!;jMx%Q%x@!un>;B=C3T)1zCL6z z1y$7)_pGg!xXyiW#@3`9Rn>^AmcH%=CH<$3Z;tIAN+q?_yz#7KW=E?ztu#Aa&3>;c zbU${aYbBfGFVl+@wxk=U8C0cJlYImDi}m;*K)pSi2@|a(%d+N}8yv zfp%tz^B=r#u9e!{>ge2K@Eoct*QRXig0Hkvy9LD>xMk7`$IGULGIMxYca#_3T4{o@ ze1bGW_>HB(Pk!H+vQ1eS`=nnf>Dx2ET%snl&w@+&ZB|85;|1zD`yBT>oIh$hwTV#| z0iyBAnlum6)u0mPrujtNQr10wohUQqRfS(48o3Ie%{izt3{cuBNhT)C=|h#2TJ5Xu zM?X^?ve71FBK72M$utdH(Ep5}Yp8d2Q~HH|wOlhoE)i>QJ_HXHQ)tqY7nz|))ICr8 zrB8k8t)UEfp(bnW&|>syW>B_OnK8NZL@TNqn0=qlj7E`}114P2P_D-urBq)ZtCcn% zmeY@xaj`+E@_WLR*_}EoE4iS(Qj?{8*jsqK70YE7G`tXj=Qx z&>p)wL0P8wCprwjIQ9ee_mXjHW^X^G?Af<Nhi>sy=TK>j{S z*T%D9TX}m5>ZxmQPjhV-t9R7DT&@oI(riNB> z+(~1dyt=c=ynW_J8V&SbM%$Eca$e;s%F8<{DXX5LR||O=Yerwejn#vCnb-5$(5)Y} zDbVGX)#AYvG&p`FrtAg^8jpHJU)|-rQFGL*^^fkgt?5GgS*j~8%46mGvClaZu3jHsV^=C@YS^aH9c8^oJLEZ=2Zo*MO~Ygv@?2dC>PM(c zxG)d--qQjotbVl8&GlnwsVYwxw3V{b4slcmdVQBcIqxY_mAX_8>qZlzJYlT)qs|K0 zN4M#*8io%&H_+UoXTWucHzxRP=)y`nbI4AGdQX-ZGK1U#AR zX%_)G{|QgJ6}q?~?n4cF+Ljd&Uex;z{cKWrj!`|6d{pxJ(xXOy?6kJ7f09Q&JsKys 
zNnG{uYI8aGPjScfnt46vr}T)=B`a!#Sv7h9>)Ng2i=#2d^wcgiU-}r2ZF1Gl*$Wmt zpof{kGrn8Bre90)uUzA#XXdRrbYS|riY^LvD1Ha z)zd@uz)J0kb*kNtN+J$@^#7b_P$H`S8}Y~kp3KX#OS-Ig%%b1Kgs-zJ;A@KHW>}Kc zaM4|Voz^zMFTk#}uDwB3K%F^_dM&M;Q<8FKutYx@jahE8wdFZ#yvAu&xe4@Kdh^O2 zCx3ltNmV0k=T9v(oJz9meSBoJNNXE4rsuEcsr0NkFCMFWOmxTR6=?GbvA>NQ9j2|S z*2!8_l@75c+xuC!L-edTq)|ubZ*|{LFnnCmj&FZb@Za!c9+q8RJl1RTsvFGpp97!9iJ{}xK$Fg$vDPH2T9F$oV z(SHV2>3eH-$5QvxTUY6q;j<(U#6z8xa~?7o{!2#6n;4&m>*%K^%E*5mtG%R5`h3Q; z=Q?^KPU$%Ls@h=Q=-zQURrlL1Y(B&vq!ODh z4pxai293SacYwk71O2}^@NaNei=``50o=$e&EWrRVXkz+a|bZHU$=47M7ZOjDkj4 zU)pT(r}Vv?pk0|Hr`?#$gF712-3jT`#LYOIx=l#4>iq^J+ELJ9(D(Kq=TYMe>Do_4 zE>a%1Io9;XLb|yjV;jvnx$+QIEh#+nV3d5Uv0rq-UzSquZa>j_PKE81WEXmLuGgKe zM)bL}qZZAfQ;k07-n`SQZ`8&nq5GRe`ZY=4H*ncYOFEyhSNHy;OerrXtG4){9u=vo zv`NZ!2hx1cO{}hM)#jL30#H9B-2UMb9ccQLOE9wbhpMyqhSl5i&RBm?d z#<`i4TQMni3l`C@)UmOn`nu0>q980d;o))`jr1`)7x+H8N`ZCTd&R#8P*vQwZQphC zsl;kecE(fsbxUtu=512GRTOx%wd%9bg@W~KX0CWrn1XKWJ>BjNqM+@zH5&_epup;x z!{m{#DHu}FM@x$^3L2-j8gk*U)wRLXE=nn6lV)Gn zuV1J1jlNI8#}XNZf3%Y)=EcdIhfb2)_^&OdnIH#Y-3}e4p^YZyUN(`z)HrMZn?c!+ zGd$&4N7Q94H8hT9E(PV7Vlz zPeM<+`Pkv|FYA8JY(RtE66?xor5f4Lo-=}^locM-=G^(WgRH&i1%Q}o%X&1WE9&hv zVtv1|v_1%|)PD5+vLh&^PPfVM$)wBg-7dY?+avU*1kx^d;2kh5)1i^0oUARZ2RN;z z&105k?rcB_gvN_@ZT}O!pFl=vr>`Sr*Aot#iHAWc13EVe;$uAr`F3GtH;vs=ph~Yc9tA2P|$u`lhaQqOG1WKduHe- z()DPo*9{u4O{Y;`-?8yOBfZ{H&|%x&v;^vY(I*Oqr(PURB?Fu8KmRL)0;~J~Ms}is zQ$J~TGwoD5XMMLtf6SX4k*j*g?JYrD#m1JLIp8<-RsFDruIpdMQ%TIbe_Je}iB3Pn zv2fBu>fy1Q6Bo3+wSr{L;%x~lHuzjBq|Dw>_;zJAtN+>DGL4=XlI zF4=G>pCv8oT(Z{gvz%I7YwMNTEyLy$<&7?6U4`rI=>n{%dB)vg{Ybf_&e_TDXw1-s zd>%U6BPYeJFaMjFlbXM87x17fx}wmAsAFSj!qA1d{c%};kVuklO=LQEwwjuC8W^&!(IAuUkn^EKJF4DN-pB$a9_XE0y zXcO>bj>R>TW|Wcn{;B6$d=9la_IlNmu=^AoTu@-=L+V!k$9@%x-bsTHwnE^ak~S?e zY`i>G5Bf5?$|Xvvmc0sn96o?jYNBb$0<-AoAsrsv8(qwps+w5qOui>kN&EJx&9~A( zwK;J1ipgsz6+$Y6t{ZifI(3M7=*gdUdnp*L&0JKUX4a5`ja#oAWI;jGm)&o#iKC!A zeU;sEoC1rw+Qho96bz4?QR94Fd8K`BNWE2Ms0(*o<~wHIISMA~P4;yvOG&op{M+x- 
zDgVM=7hK;(>Px|wRI^q`sMkeb?q7Vy=iOBDD*KVg6>GWi$)^>Ak|{XXX<qHyZbAQdp22Za%OzLFRf+VatnHnDVEy3Qa>>TdGuP~w10QFP>~C`8)Gq@w=+3xgT`sX`&uBR~ zR%$8wtsiIcrMQbM;&t9tGT&40QO_w}ce=FxiiR-5e^Mt+ok$2?n% zo!5)1^u3clWz#`*q5rH2wLB{ajo*yUM{~bUf39?cvK8o?ZrbMBx6WOYtIV{g-4f)W zz4h4jl*hE^Cky_qNxmwaD3genB%D=)G;e zxzEj@gJLW9h)pk#{YxFzJ;t)Br5R=R!XA6PLrthVh&EAEPgzx5W>A7EuS@xN_`8hD zNhyVk4Y>BF-M)Wlb>e^M+p7|_E>lWXs9VUzm@*)sz#{9|$uvMCH7Wnptu6;wof=Ag z+5bp#pv4JF4y0y4rP=NGj-wGP(B;U4-|Z-&v6cVv`deL<}361 zHvG4hoSNMtc3V=n3K>=WZodjWsw@1=lU6UPF$aZHY zZM8J5X7vCXi9;-s?Fvtz^NAWVaM{*g^l?I(#yx73X-aJ_Y;^bN>`qlN!I_gMKgLhpR=bRzu=%BP-Z)iBt_s?8=t>y>0q(aioD>QybU$+cj`-jFteTS5r zXE(lW1y6n(vo{RG#>azej%@7;sWd2PYbRtcu7sp@Kb{nHh4 zk`3&#_~WIQ@}6A_+1{hoN!aJTUyc-)v;W-BlKHz({zYH)Ejzms-P72lpQU%X(GK;) ziobK4MV~7+Y~KAzvy7-@@GVP=A=Kw%!?OB)i=?qoZ}nzZ*i%ZG*i(zHL`}U(L2{$L zd(KazsZJz-)@3wr{1;Kx))J2bk_S`Ckc!ze-%(z~ z9(?gQv;97L+9lx0Tu;0H7d*+PVr~=gf53H2sO8or^Dq?!1(=6dm&;NEoG9QwSaegm zrqGKBM5rBn;*}E!ffAG=C_rJ+g-~1CN>G5gEIOE!LRlkZe9Mk64+ILrIu7Z zr3Q6ot5xN@dQn7;>x@@Rs_s`y%C}ZZtj#^tpsE`58PiHB5f)u;obqHBrH#YMD@xG$ z^mQ+a=-*loZjq7`bUrzj{#Py;GtVn0&}^Ha2Js`6z&^kF*&RR6S4)h1IT)=3s#o{d z9AxmFklfGkeDoh((Lg6Pz-9YYP%Q~+tOTY%*Qmj__G<7wQVo7{<3PR-zv?JecKO

    Wmr^B^%G3i&oBqQ- zC{>+LBq}AH&sJ7TPBm78)Z6$+^bGzlo9Y+W-a8l?t@cPl~X6Y4)$ z??3W=3Kfx;c&4*b1uj(+-`gvv%D%u&rOMveK?xlH9i#^D>UvQ`7qXOk(;PnzQ%edC zl7qjxror-v22>RwFbUW~?Pwn+fj^bN^y?k9M9*FGyLxt}Kg`ss0KSl+Pt=lS50t?4 z+bFeUZO-e})X6F@QcCQ~E%Ty?1j>><)RHnAloIQz{nZlF0ZK`yk6cwjy;prEcuCZV zuJKVdB^{FcDvceI+bcomb3sbbf2=+wpWL`}>Mf;26DJo?2_#?5wijyE7ImbuFSb@` zFAbbqzy-Uiu2Mp$ESlrTp>|ygm2$&%BU~mEPVFd&x zro^b8hE*=X`oINYB~vhaaJm5fKnh_u6Y!ppyO{nFb_h$Fg5rba1q=w92-}*#`82G1 zX}|-qFCszPWUfyG?~B184T~cZ`l0WY zv@zz2B=$};tbc+J)UZD$03#MqH1I$f6og2o0cn(O3ahLM zL=6-*Fidb{Ur0 z3hl~ySxp1}mHkK!19U>Ls(!vx&g)yz+u3 z&{(LGVC&#su>vhIBMXO8Kz5L@%*$6)-cAB63z@QjO54H4(zu?AdXjUyLCwOdWb6)F zmh`;{?O1+6n(ET@6%D zuzsow_SZD>O|X8_18=>Onyh3?%+sA*n4)fHGp>^cMAIN!^WJCqJg9hikB@< zV1Sy{+uzCGt*j#fPwQW<~%?U{h6VA!IJ6KcvVQ2*%ztZ!h!tc^z*hz!e8f%(AHlG~&zI4$52#X&h8BtW#i#_|pNf$AK3^KNWo9ax<@- zbB0C^&X_$^i9top;*aZ6avH-Y2XM?nss_ZO>+G^MsX!N9>vfIH*)t4t zMvx^as?0D-H^vmqS%yG0*D#B$VOLeajQ!>X(T*v***Om%%s84`cKG%Rw z7WOLRjYW@+R;EW-=)jbru`=f<{!U1GR3=$D269Id&32%b*^CO#vbbg%FGx93((0kC_#PO9>}#y)*;D6b2#?HK8!ENi0xPRuRFf!DV{8d(yI{<>eRT zm^p=$2xv`6P3#s6_!M#?(`&GsAQ^)~6_z4nY|xxw8v{p$t|)jm_MLPC19c-i-y)0+ zTq?XpfNc<;>PN48v5Q_~1E4AdMkd@~KtV(XsS1-3s2fx$UG?W{J8teyduC)6N+Xyz zj6J!7+-@V<1QvQ;CESW$auN_LOOBx6(Ae$d^h$O6)7yriSy2TXF1IcPtTngxh|P7E z4}1ysr8l&|x59tS4an*2i~TOpRem2U7wC!|!la75)9as7Q7x!(F5)#eGKbB9zCw)z zJ15$7E@9QinOo}ldVNC+7gLVb`8F(!8 zN$_>>uXO#Z4hlU;Z&rbmg+~d{4kDIIpod$X+7}IgSqPON?qFnjrWUX6I+M;B zC8#`DTP|-WRJff$FJ^(R74{|LdC<3DQ3JDuh6zp&9+wOK?Vx=V>CI2@w(v0l>_O;S z(Xwe>?=B(Kkr=#%mouvb!-RzoBM`9EgswP-I zxL!iJW+oTTCZIl5`uc13FnE4Nj9G85=;0U0%!7N)aITOyneGE*4+$HTE-X%g`PFP3 zIqN&Ur3)Kd;JU1Hg7*{HJ!{$Ec7@l;;2-#V;b$|z3&9fvAV7SG+8}vhdIAO1wc4=u zpATMiuh`fYNG~g&U;kz5)Y)VrN)NN*a;eHA@5R5)M+<<%`fr1iz_oip_*7I8$ z$lRcPVS@q}^s^myu$?Kr;s~o7qZ$HmgSrOKiACJ#wbeegjfI2wlvKiC;=Lb60n6RK>$V=4rv%ehG>HzH!KX=aBk$%@de^u8A##S z3RWqw!m7zzM@2PszRC>wTkKJ^@%aB_^Kk6EFx$V?# z6TKu4Nu4QXVYxB}ss3pM>Klr!PJx0m)2C2$*Z(sQ313bCRS<5N$Pi#8i*q2Wgn0ur 
zg9%9(bON!0f&)gQKBJOQ>A+gS#bH=uZa&!1%3cLy*_h&v!VS`{F{r!_$hEZ~9A5(W<=9$rjZ z3KNe}EvP(#^rT z5V#V7ce4HmK1}#S448oPga-&v3?dOolUN2mAp{Cy3`P+nCm=sz1_B*}S_Jk42q+Xm zFl2Cy1cbsYsBi@tCWCAQ8fD4V=O@0k-ESZbf+~Y`1S|z~DD1%m#>=1|flmP@Dl|go z%-|sbREcL^6h0wWu-Kg;W{O)Sa-hQpT%99n-V0D}moZ&w?clj8T6~9)zs~kK&5U1+W(4CCEIOTU$n{Mkokd7??oxpl}J38mKL+m|*qb za;=)6o(7m(IGKRUb& zYv8y-*96a3QRVrBb`36 zWo^*B!uDhw(A}A4$Cq&i);6$SXrF=$WISK!+t5DQ76c#&PCvKD4G|QiAk7sju+Kub zAbxWng6A%n-D0X?H5BubZ(Y?8r#9-h8fJj86(B6w^R$VMW38B<1(g{4Ld6^* zkAp6TeF{9$|6m#N6weY#$rd>CBaAT_z9b{|H!#hCB12UL%NQTjLqThyoC_wIomD`M2`yxAwIU(u z?4AQo7V;|7jp7d(u!IguSy-%$IVSXXQqXo`qcg4yofW)ee6p%%s#-b`kP5d9ua*9u z!v76?gcv8tDK=|>UKkRPiXjJmMkqF5Fc^iz4h>i;xHc+`VZcPfnFCY>=|-h807n|g zbD*nW->7&7C`nj!0<&TbM{pWpqk_^cm(Y`4*69Rq#Xb&z4Z%&qt7Gs=h&doPLKvWA z!43qLFm!-$fTU#84k(sTb(rM_2Am&B{ zzR7x@;J?@p5&#J}PWJo+2qr`!l_U{BCkuZd!e9&mDFN9D^G~2*P=~;l5Gxee^pk9g zs0yH9#MmJMWCGR`E+AkrArpZ%VdzsxfuP3NDgt&AeLk}0*5{sb5( zG(o|U!7~Dg0v;5;AOmGWIAS72&tNFTK|z+Wc?2{Cgs3nG88H(I5;H2Xhr{N06ig&E z1u7x4W^j>ISmm#8AO_E`Y+WaR`2J}MT_nMbW~ol00dxi_Np*t-Jj!w*=rhm#^wW4TBlW}pC=8`RfR^y04&_7&BncB2Gc77xfxbN2u_&?#32}Fh3p6#4u|k( zX&F?js4~Ff)ES2DW#Fwse`G!m{uDH3aIV6G1SrQM6?|p@uMi?Z%)zJ%8NTqJ7E6=p zw|S7SFe8D^v04Sg83?RUB$=UuV+GY2EUa)P8K#456@AjSpxfRzG!iou3uzKm9jq(; zng#AX?H|#o#&j&~NrBf1{c8L_4;s+$1xywiB{(~HSVD(pUe>^;RDnAoWPRQfQKuRK zTL5Q;Sjpra%q)G|{n-}-=#>``v@k3Mg2#%M{#cb^oAq5~{fbdqsFq;y*wqrsG?-d; zE&=61+7j+G!?lnvLFWm3YlvsRnZa#ja!aLbg@wtOo~m*EHG3F5zarqy|KW*Qg=NhWeRXl<>w9jY5=&Zpb3J{&FGTP=>r>R5@XRCB(8y}siJ%! 
z>S0_1$%V2Brca>ttX_l7g}Vv3PZ##O|D$pL(1ZX78<1|ODsh7H3-s+{+%IAFUoy5z z>%!&~xSyczRVy3VuFyJ}`vZOtJsbQk{7wM?p7^zFQGa^r(w#kRe2c(n4H6JQer{nK z#wUW@2OF{ zO&JwV1#%F^Fi?Rgia#m9 zCOJ@JsHk8S^`R4s)z!*-W~E#($Lyp6TGUT^l%8;kU`*kbcLRK^%Bg~0wC-RvV=v81 zLNW)13`>E?YwA@;vu<3-Bs3LV57AbRUe}b)JNKyE8 zOj8OG5RhGFnsVb*8c?fIX}iDHC>5Jh6H+8Ae4YP?Z_0=y5X897%F#8faSEv~UcGd}mo&bnG=|t|&rqi$e8m5u zAsVc2H6}r=m3@Fdg>NdvNkEB$MSQ7j1-jG3-%BlFDAAM+PZRivOm~{Fk0_i}HYn}d zj#2hw4KNj$Cu5YtPz2^_UPs*ugevI7;rHb;qx%WoGdEZXOfeVuDV$0;pJ!E+7KCbN s$e(Z)6HY1TqXDnTmZe~78Cn4o6WTf&x;NA|TBJNH5=9C;Gha_uV`08269+#~tHkj6_j6=XdtrYpuEFoNEUi z($)BW+1h1XTwLF49#Avj;^Hpm;`-OFe|?QV5sy&Pz%Pn!>c`v+old!V9(Os(rE}cv zjJ=bay{**-kCQI0woZ;arDT4TlG(oDw42)*S4C-QhyVNwDJK^jX;-I(9=yu8XAT&< za&bv*qW`||N>laV`htr~Q|+h2UI~M3o|mIvP0J74#I=9buP%P>=Gn97zE_u7om;e; zd-vVA1EG>0l}%PbviiA3q5RzYb6Q&|NU?O z=iC3jR{yga|2M0#Ce0|v;!;dZ%n1A0&e~}AUoU#pr|W3X1UR*qc^hYZeSWw5`ma)B>zKW` z@#kvA0?CG*waQWBvO}-#7~$3M0DG@SOMg1jXC)gOEg6FN&;Pr%ySBQxxKt|&a$WmQ z(z0AxK-yycwxgMWLxn9;g&hI2H_b|1j5C&UaXl2+&pG6WMDRYVR&F`Cbzs--JLaVx zCli-&+2b65VmG1HY|x}o^# z*kf~nfy?+MDy2#X4N~mziR_>96@Q6eMXee#jdN#*OXJ+v#>X7kT&=j2t4ZX+SdEhN za7pj|*d=>q#)sRK{#Y(((m9Z6S|n8+CvD;VFm%n%&VojZkUxL_hV|j?U)*MwD}H@- zd+J@hQ)j$WdCIUxdpQ&7!9kI+zh*TK=`)BK!@qXKgSyP>>yMnIl zw2AFoi>GMWv2+-}aIN{}&BNbh99xV1=2?QCW^RK`xi81QIICk^=H;B={q9O|%Wr1G zHpd6@PdthA!t-_Z#94;x-`Jnl(_9fSKjYOCW7HXAWD(RAt|TSIm}B2GEpl9&hYk3} z?)pPPyupSqzgsW=W0L>GYrpyD@p48#e6_dAdoY(}#qMNkvOJn?GvvEMcc!@Z*68B} zZ<-YRT5eq(v(4zynk8w^=eOG@{;@(>$bV*BJa_8LZ z+j{03`XpS-dTwPTX*DxiomtGYUwoP3(o>z5z{7PnZDzVRx$EYQ@ZePzUK@4yOn2w$ z;wR32ca8IaTwGi4PLK7nDuY&zeKg8ECNncRl2a_mRk-oh?IR%_^TVDm(zN6HUfwbs z%&*sdaqXwyZ({qmTa?Madmg_knX%ygoshD;h3``m8yvmDPD9f%*&5V>?fJE6Zmqjx~H@7nKHk1uXyhOgk_t662~cPYQZt}$Cb z$KruO&8ep^vP_HeTi5+rn2qpzamOepPxni$){NwUwQcv^w+-xyynX2UzV*I?xz02F z8LYP#goXIG9X;q{mZ<2SqMN9g+u~TNj+g4JQ5p%olYL*p!n-FXm6c;&x?5Luew+?Z z%KosmwzaX`1rHKX0`?&bsF8Q=F6PD+sA9|Jex{+q%@^>38urPCdVB8{%89Bd300xnN|t zq;cNnc05&n7Nvz)jsHAKkXlNnAs3gHCNkNLet 
z+w6*yJtFGlMIOA=m4B-6-RGti0sem2FqspNFHVgNX4#G#?5P|bGHt(KY zk@1`QFg4NQ)HOR&5fQXftO|c+)nAvEi%__USATg^f78`nw!HMDHHk{BjThGQ;_zsk zIC+xa=YhkMXmP{N=W-pPJe&4c%60gy9P6!(z!F#CC54eFLj*lrWtJ;+UzG0*+?1jd zzx!mOW7TYbMsjCXQG4+9pD$Dubp)(aU7T~>Zd$ng_TgK+?gI^B;ety0iyYfJ5j-J^ z?(g{&7n*XdyTat!17)nMDknP_!rZH*Lh&~H>9ZK$JC)m;b%nnV>jn#5K-coLX%3%I&DM*48lc%wXbHkn$AuDy|H+moi7(92F7c}2yj`$>q>0+>#8B6@-rg)WMtWLPa10Q~c9JkdwPaOd;G7{*xp@E?aGQCgR5tUiY!rV@(#L6FYB$zYMh%M zvu?V3oQ;j*tR8AxAT35+nzl8fLjU@{^EdI1h@`D1h4yQFhf8eM`+fK`7u8X{#HA;= zz$3k_Hum4b0aF{lULmx8lSX(b9Tn6UEBtt_W83|kh`5(orgD?+LnxkZBkd{@hi@KA zS76M4Y?L|k=HY7DUl03Dwt43M^5?l@_fF}&%r;kg-jKrb> zSIP56c*{O)c<6eC9UV;wb1l`4h=m;*OYr{ywL=A! z3Y)wqhFauo=a7j6lqPZvR4zmYkIheZh%J1$pt?El-aYPdlcTo}i#wy*ZHbk(w78~B zXHgCLW!YTmudT(-oh7|V+s4P*nouU9s+_QW<<{S<5K3tB?C|GKu*M?riY0kjS=FU# zGHYC2@yLmle|{ru_NJpYCjy6YbIqRF!OHF@QT34;KXvF?`hPsqS-Dd0Aku;3J%w_l zHQOgy^40HsV%{ehwQ3kA)*2zX6c1#V@|F(Yx62*Pv8)K_M844Wn`5)i`!MKMCq6QRZha+uibT^^~_)22jL{~ ziASiV3=Z2MhxezB(K?E##&tB`J^r)R!?RxmA#toyoJ1sV&Mq6Ru0#OTM`|BW8UT{x z{YJ>^m+PY{Z~wiL*D(E{rgnCP#DMEi?AEs4Aj#+7iX~hepGJkveJo=*@A>arh8gM7 z&Nv2IhduCrE_-WqH54|v^}0$-PBKnu`d4||72+%dymvXa z$}d+L|J#eu1Qu33Toxd-`2FoWDabo)glD?UQ%NI{OMw7L3W}b?>qpAQbT{(`EvXgU zapHj6`*{BqS_(jy$B(>RPM9Nk$Q>3ztWWk$71_ zVo9qhErklJmOoT@o}{8wqdfLZ9!qPD!?ZNXh!52}Y^+vQm7Z+1ijUiGy$(%xHxfKJ z-I#5zJoV;x(a08HMJqtdpc@f;6gfw8F(3fx=(gAUTN#vCou5h;RsQk)I<3vMmOb6w zl~i%*?oj5jv3JiySi%hD`<^37FB;2f z|7B-pI~Ii~G;yF+hbtBrre+3mxZRW4rj)5$eFm~#+&*%L-nlNJKh|z;ik&;z?#lv} zGD&rPcWx1P5zNZs#Ck;jLjRCNC8Y}GoaO2^V60lhub zCrn&k3)B|yS*B^lL{6;wI0rcYu;HJs>aLD)iOb0Zw>nK{Hggp#@8r{9!I3*T<-tV2_kSYJ}vUO*JsgsUftK{?M0=upV#+>!|K%+D=v1RIN z1qFo{MNS>XGw<)zC$z=(*J(?KJ(aFdMgSV7P7@ZVPmP0|;b(o%rZyZnK+XbrOr&=b z>AK1`Bk`!!Qu%F0*`bvE3BZc5D5cF!*D}Is8?(-PtWX(0GzjR;W-LxJnBfH;L(K$E z)}t=u0u}E%5!Z3weV~(&H}F>0V@7h?laCVqNnV{n@~Au3z*e>qiMN=ZEi)yqb{|o| zSmXWm^=n;Uq^LXBDD+mIUuAJjg4uLs5I8jgKv}GOZuF@Y%LJLs%;WV*X9BdNJx}W| zn!ALok`DiQxx!{4Br%ail`QN8uw9uKN>oUUc6y>+sr!I{v9YnHFtumC;e2g~&!~yU 
zOyhgeJAvsoME!%73)cXG1#gN_-a#ibzselYHNmsp+Zln7COaIv<%WhM6DphTArs;imdaB|{rBw(`C!0+pmBLs_)Xt#OKpb%M)JeUlpZEMcAUHNgK zF#@d;3&6c|Ag3a{sNH89;C#%Ecl<_XJR0kr)lkKYQtNBw@lzIaLr#pR^>HyV{JtX( zJlDHElX;P@6Mq`<3;4&xF|n=;1+5@1K{qV~bm00pMh%skmhpJS~sL}O? zH};1S9_{-3=M`c2`TY-kJ`!$V*XyY=@xrU9Ndu}E3uFBCy1)>7|C z?LVP1AaEI*nuIB=Z6_r=IyKlg?P+7aaM#{Kv2EM75mHN3@(f3D)j;IBj=bG%-}D1c zWiUGSDjel5giCw(aC3yp#IZpDPihA1e3z_wGgQ>kKIoo{Ufg-~i5LqoVkI^DKu|0M zy%kyu+EaQ7!x}*amlFwoqfa=sap=AP4`t!rZH_tUBHf0XWtXeYeP}}H+`y~pm-|gm zAt6~myDY<|^bY!9D4D{f^!-W0JJk7)U38WmcnY^pOP2*)?wPY_FgZY{Dih2mNq`L0H3cCpdxw4DKuTYAeauc;*5IFY&(4O;?z^i zg~((1r(bO8(!07#Ja?pgHa&E)S9P)E?^*_`hNktaOyiyUh%jY;P)fRGIBr2eCpuMg zDE+)hx7O9lcMji*q>$lGv;(bBhRPAI94F*l$rFU-aoDYkuTB)01A}+}S}tx4Q1bKn z8tEgMM40K7+SaEBAw#?0V^gns8U5__v8~|;D=b8L(_F(fFX$w$;6UIlx(UZ_dZA1n zESsNb339Db5Stu9=i9OU!`$J9V7R9O$9E4K6`N17NG)Z2_-zt_&H*YD=fh<|`%qVHP%eaU z(w-ZzT-3@0&W&;|UzizC>#a$Y^Ij+i^HeuswocA5AtMn4W#8+2HdbJ~;zQ#Ab-j=& zs7ywsIGGx9GWC8y4m5Gk#}^4Z}a1bp|p z+UjbpmpPWI&W|@<*qjY?UWtZo_v|E{@h68kEYOL3H@@Q9%QBlRulG3#Kk%KDIxI7R z9JEqtDBrN!>%w}M1J{#nYz5p=w|5e?BSt-_6_ZH0&db)}n$wEP?8=qu!KI=C#ZGg@ z;10R@Cvj2_&>P!k&U_pYL+566)63N0t>sS^wfG0p6gNv)R8I}7?!WzGor`KW54%F7 zBVy0Y&4;-N%Sh_e>Y9L^CC5i3j{G)T6+z`o#&=?H3Co<``s|m>53I@2h!8qoz4Ow6 z&8KRU67&9b7i?INhLCsa-!j;dc&c=Dmx=j(c4cSAA@;IS6Ct<1e+oMJG?rhnvF@&@ zSq^WIWX3~(G$5K+dr_(M2b-w;wz@Z(2Z4Fu^U}5DD|Xt}TO--87+14o5_n5AT=D(Q z=K-gWyQ1)fl%?dvs9An$tZ0Yd*(qVOPDksiNexuFEK}Q<& z?CufSbMx>mgSuXdLgE{O*DJkZXl$*%fyxzxH0dz$r8&}8s?k3!Ob?cR)zdg>J#_B5 ztSZ7d{1S(^o+$(E$3iaq0X17sap8rAFcTb&WvZF>U&MwG@I&QDJLddrL*|;C@|j=V zunYZDpdf0km>mZ503U%Bz)|O~-g_!lT8Y?+bF&9XmO=G$OD$AKlXfqKvAA$4(fq?= zv^RR_{T%YE4e&nKiWwXjxD^c}(dTFlEZ#J=I24(Wk94T!<>ifr%>#dljuv$wwRtVh zPu@V{Sej%AeB%Y~;Rf0fO_nO4{uBb3JBCG&05G(s(GvO53-8p4W!-+bEHLov@7Gp} zCHoP*R4rEs%7XgSWrD7TRA#+^K~j(nZNuWFvW@z8f{eNR&V>VmTh57`Y=PdLW%rr8pz*)*xCUao06+j%I#nq?QKX z%SyJ}`Qp)e?yh#gav2ACKt#3| zFbi^Uv^!O@majMj`@^F|LlImS6|@7nq>Qsmg=HVh0Xa>1x;sjoRj>cIzJW>p=}U#j 
zK;MP}_U=Xzjpk5?n%owJrwGc;^++?s>spgkeD|a5M1}jK+A}rnLWuq&_|L5(t)!$x z`R2LMWMjpG>~iG~5BIS;>~bEYAYwIHAbZUEyyELC7>h(NUGH^3nPQ_0O5c9w0rD94 z`T^jI5ac|C{w8e58Vy1B!`H`VUwO1Rtdg#nbE$J><2)ys9yT>Y=?}I^^4?+{oVNR& zPmBd1wO%u$Ge~SdprE!wd|gj#$w#eT6*C|&>gY99xpqP+nwsj0s4qO4-u!ZU1R)?< zcj2dd_wIE8%!LuP*gu7b6##J{?$&yk(_n%~2;MbWy)7pc9mN*GZG9sQ(>;x+?ql)Q zQFd==$4SYcNzBs~O9nhqq0TPV8h;svV{|)uXLA{-Q6_Vh<$C0&6WfjRQZwt(1U0?? zhes~_Rl4lMKI&%sn~>5c2~b?J7IuHB@7#3!f~ep+MYq!?Ax1llfylbBgF1>6;Duvd zcApf(QVaR?UAJN$D42bR2oB-*)OJU-tn$tnB6tq+Gt#9D6udK-P6641slS95F6J}9 z6sd<_c{ajWoVDVQ@EmNC@tGV>bQ?sR>pe?UD%$HT1gh@wlP6CaE6_{wPyZ}f@d`Xi zs#3cYAXd9qSC|zmB9MRsFR-buc4p)oEI+~Dx&@?|I7g9_4@;uX-6m)xQkOL^ZK{&cY17 z^s7w^s2@bs)}w=xI_NuskC-7G3#h_Wv%$^CNg$_nPZ5jPy!gt(S|MDw|4wCaKz{2 z$&;c(mWB4l;x-2N6O_a=9X<*T6Cw2dy4{-vOZ(H~g9R76qx8aeJ2Y<=)NWLm1dk@w za3NX5pu<-n|Dj4$ZNblfd>5o9a%uch|I#TUzY(rgU?l}dDi#9{CMhcrNG65|tUhTT zR^xpy*kCj^QSY4Ek^a$?NQ&XM(p6Kxhl$z_H5YI|J(5vWIB1SN+2u3u^K&?A!$|vs zuHU{93IyY_IRKpBvAe(gL~0=^q6ncNRC^}C6?*CMronmqE?^zRl+(@Sw*P|mHTP*_ zvWPjxY3xX~&PenysXiC_s0l%6Ma5Zh0x`<3v=D2BqM+Bj2)GyRvgKrNw4tuJM`)Wz zizJe3at;ISjd;l3!lP)f{7`1EACvKDb8n2czU8r9@+JuO&5~P8K)W{w7=gqO2Z?mI z$(i~cYD4RC&n09)80JMJKX0@|k(Hn6yAhJuku!nwb`vaI`GTJ6e122z3i&r@(J-s$ zc180w-Wk6G1Xs7<=J$(8l!B+hF>1_T-Z7H4T*L=8{rYuu5_D3rAt$uS)YwScrOuqG z1HDRGC^I}c+_x6HqGG#)M&MvKD=kuG0NDIZ(3i5{1> zlW6*|wXz(QqIS=0rRBnCoELDgHQtAfr0X7aet74pXMu*g3ZngTqtWTyQpFp3zBx^D zL#@iB8Pp6JNjqvTvMUy5R-@*iyV^uzG_evy)8i9{+=OfxEp54numB-K`UKezCYq@@ zTR-~uw?uoZd3d~c33l$PVydq)kB&7%T6?U=k+$x>n!&j=82;J^6(YkS@i0^LhDL;L z$)ThXT`7;q+a0+iv_DyTKkcVVg(j;ZoP5w+uuZy*=o-U;=%(Ys4j}+cci>Xn%T2C(o zi--_Q5N*A*WWiAj&tQ!O+Mj!0(<|aoAh#uf*@^Cg^u)-`*;(O3{4w+o=Ygyu0r69i zW4dtg!*GB!AO;!=O@gt@C^G_$dClu{*yL0!DB3c>Y!E!MHMIe4rqf&jba8o?twD3c z6F2m0HH}4}2ah5BtT|zLAN^cZ-Y+?K3z64T=UYsEn-%gM)E`{^WRDvU&kM9lsY*Q= z+XC(f7=Z`S)lLWuMD-9IT12O5HQZVfpP+Q=+2z#veu=#^SzC0jZXQ}hEehtB6I4Gd z3c7I~0!*@tO(!+y#0cgOUQ4h=5(_>td4?zV^}t`TI8Dal6*3!M$tCfrgD8#)@Y*2q z(YxYWbD=|gtaf#R!$5RCT45>j3>1U-!s!)yk!@Z| 
zsD;iaF%yrgtDC#KP)oGd7Hwe4nBE6INS8c{*o>TTvOCY{_tG)s*H0gqjFAYq-#*sA zeS;J5LUr!*#0&%ew?j~-nbqP~NW>UQ27=4&Ouu)aY;&V8fysn;O%&D6lHF0S&};xP^O2lb$Mzn3$K;kSJ9Jy&!Q#R!u{+Tn zM_qyIhz{45&3M!Vu9|dKE#n_m1Lmft`ffyMDQrV3&$46}HDg`59v?!`={6H^NOMnw zdaxh$BWn1oD-stD6943dMzSj{&LY^UV!lwVp-I5wjNXyG27DCX&D59YXXT3C1bWJ~ zPW6$j=kYaU*MBT+J{f0T$1B#!INwDX&@y0Jw4*7##t;cRjHGD6S;7k0$zCG#PnokT@c7G;_72Y2m>E6DS_fRaktRFPy7R^{sVao5lCT zl)&F;?rCo3{_2P(%hIPmO@6Y~Ra-O;Twawh7z~axgH70kLw64rT0~M$LXBh>z>!TI zJI=yKoTP1o8W;zE894QgCsy>AJF<}FQ|CqX`z}kIy1l--nTUR1>|k`)EI<^o0YuepM{9Iy&-S&GC@P;c1QhFnwJ$^ zPjMs>)VzYj%hsKJbO(sD**aZOFHJ%*I51RvmnkT<+;>Lkl60;_f7f^?vL0#=N#OKz z=aZD^Kmu2GkzWjNuU4C+LR6#ZMNz(Ioy?G~$%;k)1ZZBG6_<1c&ZgE>F?R;&^tWnF@(eKXk;kh@6lh;x@P zRqkz#d}|i(jUT?`G6=Q?2Br+XWFexB5&oirBpdr7&$Wrve^S5m^V;aiIC z=xBhC%|jh89hvF{=;%}Dk!TLinyAA#Z3)4b#LUdfqbM&b3PB)E3h=^t?Ts~=q0X1Qm2QDCsGbaHor zb)l={6X)#b+tK)IDoo&@$2`7@lo|xI%K}vt6)p$v6uI<9lQTW$S?X#gZ%(7!%B+9YBr=x8IEZQbU&GC6c-9dCl|M2!+Xu* z*N~gRC3AE8NX6W!_Ij7c8)Z&bN2hi{tPTSCwiVecW}-k_+qNPd`~1n6_weYpSBriY zVBMMVA#O*(l*%35t>xtjI zkCXAqb`hx>Z=|Vk6nqXT(79ueKHkp6Lz3Qo0}L2Du3`>^JNV1e)w}JFm4OOi6F&t} z*q{c8+@0iFGFN4J$h~tol5XC+fR<(RkhEpFAMqE?@2=PgY+ESw;N&2;{<;I3dDUkw z?vCt2rp-yq=CM0xQ^P>Eicbo&CH8!7zWq5e> z4#2JEEXbMJ{A?l*juoFde%)oO1L12V8&N@Dgi45cYdIV%$IAV@bFD7!x#dycfu`1^ zGsCAM$^r`H0n<$Ca6n%~y4f4{-l_^)2b<)RNlo^i1Hz4Wo{Lp<%z*HbJ5m@> zo0B!5A9udXVulP}EYph?xNBRR>R6UwFz`JgbopAN3PYl!@|+&kH3R z3utR9_GmsVBb zjCX;V4noD9kEene#x`Q4?t76;2*M zZL3|d5-}>A7(4aXg%x_*>+3chab_Z@>UkHzPB@u#-nF&#W2-k?`9llRoE-q(LU-QU z-AZZCb`cvF^3SJzjaH-!Mu!OM!1|{ke?n#oDWVm?$*xkW ziN#WF(J%EFO0|a0zZh5OPPT!|N}i5&xzsE=4A@I1*wstFnzvL_Bcp{N15lXFQeBXN zr8FwFtLJH41hvv^7%s}2>`n$p2I!zIoBGek2HH^rAy$`5Y>2}^YO~H6^bKL4srk3= zzuko(G3_o3*!HR>0m)lid+xCM_t9Y_)YvYPGl+K)$Vgjz{q7<1=s^d{uk-<988xx$ zgxqsvQL!OmR4%ZZ(#-0R#n+M9MlE^Ua>{ntE&^XzgM~~>cwXpWdcVd&bMlO+ziPbh zt!xHy&CTTs)`m*qPLid@abNW|kFP=Uug(R_!MDu=xGe$eK#JiO!CENVFy~2;>C-AJ z%2sQX_H~2wrxSdKET{PUErPzN?Dct7v>MT+0|KWq?gMII%L{%za%r^r*zHFtZiDc# 
zq03(9RHi>DpQ?Ae?MrmT16C{xxG!*az!5Z+LCRsF>I4h+f?GIPvS|p0#k|+kMCK8n zZ^^*ZDMQg>)RhSKEpgDU7nWp-ReG46T&Gx@kK=Rr8XLI(P zY-rVjy^9k?72tNT{Fg?*T$PdFqLtg_G?ZW$GD2Qq6xl6;O5GQKPC?C852s*b8S1IT zxXeLmjDY-?@tD2y6}ghu?Y2Ll=r?W5wicrsZ1IL?$y$@MG3mq*G zb`r7vrk}nMG;s zjkok4bN2Y%0yUr&MPEHh@9p<60QL;WFO@+z6utBSc&-5%*kyEaoh3bI+kAHrw^q@LP&rt3=-nop=Gp{D5VMvg#GiQ9%b)g&sSJ z3dgf^)W@4a9MWV_W`Wc!;WFs(o2vsm45^t|+vqNv1ZVKalTZW5W)+ z^!*zX84J@g&{y}>n&PyL55C^nqabyJZN39Az$xL(qi;1?WN{=nomT4Od2C+ODUZMo zgKQ(y;6@8y$t?#jUu?JxYR;{0()+JJzQ2Lkw>1xW-g1<)rl)EW)}V*mYVeGhPVtO{ z$cZsK&5qji7THF#C>QEpLeTHZkekp2N{kdYe(z)hStO-0sk` zU5CD**?Iv`k7I^HDF$2w)YOFxD#Bbq4(2IpbHbKeHDMV(%%HRO;}1gj6Ru{1wmDn#7i?TWM;FsW$HLdIQ5^eSu; zWe;=7JVa4Q6;kDi{#F9P@Q-irQ4qT#svTRI%Mx0Lx?o<%rZRQX`LqP_TEu*4vW8LT z$P#CTlYBn-$}?nkPD>Ayb=C0;f&xGY+dQ60GGos?Xf`OrUeX8v6Ag$ke7@T~%Ft6Q z_tdnDCAt~pb{TrLRJ&(W!w7`j^LK0(Rx_i-{K^Pxv6+)FQ%9)I7S8S4ksDgtwiyps> zZjDL0>b~A%*tlqBIzjk+yGAKT*lfLki#Vh_ntK*}KT#txK+mZ%q{q2rlnXQFWG3)Z zXpcUe8Z!g9BkgYj+K}>6Rnk;nqT%PNjjpSMmADU*WnC`1E%Mn%=1ol(kv*vi!bKMf z__HB9q@m`~%_Sh7--)}?$UzG7I$5Be3+$!r=7Cc{wzWDoGAN7VzD)3dG6iFym@%{gv^@I^BIQx@s(aCH_wKR!QDODX9Ujz**w7peSA87gu!Eo74t_Q2AO2mWJ>T1c8E$wP6Jclz~|?XJeM?tI4T0iD@W!lKvz1HWY$B6^mr%f#z`m# z`$iUY-o6Ccn_b))OnQW42ZW~^HnmC4&~unD1h1uFCjTRj$13taf=S%{^>?d(fM7qr z1)cQ>BZ6R~cn(crU|K9GTFGkxfRy9W1(LNQ`y$z~=xCG7K@WurP195)1dxykYxSPb zBNABI^aBbG4u$_7colmZUN%qzo|R+`r{-i$U~69rGXj+1(iHPX^^I+(19wg zJ{F(EF|5xP#Qkb2A?S&jIHA^~-B04fDuAAu6A`Sg0BM}~z2k05 zSwaMIF1cW^@t3iG+Pjue3{LKW7dCD+^z&+K5)Ei-Cxma2jO38U(r)%M=~Ut%dsU}g z>x6Y8u@ekaI83a2dSt*!gymYZ1SEzbZSj#i>d>FOX?HMKNCT4OsY!D4gEGU0Eu3lU z^x(*w%Vfi%=^t&QVEaeO3*#B&RT&$USkFU4KZqrzV7@VA>F0*KEcxghki15OO*GSw zGA0c?f$Iq_XegAZ&oMH+Po%+{KY!;Qe=ZuIhg8WT4@i3E)?kMoMI&;?V8gl~Q)_!a z!`vXZ`Dt&I9hQzh%MALRjwPs zJLs9dZ3e-ymgI30t6Xo`$k}-FRo;!&VTs@c**F|&qDM(1g=LmT6!R-LX<+zQS5bRYL2Fggmc&ZqawGYf?Cw22=aVFKjp&vQ=v%R1t8eCNsA#7SW zbo6VP;YElX7J{pbW@3K!;6bSmCP|!(E#$${eijCG(U20;;~J?M#@gG2UGPwBoSaqL zmW$>fQpsHPdd$$OEh_L>Fv2Us8J=d%$m*6S{LeoiurztoV<| 
zVHhy`{`=djcbz^I^x%SaS3u!rj;R>{jD=t_9y~_*j*>zNt0*YiE>qOzF zDBO<(TYuG(8O-06!1El86rx6SCcS$0o zRvJ}n`ofFb;Fdue@}V5zj~95SCAD`jtwqDm<#3hup?UfC)Z=vWc#%L$hB2$02NMz> z%J8yNFwp$a;WwsZhJz#;X^OhY5#^jaD~kv7o(3Ozd%pjns!w@TI`5 za6ZWrp?ST;K zG!W=CI!ty28oeaz0-2`Lw)4^W%;s4=9kPcXz0TcVfUu)MC?b|=K$RG^XYq2<Gz*}M z#UN?;*1OoKsFlR$Q8q`CUqdz+Dq0{_tU+q(BZ)L&NQv{@@Om}DaLU0B)Ei)w-zu)* zc~e7<5wqaT+efQml_w;!MK5VRXq<2Y0G}o-$w*7DV}+^c5FBG#D^AS>x|bu{N}g`= zl4fuVU^A@Aa!LLcYUrr#0b$QaL?Jd#4eMr&(i#E`{UgmxMlVGhk{t$S&yU2N*7w}` za=j>$fp99Q61PF4o@;iV>Q0iT;aYs85E{!uB3Og13jqbFfjE7tIJ5(B+Kq#y1eJD^ zRi9P@MPD2!mWOtb5E61G z5}iQejLE`KD{mU1e2J!6G@k49rx(DM7N)i8j?frU<{Fz_qGJ%tc`+qxuH~9QF z`1OZ>E_3+wCuhIYFV9@bRcC|OcvdfjAInFz49)wm7rVBd+-~G*Yn`Z)O}~N*0PNMN ziOy)lxjPLQyXGVc(WkGyTbhcic14~1e}2#Z{SiVx-`>D^jkEvrEnT+p>FfW$t8vyK z7jsoNG2}rCCHOz&&zqTsR@~0$rrwo1(JUS0v6G3spFgW;s-LkK3FX8M(F_=kN?@o) zpS*Td)|$|>Sb=?$7#P>$6m;t60s~HHzLkDjc{5L<2E%18oxBy8vL= z+-q%voJkasni>)C-W-tW`BDqcR4-is zfKo+M;vr;Qk}Bzr>_gv-0VI)j{4)`i3l(sgzrZMYUt^Bt9GrgVB^)CH=}s0iAe)jQ z`l+Lt$MjRpKCk{``(>?Pa4j%svYHI_wjpwunB-i>0E)Ergv>A%VoEhc#O7j@BR|XX z5Q^J&w(QKr&^o#-B`^@^Px`*ckV&WA!tGs2md2Sej+^#}27b9*e#8CVTjr(&LFqS~ZlxO(LD>Kf^&E-aJ6V<(#j79#h2XNu;PV znvXQec658V#3DS9G+#XidM!=`2hBnziP2CM_=ia8*^d%wDzwy@1dh{lpcEy@K-4+` zx3PK3fJ0_#GMuXDi%x^fyM@Ol)gek(Q5@hVsRibjEFuAkF{#aEOh2`>>VAA_X>cj# zMmYS+EH%C40TX8-H&#;P+Di1IAknx6+dn`34kOM?>V}o%Z<2vr6c~sr8k)Cwnsp?Q z!TpI?@_M{g^$`BbnA!>FXEH2O8r9SYuRA{Y*g!M#E)l)}-yB*216O`gc4qz`U(s~Y zD#ap5pX8HI$jAX}Uf?>8TPno(vKRm@A$cqH8$f_al2)W};g zKi_}0;U0}R7kU#-K^{I45ePOnVE=wVF3WFq;|c;u0m&ITsh)x~=3#|ED;e5ZBY&f^fsDZi=1&Pm0=8I1p@MR7ixsA&V5o$Ivb6}VgPN)G}0iy>gzyxh?K63 zB=E&_wTGnh=Pdg8JEWMi4$U-&21B271($7O60EmL=wYTa#M~>{gLwJSgiUW-*j7RS?sXjiN(V4O`2M zUEKZJe_`QW8QI`e+=g2f`keupYj#ewc&}TB&S3b0IADwbHU3xg!sOBP6Z$F{>L_P@ z4%((s!n=jWWLIbfJz6rvSQtwt_q7$%4=TfGek}}3OP(vCo4$c4E*Zj*IgROt#5}Dc z>#OomWV`zuBK4W!SN;g!*d`O6>EsS@Wddj*^kx-v%0{(r@zuq}q95yBFw4+Q)G0p& zH^j4fZ?t8a5tt$gBAR>h%e8W?XMUzEr1E6H8rp_2ztbm313&|q&-~}|ESQ_^IS;H! 
zGrYv5Q-tyGFc7nJcb3G_XLwgq<44B@Av!O;8jneG>PwT#(WMq0o1SYxf5cHX>W%73 z!<`Zq>4Fn0JLodv`@`2?!Qn~jfFc`}VQz4KJS`1;ONnZ4cenj$ljWj?h)9p?a7P7W z4!*yzz(Dd>q5(|$WBb_5kgqM;8k)q<&-r{P_P(Y`O8Y3Vt>JW^#U_yyY$6*>uYl4! zD(CnVPaTR-*=oZZ$QzoCzsu%Fn@cyhu|Xqp$j9M#-4EsD;t` zDMtNxkCG$KLW$Bn4Yj&nf85{0f>%gHq!T4eXRR2BqP`!f8wKa2q1HY7?sE?1Dm3$; z(<`ok*?+pC0x$tfjPBGK!+2?;6SxX*G~W|y;J)V_Q&Z5mom}LG&RxamJ*m;&GsvFi zgT?WqsFOVc!Wxvnvnv~BKyi)^ZnwaM5l!&bmFwp1n~4k%jKcj4*0h0S^QOsLYw$k7 z=xz6-FsFd{<;=+7`=f8XAZk4xdmCIaKjh>c;8`)W_;Dm1u2yGk$GbJcN*?w!)+izZ zl5jl?6_)x*7QYb^#8_xz%K{XF`q84#jm_TIWY@4aBve5Rh1y)sfkG(ebkAXI6LgZN z_3aQ(A~f7R_`QyyWO^Nhd25wVyBBieZ*AIgmD@^02S#ggNl35_$m*9 zHHmr5$&{J!wiUHO;-G9@U#@E_P$y;xYLF;HO8YM`W=C#2I);}(BvF?jT|Om39#gvI zaFj0}deKvTtQXCB?p{-Y;3o+}9c5?CU;b?oRRnT#gH7p~Ab$%@Yqd_$tnSEz3Y<}* zasA*$CMJt60pUn=fNFfxKQCEVk+D0V_H%l>d(wbT=E@{s0do3_bTX9EXxtTa^W_Fv z8iLL+kDkKHR@Ymgvgf+^AneBSwe1?&un=_H0>wR0Yj{01O~4Bb=!}yy?v#3EAfBrC zjxJ(DYa!bsHHG07y2PW3Y(>SmudfpZj9|)Fo^vz;LwDlPc!N%MJ$OF4s*Y~I2q%39 zJXW;QQD9~AV?$pV4n%mqyMn%(V!oigR2wnyW0zVAPAff&&3k!RBaE% z366W6-Bls^b;MLh&3OL=aZ^C{qvJ6z$9&;S9nIIieR0nf%DdtrEpiPQvmsFmY=wm0 zB7|MxAT_IdW2hIiFp7U;1^$bTv&lr2vjU)}ed26Ae-TpqBvqIFm5Y1+W)IX>V5E3~ zAB62elU5;i#bjSHqz9TT<{qC5xC1i88`onheU3pc)7`K|Jv=%RsVx;m?5Ua>2-_|U z5=dOar_%T75^UqP>Jn@Txi>bAF7RqbU(qg4!H#1;1>fNw02BXwr6&?jC=r!I1JS1t zbhz0?j;)b>$yrm*L3_nc83S~w^YU_=z z0_D!S__ygd`9QnOvmEAKTI` z{-Y&)33M|?$q>~gx`Izx27W=ZQ#^B^(bhu>G{37?ii5i}30GqNv#{?tQT__bcf#x# zQWH!acK0|J+~|eU^`J_pa0;gzXar|8pZv)CdhWB?U?@UJfS0Jsp-jD_&E1c~!zCeP zOuAHIu<>ptnUF5E$+A;Z19hK&cv-*pqpFJEfoC!#CrChtz8?{|I3&QR7SHHj19$Cg zMQd9NjDrGL9qqL?B&w5zxJU(MX-V&ou$L(hAy)`^{fD)^q756>-m8Pi#oo=-r(J(X zaS|VDbm)nP;0Wd#E?My(K^+~-=Pr$(PbZ{K?X$AK=*Kydc$9~r{oPmNJeyMbqRANx zGb&T$WDpUd(@a-ym0Oc0$=IgE9W z9cT!FrUCMqi$?%XAW5eACr0i-Kc+kcqga~gFg8Pg%6MR)wp7Vx0He5(%N+r(kKkJ> zdg4_fNbb$w#?X}syD+fnejsDgAG&_d?2pYj_lcg397pNk_vw94x7&eKy3*o^`d$Vr zx_7uOM7?rvn#d+)U=IYy)^gJG$R_wa`wHegux!=!o{*eZNsL&kV8^FBfmcfp!yJLO#d^VGu4rucNt`=xk02+kEhLzdz5n%*hyz0Sfn+}1r?767 
zHkrA}t+nL23UJ*hU$H90(WMe+7CEB){dV#hfGJycnZ0-qMKrX@y$qcQwWLfm$*6Uu z4sep~fM4Atvlt=?DO>OUchI3pw>*_M-hybv3@!m!;yChJ$|1XwnxPrdhWhHtSQwgB zywVZA=+T}|V$S4WD|vY6j>6yGecq3Y)5{9gMPR~rK^&;zjE)W1EUMxN zjgHK%aKsfla7RC_s)+9>Z~S!BKV61<;07oG77<6v{z$Hla3Cu6B%thWLK6_%k`394 zNF#L<)hG`^RouidmXC{n>#dmDn|bL{iXJR$${04I5K$N}=OhFzrz9ebVY#jp=fpLn zn%fEj5$@5lAv?qv^A-eKeFL0*(!nnnkx5 zf`UO)P6XdIKe_~b))fc53)NKWZ{^P)X9o?Mo!wOmYXjZITQUSVwm(kFyc2^NHHN!X z;Bev0X`t7u-ML6s07Tv@pEgR9;&WkJTg+KXxZ#nSdPiUMw#;BT(i*`gIFe!W1x6#S zmgokx!We^}=)JQS4dvL%AkbL#UmtS|UX~EKTLid+CDr98t zeq>ngOJbm?jyaA_fwSwLZlhb7oN?PQlhY%Kh>3~ORdi%uc8TCzzm4SFYA=OLVQR;+ z{>2jVwa4AS1&3-s_uXfo9ogODjZ4Xy)XNbYM&8O?B%8n~OZGImL8;sUa8=S=9NLQN zwg)pt^g;xX)J-Po?$Jx;tPtFzT@y&N(3s{yF}jSwINVcv|3L0PbOPnt1HxZ5C4g3n z*4pU*6WuHcd2uzaf<3OLb{MQS-B}G9E)@1TnpHZOyho+Km8?MIT%?O|E;R~cM#%An z9}amJ^6<95%jgZsJxexpH8r|rE(9+sPb2f>Sfi=B$tKLMRpNDyTq_0;WVkG5W4ap$ z@!X!zJQY_V7j>Llzo76W(bV#b34B~@%yLn}_G1o_I!@;b-Pw^;Sve^#>8CIbx+C)T zpI^rB(t--_cd3(LJ^_{gR%0S1Ql)Q$a<0UH2S>OEv5>yqQ59F_U}wt%fiMKTvp}3s zXLXgstkdP$ORv&B3zg_ne!veu8zP@HY*}0MoP9AUrwrKp0tI9VbAHJ8Ciolbw)syB zeLl~oUy~7DwA-^{)|}>@h;~-Tu{RVd!p>E~=_G{i!s?P|b4|WEi}4DHPrg4j%B6)Y z)#Z;C&T)(q=(tLTD3T8xcSXapsuH~PN;GD{NR9k`zpo&Il1BDuyvPf9moC_0nk-r_ z&KQwa%frLDz7%c|%bU{#3CIFK$k5FXf=DpA3+Ue2B}_uwq|cWO5n+^fZ$1js?wqe* z>>@E>2*Ks6i$Z|O4n)Kj&BM^4hNSfL$4y)TIGndhE$3+TB;FuJ5Z8o;B$+1fjWdi= z&9OeCoBaup0sO$+k59@O&x=q&F+_hZ$P~bR8}8r!TEw8eY|+D~UMz ze_$YuDWafO%Wo>=BV0}uKtzNt+9u~*SLo-T+$#RhMdpRyQ#Qf8X^i_?``^g_QvB|! 
zUA5x(pZ}rOZ-S6?{`2+tU!?H-=j;FFxAdN8B{e}~!TfM7vmW!ek`~}cM3h>3hd;gS zW5CG08_67jI}9uvY+=wBfKcX54l~?$|Ib^W{m%N%e*i!JXvaCu6z}Kf`ai$jM~cZW z+Md(*;xt}eJ@S-3-s5B2;xa=S2_GZ=>s2MsF7Z5Tb=&0LuZMGvJfP?M{MFR?|Dhax z2=e*aFdazbU_3a+UZfZ`wDT!o^eL^7LLQWz2p)o3G#a5N2Qc&~+ z`({2rF^QmtZ*+gd4ac0Dd4YAOt2hJAxa~N%lP*G~%H%}`d74;?Xh93u35!$^t`ysj zD8?=O43dzXscwU95r$RdfOjkD9b{vs%kR*4#1(%0uk&y=5bndXl7C?)I6A*rMG)EK zUXw#n8(dD=O(&dv)haRCdjp}t>Em#o>+=?Pcr(UqSffRbOIws_C=wk^Wms~m zl+-3dDME#GFq@@=oElOxr_zyfQFPj2uapsb`-s#WWFRoC}^zijK}{r>zu@89kB z{d~W_f3`pNew(_k*WvkmJRgty{r}>p0 zJbF*t5s0&tv>`JJU*;))2;QKc)zGryiDM)aJ2c{t*7x}sXY+b`rElc4uV!VKPAUu(oaTr_kTt;@wCQhT=&rpWU2RF;< z0P}n4^hHK{kcH&9u@1lOWi35o_!CD%l52GjA!OcVk6Tj$Lh@($uA$d;{`z_unIpji zT#wyCQb&KFM5)IY6PW9FJ#YY#%G%R?7*1zsGkSZddMeTfrvekR>RPGA7FNihAPEde zU)0}5Kh^BDmXb)y4`9)IG;ABhfKuvo^S^vsOBXb`Rq$8Na@YoySTkV2%b)e(s0)s5 zk~;4Bv1*WNYPP|b{Y=PBclW6(F1O*r}0h91HH8e=l#v7saBZO%Ak(dT4%l(pQj41Oi5MinD>1Hs z>TE0u4b@#ycrY*P$Lj3s{Io`CtS%pCvI|V*Y>+18S(6gjdj@dSXvwIb4RtTx-yF5MhXi4 zpk`re02H7OJfMxOaZ6-x4J=Xt4|H|U775<|)h$EI4pD2+hbGPH8$h_n8Edz#-moc5 z(m-Z-pF1Pc(e=3z10Gcc4Ivd41e51 zwY2JGd2|TPQ|UhY+^HMfk+fkacHrsjok`aVmz|`)4X*D!jZIkZTh|Tuh7=U~ers<}ff1&s0l>Y-9|=OlOvu)i z^@?fbE7h6+Pb^CzosBHgx-JpAm`l!%ibtO+7XFpnVkxw&e4gUZFhZmxu>qf-jw;`} zvfh;a#TCFfea5L3mm&VzmPt)^UN4`tmw}j0HN_nq!yz1HsyEjG5ut_tl1rMmu>c8B z&v$1chvQm)s#Tm~EB@;x%iEJ$m#%L(p8%FmYGnV@%fDoD*Q@&%rp%?q`^B^I5gLyG zG!e;htU%2&flVn_qO1{osc$-)0xV(?i-A$wu$uOiAcgqKd>a2cCs!@E- zSmpowbE@47z`GJ@W{v~2JGm77tXxZ;tE(Gax%tI4XzZm_W9#g`Sl&Z%@t%qnwwXZR zp%|@;8{g-C#zOEKpQ_JG*k(gZ*sx@{Zaf8?G<3cvmQhzXeKOF%@}Ni;owxhkIg3Dw z$yGqCZ~?^Npxj^0P@m_Toxt=vyJee z92?6Lxh>YG<2MZmh3jQD#$`|M>V|mN@}hItSErA~ZUdhA>{a*JrqQ~{B^2cn^`Wq2uk*Kho7r z<%CggEs%iym0NT17cFubbKv$yQ`+Cw^uOUW$&~FC8BBm+qwCW{jxFUBXP`W`IerDj z4^7PWbc0d(=)m9+)1S1G?d8OvJ>B#c$I)NacD5>sjh{--g!Q9x$iA>%~SsE z__)*vB##p27qxCUaHOiHpj4=)6Ck5+{ey9d0e(PE>Tlc4j>x<3Yl8`XzdQH%fa+*nDnWL46Ip zteO$-l|tenaH4vt5&&FIKCPOh*0kYmfJlDbH>g&sRf74rA4nYK5RQmhgo2U7NA`51 
zd~W&;5A!GC0&P)6qIuOJ0%4UnQS@7;FLB{Uqn|RH%@@RZ50HNIr$RMa2bSdn6bf4V-ZTK^&9|&)08Y@N=PXZ z`7vtAf2`cFNgVZ8(TTM%>f`{|tgqk$BW)XocQHR8*5toYbR z$srG;?0fm?>|&s>@TZpzIQwa+^c&>8lDk4>!XTD%@RUX}srE-vf?X8h|pD*=GWT`9$c!xbM! zf4vK6`TD+m%io9)=FDjhYTOWvb9WKdDH|F9db59#Ke#bYaQ<)2{(Au1>)q@9+BW>p zt*m$O%|9Cc^T+-mr}zK!)9CShRkme!U|Y!V%@yx{9QU0;@u>7m%img{x4>M*@rjB} zWM#{cHS-*e7x_jX9)HrpwnVGMpe##Mab|&5!;sHDYFKlya`yZIvT}YavlP!M#tj{X|*0+d3&4VpEv&HK&hd@UytF?BtWTu|1(K; zAfU*<|55t(&#{Nn|Kg?pfpjY6pMQO1$Q!WAhYO^BCV&2=p5hQ86!g!RN}tO=kN2O2 z^ydcQ|NOIMqz~ht;2?br{{#o=WBBKkkv@k1!Qk+Pqr0`&i>J4o?Kzed(|e=?-Jm+W z3ymAcY7ua!-y<6WIRC6WrA}v+sp&`GfB!voZBtZKd_Xm-x3soa(Q5>p181(9RG9rG zyaZMA&eG=|M@kZRZeqVKO8fDH1`VP(D-#{H7owuJ2H2y58O!9`a%YYP`(+C2dI0aw zW|L5)$oh}?(B5g3XeXE0ps?d`x%6!wt!F4{bP=bpQHE`;gpSZSx;Zwt|GQ;oRzK+& z6h4tQ!_4DJc##^Nw(iz-3-SsmuGd2-`v?|S(grHjQ)QVqpWP%#Pkk2~fU;oHc>e0u z&p8!?-6l|~ZqR-9!RvRye-9r%z_lHX3ady15JTurSrWYusCfXbD13Z!Iqp2)OjBDC~QU}TEy9ME-dCvs$GIjnOwKo+}g0=uA4IvU_H+n^Sqc^9#Q zi}|%a9S-G5bJ@p9KNnP4F{U)?{p%kc#CeZZLBt-1v{;gN~=tC)kw#@+IT?m zlun}+;ONaL_*Xxe1M?RSl>PSGZ!O&aN=|W2&v(vkbAPLD5!Ry(=6+L2nE`WlvuM5` z0H96)X*0<atCMtx$je&9Rh>070Z{?X6yc` zywkIkDU@}0G5hQGWn?th3@N)3NV^=}zA9bY9A!X3BW_+fjS_qA(4j+T+M;^iO-;~_ z9T9!j=OKh`Mk%=|xe1cGLy+!irNUeLuYbmUHL&qX$(+=#oTy^lZ~d>IYRm$Z6;(_H z%n##T)`$v6pl7A8v8ivm4F}(Vf|9R$;qlk+%gAiM`eDKCoHO68sMYp33f4Qh# zP-;Xd-%L?U+aGrQy1LX&3*5-8mssb%Fz1Lfb)yE7;b6*?Dbk*HIuj7)fvk`t-j3&2yxLoK}B^|GcwYLbSCU(2<3G>#2Zyyt1R#ry2 zLUiLkY|TwMRKHj>{H2G*_YNuR`j#$RsO42XmWAc7Sh?~;0|SF=P>?PudQn}S4y}y> zrp^DVlUjkkEz+%=PStte*yBTv)V#Rzie2f$!8#SUV{GQM6Uq0# z{Yl$v-XqP4^-|y=fv93U2N(EX& z#0q?#%yBF`rIDSN7u)8px>gKzcfnrW{x8vd`YyH$LQOj2=s19mF>MoYNj_-tn#>?W zce`M0x<_yv^6Mb7!?9ot5aXp=@)_J^iaX)JGZwS2?@h>yoL0u!8_`YH#U<-PJyLJ_ z`1q^`60KnokL{r~PXHXZ&)GjuG!xozO$u^A5gn!)ozR~`bSe=q_Kty2GwDjfiTU#saOf6I9R?iB-Sa|O z_ey+k|D8{&-BBkVptRJ<$%$b#_VMG#MBF(`Y6dJmJ4=dEKEi@hRCH-98$%}u@CxaO zrmHUO#MvdvOz8CmYAVNwerZN8%th`z#ewPH?(6N7w{mP?v{6{Ts9Zo(40jpe8gb&x znX&Y*0D_qba8#aRT)556nmkhihcOz^aRTtGRxx{XDe|Q?L&iQVe})W`#2Wyp%wb@8 
zqo8|wV02D)c4XTd!6rMp>AmgG5ebYRfQCxxE{M$l6Jk2U6Qr#T)NynlX~=J-T9Lyo z3s+Uq{SpxBBgZ1>zoyxn7VQ_y-w`J_hcxXA%kVr2z&5g_ssE|T9Qz6piH&->sDXp)HiJ=+}bw$h^Fr#Mky$cnWT*a5dJDi&P! zkzG6_hFj7(iORMy@f2+#f9dEdRu10&1*JzeHvpQc6!HJ=*fCRbKNbd9Sy@?j&Oc=3 z*2DcOy1hT@$qa0KbqUf@+Z<5v*Q|4O(C|Ccqx_o9BJ6(U+wPt?q2U!i!^YIYLIFfe zOhFgth84a-go-S%>c3g9w2?9J5mB4o6Y}evoP}>BaMaRAn4@9Mi)`9lz=pRDTgab3G7=65!k;(nvE3l?IQACxi%&FVb=j+DW5|u_x{J^&!s0|I#^pyp~j5RVDZw zuT^U!)(b!6f2`=^pYnO?(ztPrA$&Veb@ zr{|su!SXx0BF81t?%fcAg%v<-rJ-D$o-m}@)dP5F^#jQ+d97N5C4caz0 zkX_Iz-+osgeAVM^kx_PzZ?4Adz>h6 z4X?zaiQ#uN=;p#6+8p)t^gN4kV_N|&C!_P+p+=}A)gC12t&q6WTeqNuUhnku^DBl3 zOxmE2yg;Q02mcIq_C#TfXH5ttAa)^wN3(j_bB_<$=vE1NBonjg?7wCp78Om0#tFRA z7)gz!Cv?hZpXKb1hD%>_erVtItTuKtvo(y}+!Xx)>o%z`W4|G}KMq-WRhBrbN(6(w zyQzKksixvt`ugl2GhfinO~_%ioDv(eD+dHNExN9uxzVqSzJ1d4B3E!w$5kVB4cEn) zB~8k4zR43Y2a`0H7&-^eK{~2M^;>#+I#-0$B?%H777t50fN`6UA7YhZ7qV{6n$f5$ z=$^L-T@qwp;4m0K5ffW&R$f`X6%UPc7ENQ&=W3`Gl3NmnW0~4cO~{h%|0u>(yRl%0 zTzj4m*1dO17fX`!8t=lxizcef5oq;a1&IC-xDZ?I#AH07W~gEemL0B;>)&x+$)9Xj zR#e0uk*9VXmtL^D0i}%`n}_K>mtmh@yK5R;*XWoTxl>Y~+NTPqH0vsntSnj@7jYn* z2b%S5ANczERzZJTTGdVUt)wSU!l9$~&N}@I_{e>mun}Ld0~4n~Lz@mg{y1(Pzpy$o zA^FCRoja@6R&<;4xMFUTTvCsQYai-W@Ni8pIMWFh8YyMi09-e#5v<|R)`1-d9)b!w zj}SnAMvlv%CoQ}1r>LryR+ZqHNS6ze+C8+sz=^x+LOgmWOK3ZQwLh6s*i*Wm9w$zm zc;&PXdx>pX)6r(YUZpBXQlcIunvB7|plWIc;$$cEyJIm-wXDGny7%#P#h@+4j-9eNH@`lYQTU5pUzpQgA=2SQK^OqWs!tJo~pQCHhU z4tU$Xsw5mRG8YHh?kBJ_3VhV7qMbXT zD*^}e2n(}!Z(}1pj6j^rBVCDXitPX0v<1pxDQf+HmXVQt6DhTQ)sW%W_hwLxy_UvWJ{JTAuKJ;Iw9#o(rgoY zcHMtuPejIlclBX$A2^-XJ4uAZ^3SB(iOsXDppZ~`(;|J|4$NLsI&-#pjD$beBgYEY$BUKO zKU;~}O+F|_3W~d4uP**uPk$NsqHb1%&~JH`Vf-?Ujx(XTij=##NS`WfGAF&_jsUhb z>!7YozLou|IQ|04fMyuGr;<#m1Ul#!3JlGOeFH4gn+g)E{{!W6ahb64>@$3BwG^$I ze&^4hN6m1hDe_4qCV}Wf&1VB4r^w0^=F||ssoGt4LyxCUBmf|XQDqu@^UXJGQB!@VBEiXqO>~?zPHsh%Hw{=edaxF4*|H^LKGfGK5GQBS zqzRfgPL|4qR&+cyWvGBmlhi_%Wzxus1vX)gjL<#*p9!A~VYPT#Ve9igPI8-Uc(`+I z0?NM{g{C%i5M5XUOZn3S4A1kd z=p_V#{;VGRFvjindf-J>K)VhDF5+CBl+2yk$?zAZv!iVe`ZIpvT#L$w3rFOfsz5XS 
zHM;Pkiv{ZBd2+2l5b2eF;oP}%7NKMN7xvTS;O>;Pl;6=2Oc0+2CRv@`d8pMDnoIyo za6v=HtgjVvc$e0Ayy^PL9uaC&VnJxqo3Hwv@iF#du(cL`RetpCCcj8?UJB9!Xm--l z(lXQPjJBH#z1|8+YA{oC%WEW#BfSr)F>wFb^Z~}Ge9@SRkrUSq&aHTD18V>XTIbMf|XsIq41p+D{OeK11D>r=Se-PYHDiO)Fv0) z=?6doxea!QtgPG&H~kH-lI!iOuZiL7Tvb}p=kmOju$>Z#K0vnkvM!M&TM7%i8X z*uB9svZ3~*FCKd~RElP!*ya8O`g89TheG+&%}ZfUz-}1$<7rQ_aiPH+z`M!HVRe#E z05Ob&amMaKF~>UqhKMW_hM8(!GNv2)jn)oaLPx=ISTV7WbR>TmOGeRP!KWm+o|H)eh{Q)S3)ivUbwe^nVj1(Z2ESNALDB?Z}pvi zE8I|w_^M7O$S*;=vUPWITu@FD?`8v60%PZ_5EJQhCYnWl8k0uA{Q3)WKQG#rf`B67)H!f#K z9m`)ZDf%_-m=cMuGf}1d;H+U7c>?wL>Z;bkBPIlEoWV5NKzkLpUcTz);>5(n*RU(% zD?6(+$_0#H=b3lp6H=kIHjf8eRj@gLeR!O(x+y31k4tKtM{~8*nPPCBadO7Z(*vGK zT27|kMNf2*!n0rXJ_pDZRa}(PE>bC?tigUZ#>iNN=)zvht|9bds{lxRX1{&E7$;Gj zExPJr9!nL_{n(NVV`Xe1+>d%C+Q3aa*w}3 zi6SZt{P9P04+L z6sAB-kznZh6uDMm6k?c0$>gbAs7ORe3)84yjLa)@q=x5Ws2`ynnP0I)>?u*}>k5_Z39t`^CDD?aO(D*v z{*4ch4Cu(Bb_x$t&bqhGtP0beBTwm~<8cP(PGRPR%k3LrFkZ_rsHg$yCAzhA<54Vu z99K6N=fWo6c>Ah(6Oj-`Hwyx$BXEvx|U+c^ax<`P_qvJ9-&llY$%!k(1sk zh<%)|QPFUV#uRKUgq&wgD_#?)>8-pl4`xb9mEzYq4fl;g!_i%&-PmEm>`QiU1==~- z5OOakx+l~Wo|5Ykz{Ze$~Q;7f`@j_)y%jdmdu5*3LJ2oJTH;)G!S+qIe?Vo34 zjvL7kn6Z_{mB8Ge#nghQEqsMsB&wi`g#ZZWS5A^0l!*ifKk#f}mphm?i;z@sy;0^S z!j#B_AsJ3IY@3NeES}oPsbOHlO>Vt}G9UmZ5$7Hs=|%T>c=LE#_eCXRvct+M_u&J< zwB-cloSI>Cvq={slsKM{;RVICazbvlH9&kaJ3HH#k>&x{HFO||if|yl_pebA2hbvY z64clOlPg=5uZ88;a=@6BFN9=(;vdd;a)Qxf2IUHuE?qJgUuUfcQI$dn+vC`!SeOJ} zOC$P#x=C znk@;QVPjZ0GQ-u~y-2qSBsN}rvGpSu+=`dfV5&w$L+0Y^sC_r&P@+`Y!9kUO^3SjS z=AWyBs04%y$O-2@3>HbW7S?Vqu?uv5P6jc<@M$(bz1O$7TRs)^Ub;9-@(QOLvlR`U z+i_{VBw&!IG^Hyqi9w|oWsWWBD(*w>ZwO9lrY)!QAx^|+Y|4W|!QKVUZDxtK7$H

    y1Ihg)1j4u_M1j?HpqXX}kO;+THGFo;TzOd@6kxz9ATo!f#`;LRP*{tKA zYl)#Q;M`Kz=e6wagD+0_0pk;2v4JdTJi;=_f1EHjXqz4rPt7ExXVw>Xg7q2=8#Kw8 z8S`*NvG6td&(yfNs518KUX>ArAKLOnF6X}AFR8gt^$?d<7^0L$VRRRn z7k#F2Wt8X9!HF8q7cZWqba-;06&P;wRt^lr;ob4F+ZjpG=T`88VQHt1AY$sq=33;c zff}g*SlT=HASsYCnlx*agXd{n0(@ymsdD5^ql*K~uPl+|vwPRCi_Z<0TX0KEM?fqU zR?a?uYXM;K9g*cZ7*werOI$FRGhb5Lg)}SNLKw0FmtM-AsFAw>Ewf`dc*_7;wUiR( zBqR3SLJ5Y&+5Q#uB4^FwF=aa^HG%YO*Voi)j z#qsoLp7j#znDbaT);+RqjG`iRdh)D(k*L@{_+i-Ck63SA3jGG{?K~A|*Obu%(8-@( z5V0N$&c{fd*CAsy?MVX34=d_L+U4QDnnMl6<~N(!-NlcNR%n^0-h;+w@yZ@2r!x0P z&&I@Kd-Fiie)eaZ;kj?-9Xa#(K;^rQt7DMj5T;6iE6;fjB!m=1TBdU|P{o4U>U=_i zCWi)cE)6Hkel%^(@>(m{`S{S5Oj?U06XF_mf~8O<{-fy(Ydjt@*(uu-TZAga-hRm228D+wg86GUzde<#?j0 za1ouJ@6tW#36cb;79UwP<)bH5I%o^OpmDe~dmV;cas(^+ZoBI-SQyPe==OY^(vjR$ zP*=8tHi}e=P`#%RJrDE9xac|E>`mM(7=WcZ6fYEqV<~1J1e*aUF*CY=Cw607#ROUh zBO@{-n2Wv5VH#R{y4_6RQ5_KRtL zzwE?IsZ<<$lFOeHbKBelIa;m?s^i#`@nbdYMloH6W}zTk5X`rK&h)&08OrE=_eS^L zclV(5d$o;Gh|+BU2BSXuC<6_)6Codu=~Mwe(>iO_mDP0Rb{2{k^cL=TIIxj!n{d;b zk5#XLEJdz$m)5#~zx}cCwdX7kj3_&aNnI82qF@A*U9sok*(I0Si@OF0;1#dBEf4pL z(ce9m4m?{cQj!(bD^@+QI=P@b<^7uel3GR~=$)}r3%JAdtwOS;c)t5u;uL{G8YV|z7;4mK&ZnA9MlmWxS=5wuO~eS$2RlY8t0$Ns8JM$Z{Pz^8!ZN`GJ9AMFGcckECoTO`PZHZNDOY)i)OiYhis zX$jn$@NWNA$Y?xzKA;0S<|uQVH&Cn#{_2T7%PzowQRW*T39d@!{$u9b>`{d-!5RMk*GPNg0y4dghAUBcoCD{R8f8Lf2!pe4B`In8+d1A=P>rj z^AKmR1A-%>aSachu?JFq0~F0trjQCWoISx{DWp?FA-Zm=7d!#=(StWWx3aX9jtk^G z9UQjnvG)Q~K>Fa-MEoH_@x^S#2BJA9WUz)F9(^fEGy88|@4(4b74lC*ISw{~R<>AR z(%H-KO&U$+5?i9yO#fB82-lZRXG+pAyXu}y#h9e=G``pN{G$KkyxuNaFfxIj6#>}P z=?Fr;95So&McZl>lls^eZRyzA;ye#pKKs%ny{&hJV4Ds9a+uC`HC0t#x}3mPo!Pkg z3OD`L^P`h*WmQPhpSt6}%vgWdn<+P1849{K34VR^y&d)90piEV*gj$-6Ij00-!)Xb z^mh5SJ}Z7EP92@Ci#m(gAOzA<%CdHoK)$^>Tw&L94;nqNA;8<)PbN7o>?X}XkTz8_ zJ*7mI;zH&D$oQTn?b#_cod9!0BP^A(xqa#GF782MCTVaRqpw3lQ-B~sY}d29x?hZj z8|pl>hZei`c7cHMry(8+F4?f)9ZOJ3032`xFH46?GI{rpD`#RT(h1ocCqof^_^ZCs zMu`y3;!wcaHLX%9D5oW}NRB8Ur1FYbPvFClKYphp6XjR{7bbH^|B&Y4c|a z39|D;{w4OV8g9NNDgGsB>_gZP 
z26)Tu_Y*-}P_>&5%s#ZO1V2DYZ?STX;LWR#A`1ZmYL>@z&-2|7HK*i(F{8?s*~;VE zKPCt<0lmxQ1c1QERh<0cB?Yz2Q;XGVM!-iFlSo4367x_6r3&eo?dR-(Bq87e;WX(9 zUUG>_R&m?#+hUM`?e&6iCNo?e7p116l0}WbLu&a?aXg%ES__HmvN?I zZRZ7{8nTmqqsnbTXOK)NVPd@h$sx-&J)nC_@7^EapMSej+;V#ytRDZ1FXa@nm`>z6 z1RC*D8j9wDgPV#pBSYIIS@_|_%y>`?L=~eD=hWOg9&;tEXn3law`Mg8qIOyhez>n7 z)>AkX(p=(;62`NALuP86bz5tjK)9vlcK$rV&+YlG|B1vNXFxP14F{NJKUpiRbSA+w*#RInH`KdpMVqspMq1K*L7a&_#8! zvZx{`GV%6~3cF_Z(zmI&t%i1rocN*D`?uRSK5Upd6e(X@ix~^q?od{iD(bO*zckyX zV_j;NwVI?PUz}x)P;?Z%Qv89?-dCH%Z94^FV%%>E`<_pbr5BJPh!~>vlfSGeh%w$$ zvF<@%dPiXD&H3LoAzTqng6bk+gsvd;>)qM4d|;!jEYNO$y!Q+?Ci76aWV|`{aMs!1 z%KGBXn>!oTt!V582=W0#cob$ru}hMN zKRVM0&v8s0@(_8f96v-xoj)f=Ml}WWzap83=f+*%FOK2i2;|oXsb)ckiQw;tTd>zk z@E^S2w1N3Rm^1zlx2=Ee3IZoa#aeRGv+JZqZ1f0|XM(unHE?hvCk0_QH?+4!eA}!Y z9X&#LHC_>)%-*hca#>isZGnH0>e{svk9{OeUzJu=zTKgg@vFw6!s8$OsQgKsAaVcl z7StkwyOz;c>Up=(t@V$c#>)ghrg>Fq_IAmm0FTvb_JW5LK2dlbG9P~Yf2=v& zE&x~3_Fr5uxUt%Jh3*=GW=%%(i284D01R*c@WvnE7F2W%S!jis;3;ZOdbh0pDJZZw zTX6Sp)P7!X`=%uyc7VFf{>RWz>k)GVk5lvN5d0d)(n#5XH*3_vAMJM=?*>*tHH9%? 
z$t8{~i%xw{@cZK?4QzD&&05lCP2m`RWadk_Ax4A=E5&h|e3S)kz^d-DjLYm`=^B^| zD0fWlu3Nc{@dz!r^tBY!p=W0lGVW;2+dcOis@bihHBYcI&;6wJ^?(jaXSjatQay`K zX`^T(OESVS(7jwfw7eWy1G|^oYl{WXXV}kgjZ9c{*GhYT^7PPhtJ5u4lOZCg_N0jH z5imXXy|ep&T5sX+lH%K~n$dMFo;^jU$FzTAz4{~hNdw<6TkiDD3MILmz@AS%R;n9i zUwUjgYi+c;hHCzUL07&~bv4-FWTrlH#z=z&3uN_=7<{ew&cRmkg^I7r%HDq9H>>Q9 zc3w@D(}bO&^*T)*&g0W0R;?~)h9)f95PHPwbk;AgOw2p>1pZKx?f&a~*4~(Hb3Rur z2(>Q`d}}%4h;f#&qjA1Enx!T(561I)vG?yOrBFON{4(41TQ~h` z@$vDwabMS}|2DmXpW1%-l;av8r`zD07kJ{tT{MY*xcHdex{U0Zz;N$S=a}PLBWT4;eR(cK1YEj5#N9Y1LcAoK(;73Il zC;dGgjsIEiJ1-#!(x$&6W^lObmY|n=1$GoN5g#`VXhh*|PbFuOjo|Z^8|{&2V(5d0 z`lv*+xF;(B4`8)VPax*0Tp zVbJBHm$xV_K89hM5|2Cf3j^I#6NTtt_^qVj3D%fb!v16furd1jrstAx%^sd%cH`tZ z!AsK18CAX4>()$EDG89|O;%L-_P6S=nukx_!Mzj;W@%;zZpi8ayLIBP(ItYp~fuq_HKJ6PV)MTNr&FMywy zm0hv?i2J!-x<^7qia`sguqln96wH@HX-V+WUD@Daa?c;_ghA+!;uvyLkT%~Nxo6Dw z)Y{8uIC)I&r`DEGSy`|o{(u?`VOJ25ybXCVr6dHdDPXes^_TCPVRf!WM-xDizDkSv zQvF%e(J`YFZW(5!v4TA1$j!KbX?br`v6Ozl0kLG?>Xo`939t7bpU&Pny&JducbOW4nS~is-cT*em}1 z*E<&|nG$2Nqe*=iT>>gEcDAf4125G`Cd|KEaJlNEc)5UuuvAx+A}h8}ssIfi(Y6m6 zJHeYVVF@RH{4oRBmsQo6{x6kL_%`XigxEFL6eDi{y)oP3urVwNlZ@g@X6>t8YMm7U zSNK&0<$@J=?&JHnz1%=Pm_`H9F|jDc`4Lrusx(r2Ar?TK9f=W9hv3lL{3RX;L4+s^ zl+zK{Z$J}dE<*aAP%;ZZ(3Q%a$CR+i@S<*)hy|CXu7rDvopvz=$+=2&^s9j#&~Bub z%!)!kdgKkbjoJzOJ0Bi-u`VqcmxPZpjE;D#13K3Y(IFT}DcVHuVN(a_5&w%j;lu_z zjLV39TmmM;WZ$x5N{`k3(Vm)EGV3dNbVRcA{lEAo@FC`+tN}HF#h*X7a>VU5L(gkw ze-K9hpGOu#kzm&#cm@w2;_BStiJqN+(+GNNPm?VTsG3%7>4hXHW`~668)BE@;$uF2 zh<5TRzkjdNB>aF6s__BfkL5zM7rx|_8eo50og6^T%pA~yBPi&y6TNL9sJ)rcM_UbU ziStbd;oY+`uvzd)3Q>6DuCQ)ESy4>76TQdZkZT%$%|Vc z3ue4CV!}AAdw=hHPcf?;L}+-Os_;d9i5C^%t%*+fxB)LQLeDs`@GO*eQ0rJag))48 z2%~`j77UCEP!xRAL)k9Hu^s?{Gf9V@J1|^2-NnYJE05xIn1t9j4H8^;%W@ua?@@zH zog{R`3tC}rKb>=uoo|UVxY?w24M;_QrNt_B)nYnHII;)dx%!)Se>s#?#02s>z7Sme zFun4#&&$!C?ITuNOurAh;G1me9VBo00SJKDHETT-_8n7_>lkZY{cC&6rfhp%l&kBZ zCW*}vp2_!?A~32!Sr6oqUaGTV*f7lk^|KhBw5*d|!167*Q(wKHV_594R8?1={Xmk; z2X7PbHb*=W(orIE@-1MDnK1-;m(6QfeW{I7b-oWyfr?sHq8k6rkQ5LuRPdrmpJ!LS 
zPH|Ou-I+dYLDc!_V(3 z2YYW?&oAwg9&Lv#JAoPI|H(-X3)CCACykCCI+E9v@g1`4dQqWbR)Wv;m61gam=YAL+j#dN?@igH{gp zGwL6piP{Fan>78PO%xs!R`EDYpIrJ18->V^Wq{yi?TIXZr;G|a%YfyPE*LJ1|Gk1C zQg^8>q!0$7em3~t8^tvo-_DZ~l;b=CwSxv}h(l#wp;w?8Jr6~j8!`ZPTXUdOcVasc zeoMeR%{97`xfr&Xbiam=nu$8CR`!hlqyL3@S%%+ytAm3Nk15@68!bW%VPAPnX82WX zH_jlbf}qNcqUHjNwXps9Q-78YgySRXKi(hx`MJZNKGSkctp2gHydy;(oz(qr17_LvGwskNKp`?s>*!WDQY6}Ykx?{$mN%{88;b|`@^8rF;P3~{KfrIW)2 z9Sr^>Swq!~XohXb9w=vQASxUqLI;@+7uS?2PkWm7QJF(aEsnLtS)Ov<0}(eJ64AKy zw*Ix1cDbO`ON6{|4B)myE5!6tlF)wFd14O(@lMAnqgN$_H z)G~+}BCdPjv?SBR=|QS?NUVH#$fD)N)LsC|=`E~E*kbnzhuXae7TR-9U94k&1mqz1 z!!-b0fu~v*?M~COs1mH%-aUr>E5hp?aUMQP^A`UM`wLe$5b2~ z&>z}9A-D!qG^$x3MSl43-Go&(xUZ~;hO2jm^TH_wGLfc#*NZR)X!)hy{=MZDF*U1A zX6qTQRjO-D-C=-r<&g*SNk_}QwU;PFWS7-6%=bat-}yf!wND{JvVq8vmKoG=`PZBU zXKfmd^`cP{3D16W5%GJ&Hw=UtpFD&*X&fI(I6~%hj&xFJ=-?p&-D0Q-+gcuk%$q}l zLbhN~0YU(rwutKkwA;f?18~UD1;j8K2OOGOEH9kq?DnV78`W!ki;N7DjB zlq8^biZ-kDf2aip((=RIc+796D4*qHF=UoJtZJ z@vOi!qD#H(n80L!=`qbofzj(A|MC|U7+wcs90N)}ZDJ|nbZ6L{4FysU6mT!1c9m(< z{LrUz7sGa>CkD1C9YSWEr68MIoBd}QaPkjz9w3I?cQpQJRkkbNXzeSrl`D^#fQrLv zDt}x;@%`>pfAlDQPIH%N4OSPTzW%$sD#8-lNJ$N|fk=;E-c`N}Wt3HP^7LE3o#&iv zij>Z{Q=a975#kvXNfBJ`NtJX+)+2t6$>Rtcs`EI6VD{}I;Jop6`0?n%vau1#Njpt}I&*oS<@c+~ENAZ=4<7wwk8}Mmtce`^Jl$ zuJ(2#o8goe*X3*}39UKtyX9I8zu>pgan7$BJ$Vyp!$$YQ$UF?#y4jiQnlh3OM~3d8 zOwJ5TK!4)=NYm1{I`E#J;FY0$rotx{oM3^)EWFN~K0(c<=loyTeZ0oX`ipjr zYZze2rbBw?TquVSlXGTSfiF64z!OUKc^}jyTV0Gm?8n>TXpd@sKQKBvYLO9+xT)-9 zu^0D82|ahSBlT~EhjPpzM}UzaxKNIELw@Qj_(RPNdX-8x30VGv@EmZI@%A(W95m>( zC z0yjn-v}GCTpnJ87&On9^fJoT?pwK3P6=$3sQ!;;{by|z&^AxAwlcvXbdn+wQ4rJ8+ ztPmXs^QEd3pa!$*DA2_0568%U9IVv3R3#Pf)VSRuiNoD)ftF$JN0 zqP`Tp;YewVik-*`uVFwK6|YH<<4R4R2rp5O_Hl>05oKIX=<+i}b!O*&@D__zO;`_A z!WJtx{3g5)seuP4x$W?$k!29n#MwbGUGa({rFjth9r7!od;;faw2K59gbf8q;$q(@ zLVL`t-YIiMsMC7JFc^%M^nXQma;i1oi1BO->$1Sz`jX4#IVrVZ%bjVKgAhN^arRJM3gS?;f_1fPt08gj>dKdZ| zG?6AnT<<$m(PL}c028yBb{8m7=vnJr1R+3H5Bgf;?* ze9aAx*n#ya?ScnLFsqI=Ae!uym`G~X&chbmjShMHBoB6X!%}NrB(Z5j;3E;&GvSfA 
z9zy>yBARukbmeTzK&3S2whQbXov;WuYP*sugqAZ7 zoqfbi*AbHZFgIq?6DXM#fP8+<*7DrFsqC)CGrx<>K#qgyZGfzpSanGHDkQ#y3@KVq zu?ILH@;vk$#l?f67bUn@Urfi#01~CLt`$uL(h-O>j16zX@*yE**VbPz$;8Q_zTJ0R zX|dnv8H&89FOMlHD*l=D9ipLJW`1uGE1umD*~isU=7RmGIX$JbQlUwk*#ty$H#*Dy zlvTfG*x^BkFLPsze1s^x8Hg=_56V~A9K{MFHmPMv#{JG}T6wVs146ahK9_smqdh(} z9o5EtJeFz@g%a#xYCbpUQ^_bfIp*|I z-{|)z%Z=~EgVcp6O`!Mkx+RY*JYI&Vil zKrER+(P0%j5d4u;6)q9n#q(5>6*}EW-{bA*i}|xsAe7|0XgZ*rOqnHVzbYyv_4S!R z(p;QIzRY8JT5@!nx3oh5+mCG+OgrJm5EGwB8VF9z+oUs`AhJ#8M9_(zJx&Y^Xibro zl1k_>#?CO^B=GCjTd4fpYA`nFfhY@{PCGtd+-n98X$su2`(Nxm0TVkUirt^N3zksM5#h7tdxl}*6Sbs zR1lY+&K{EBxr}#Sw*Ag9=FptUsam)A*y??F%{#GBGzaIu@zH!M>pI~#xmsELBalgY z?O}EtMXb94kyUX<6JSiU;HOqV%@68&Vk;s2x9VFV_}(W$NnfoCz?-ZZ*mi5>1iOIF zibD<6Keg}R=6B1sHwGeD7jv+n)=$1PJDK`v4az%qUcx)NW!@L9|O=wZ+%n6f5Q z>X)Sbv$j!wbYUbkTTbj^!Z~JxQEh;z5(@STZt8_^()$Uay!g0nRMDgBhFt|X)UE-5 z$b=AP3>C!3Jp>mrY%$UX5FW?t1vRKl_~rk5h}?gV<^iuem%?K*3X}w5ecc8rS-7lS zNI}UD1HnSHgt#V=Aok#JJ2JV=KMVLA`>5`^AmXRph*>aDs-y-0ypUi_Gcc&z=g(f= zGCZsw>X7+*;hu@xClXUYQ`cc+f;S)QktZy%_7)tnfAWO4P^?y>@jYwEO=S>-f@QH> z_k}}RZ(R0tUuq!Ce|Axh5WKf1#=OhvbaJODtKZ0yeDxenk^7xA#25qcWC&sgVjrQNyA- zQCa4ILl|q2%35rji3E3JG6gj3*OLL{d*aMf(b+b7M+K5%8#r++vll$c@Gx?u4sJN^ z}ZD{d0o^am*_s3l9TX zMW6{w!Qr0@Ue`G-T#N^72R0Ni`TqBd>BV#dKU@ns0i;wFUmbw8o%01%nar8vu5WdC zdsFy}D@gC9JqswIykGSNFL5jPp9Fp!YUEV51epd`LcA}T&%R&F0KxU9efnZ!n)c#j zouEz?m};l@GI3vO-Mx~mMy4=GEt(J@S9${@~Um17z3MmvDC4l1P34B+h;x>j&v zvbVRlx>1#`q{~=pRe)q=5&VvkEwp>X-dP_>9tV7gbTUbI)Vm2@+l%8#BXIFl_BjHg z=14v1Gz*+bIFa(z6%|47jA3b(Kjsaw!R%OUX&k_PoluKUl!B%Scwl~;vhFlGe^Vvg zIvcXL{#9ZH8Qi0 zV9ha2zJ{&z<*|#3WmX>R#Vm11XtO;mbCuv(U%iIUH$FQ*v9k5x#~rktXvVCG89xXR zc$uqnkvE*XVFc8sA$;AyNY*g52 zn2Qz;pQ|+ZxZt9fk4J=H-=L`j22lVr75v@mMdgFapf5gU!p_`(-=*b%-mxey+`!r% zXC!zpdM4mUd%fNselH>i-V^`+g}RuU9dC#7<+s9#f?M&jBb|*RN7|PoMMVipPR=j*re(*fRWhh)gDpJLQdEzL0@vEU=IQYk4V!ulst1lgN6b| zHaL=Oc}xi`?Z+K0d1H0rfwO4My`P*UFa?k~BL%X~42?VKS+4MN>oB2w>*|(0GaGd-5VuQ{vR|_|NRdm{Y|?Z3^M&p&-BH|5_;Q9?XS<4zo9OG zs%6?VpL+lTHaw6MS9b3LG$K 
zH@90>D0ucq#{S=Y$oAn;QPbd;J{nOdae^R!oTN4+!R!v~!K7k&Btr%$#%&OaywO6< z+`Wp&J1Rg_jc23AM8P^c_f}T+$TDjXf(QcI>p8kPfinV3++tZKVzRqD=N??# zq5uF5uh*>0mGfZOGRg@Vwu62ZoE}fFBMqoP=#tyt+7gLl?zL?}D0EBgT$zY+*~dG4 zNc!B^#MyPE3tnE!8ssQESp7cVdrXOd(IWI{M~Lz|(44&+!iV@T z8miqKpV};}{4>75$n#7R$x}G^@Dc?No1>@yQJ`efA zg^L%JKz4ovHHhP-utZOs%skhwy|YcQ_#R&6hobV2?zEinRR!`Qv}7k_xuK6GgW_$U zw7p$hUZHlFTN6BzkSO{(xELs%y9TH>lU)@FAPD2XXv7HtqQ<_a`i-ZCTLvi>Vp0#- zM$}FKkYWS#HYARjNVik(JEJt-O7sFDUSHqfX)l)%Od69d1SP+Wx;m3^K4v?nVWM_& zoKh7Um*fS=OU|rwV=EpY+W0!cfg)SMb!^^@WL5`2xzF+ZY-gZ97H;#=n`iDuGcAIU zpT#%;TOr(5IjUm^J|wbGSgH;)!hI}VvLw!q-Lx&;a3Z{mAgbEPlOEPkn2r#!#Emdh zWIGj4djv_V*TE?`^#O!rMVM%unjlNnh?#{h+(09e7NJ~G)lpvjAs!x-6_H88ZSXOY z^VgcPsHmu{dW=6`xy2o%qe(ffzRX3il`-2Rj7|$rAn}1MP?QBamja7)AvyxiY8Z{0 z8Le$@tM>$eNZC;EMr@s}hKmCSkQI>wmz5>@v+^bJr@bJ|gv+6!PTuY;fB`@o{+L}&wrHn9*R ztgN=6SR_*8fKb1y*G6p@Hh)xSu+m~|cDAZb!}N;ydbT@qB~3tooB;UjMOy8GiF#vPN}TXBr$ z5s`&xNdNbH<&AL=wrSbP&`J+Hq)dQ?mbF2&SioAg-g2ZDxpYkX`%rkM#ffsSg#*?x8p|GxbvQELzE)6|hr>cWw5qy>rB?i2!|7`V z1_k2D-GJ1swlF&kudNsC74;!|_I&F`FAY4NNX>Njs^7aa%pms43zT5ZZ{qb9G_EsG zbI0*|7)mPl!*c{7<(?;U9q8Whj;hd1b8krENl95}$?~AfI2L@-#VY1*EvW0Fm>JXg ztgda^v)cnBT13Aa3a+B-GaQz(vbtUOjVoz33Zi56z-M!mjq>?!2?k#SDra(=^|;xV zUa&n;?ZWD2n32_B^1{}-v_;tAK~kvL+#FXc+v`0 znt*rI7pcLKPB*VpaF@frI4~sPB(r)-N0zngRG)b5LUKG=aE2*-8#K{ekYC7%zI>yZ zWAB20{`mJp_4*G_a=0RB}d)}lKCG8Xl8yy!sat$w2fj8Kdn&{+Lg zJ*Q0|n3Gw(3i74!_ZA=95L_vE(R#fnPrl_UxUd`AZL?pihAEf+@xWri-#t~}1>g(K zMd`lsZz4oD4(=VheH$QnQzQ&lrQHVvzjxK-2{c9WP;+B*RDuRcsE@h5LsJNaI$tS& zr>p}yAD=vkYSGw%(oGYdu9zJ<&CEuyQkO51rsm^d%|$N^HN`%+y?_dFwD+)k9S?iX zVXeAwc9ei7`V^A>$%@p>2R63Ri#IZ7-Dyhp00RZ*3t^jFI7r@`d=QP8|jkfoardl9N&yz`C0)#S2+p>tN7pWC-8uTa#Y*LQ=qc>h5+DVMryxf%Ti z*{ez>1T5NlWK3*JC4EIU%(=u3vf3GNB9VqCzy@@yqE9tEfIevl8&8vh5W!GJuY*z2 zkmm9GfrW4_N_{BJ_>cDR5*LOhzuDaT1QgRSvwESHrf^US%ER5s`E)0q2i3U)H2 zLY1>_i{9k5>U!U8tY*t4=jPGwj<2E<$F3aoS>7Cg1Y{9?32UG(n*Ybcg>uSaXdyuy z0DX5XzqkYQ8-v>`o}!I?95ROLn(|GM9Fl;`B!Z-g)XV;QCe%n133l@oFR-`?0~<{M zE|h=z(F<0iZoDiFRHey7gEI{tY%1J4=pL4VD#xs>OfG 
z*9D8@%gikklEE5sdlE!B51O6>syaJTK{mij2aR#w zGEmjbb|13LnsflUeDfxC=1@vdp>LYt4(P@otkopEgX1D}D#aYiUpDVEt}Gu5^Zq^AFM z$egnU^rAg-puKhO=1b-Dp; zYPvwUm_pV|{0*0el60;XvJZy6;Pm~$0$O~9^H)$9bVi!%FJ-n_odlR2v0?0*bJ9fPv0c--%XgKhf+nF_)#80|5{N|M^l9^JGi5|S?Gh)FRtunAq@Wc z)CyI|^lAJBndt1>H{iiX#{4OMd}f_gDuaC#`O6=&#yVXanD$(_0oX-4&Uw(FGZJGe zBcb?o-JtDC;VQ`I$R`3+E8G(0Gb#$8E!u|31rV9TZ0BB0iIl6sTLY(^+|;R2JoXCS5%;1d)lHA6MAptJR+0w#B#9jwnDhPn&l@uMrwAUlQ& z!j&cU{b!O_R<%i5GBo#`isn~O*uQPfo_eXY)ueh<8v>egqzkZlv&asP*aM1pKl~YV zS8FYy8~l(GIKybf!`FfdUCcI(voom`lr&o{*n(lir_J%wMKS^Lu_W5J z*DVp6(fLRmIX7BP1f>?5T9B5AgetE-)!1d*H`T*j!o(C~iEZ(<@U12=VCVpW3Ly10 z4SnbzN9Pg()0uUviaVrIS~q#<5ilTN7InJbFc&a&6F_LsQwNq^xrUG1Sb)p^At&ZE ztf5UEun1#UNYK1_Zi%=kOc`DRzcI7o?66X!9=#8S9&%zV zo3>lJ{afkk76bB|bSzkp*_M>Eu{DWj1?Z#;2vW4WqX7rG9BF7neZEuMF;^QB5UF(0 z0&BJsy)q@QZlN;YPyk6`m_p7&Y+echz|;FsA$#+F4Q!)qU>`k3gdIu&oqsp|Rn0{? zLM6=X*q`5Fv0sVd28Zbp!-3Xl3ABR1!Na$nGGHhvogOY2oDQWC9c&NdpxgHSq51$S zp{7HG7u8Lx3Il^P5@nIo6dgrIQ;i_Bu6@oY$(9RMI}rZOpN3lFRtqbfU}Gn3Z0l7$ z}yGdx>00O3evAXc_1Hh=akUUHN_Ib`D;kx3JadL zWkEpm5qTg(cBVO@?AhwnfM$U>J8&%;XE#=f==6Y|$|W69^*Bdy%EvT&jHOnS6{Lkd zvJw+}!4-XNZ~ysQ9gG^EoV5H9SLO!HPu32^QHeM(Rt*y*cqtT|@br~-C1ST1mXx*S zU{lGHf-&vo7!=4a9G%>OR7jVs#@Vnq5pFF)eP4=bZ_$ru9B)l!8&pw=NB>e;ULp5l zuUGUph{iPBq~M6nNk}i17O!*)Ed5{Xy$4uUXWA}`DJC&72^v(y7VKDv1+Zfbpkk+n zZd62&qS!%7l8FO~Vxg$0sEE=&Q0Xc~MNm*s1jLV~AQli1kuI?Bd#S^6X3pN{zxF<7 z@9Uh&^CbW8p;*am2+0(E;pwlmDoZJs`KX zY}%{j;JVBQ4g)sRU^vL}=8qh}nSZtDNj84E|NOyQ^a{SJgyfcCiaR5d);s>u`Lw{k z%KwKd6Nm2CAEwLAV-bZp~}NRf2H?vx@uiz^j0Q(F03HQ$z1k7k-D`I8~5Z2tI|M@GCXOj6#4{8YYXOmW^{hvwsAZNkMP{~LdT!R2N=g6^`Tx2$E;+*ekq%b# z; zepJ`D)t8#jiirdLL*QF*`EQ_f(3M^1-D1&l1skmp`Yp)+!5(59!5OXDh(RBOB6BBQ zmmI~u`_N^zZxZKgP{T=KCVYF-&%+DKIXE;k$v3i;fR=GJEGXgklF5=A)Zh$>xMxmJ zIFj(~5Yo-ikw5-GNH=B^{^&i^?|60D7*ryHV32vhigRa-vQ?= z@RbuDI8vgPwtWk&7~|?O2p}@VH|G_N6|l-wopG=;fp~`2-QAoxn`aVhBgY6HR5b0I zYErOMuFdB*7nH;S@e#%VWvjP{ufu(y$B>M9qnVLw7D$JBSO=$}NwGMW)DE2;VXDb( 
z{t5u4k2gj3rm_F}w=>6^gTwH0!b7+kCYKAqCtYiY_zC4JoCoEDA5uCL>?yEqq^QSK2=mcKo3TZ89oBd57A-g|{I7BWPm892w+2j{TN zMUx;OOG_lSldvR1OScCY1Yi2{DAHr}tpg{FMf7E_bO)>tYRzoK30!Xtfpfz$TC+O` zHvB;7CSab5(Qjj&fD3(i%kH;W@%TVTIVP5)g8q6%3m+s6zYf~|==a!yaEK|5eru&> z0z#zl*c=+uP*-J!)ZnD$n65jK3aEWJY$K=%aA`WYitv0lULZRArJTM4<=cAEZGYs( z>(Axzm)CbN%}8ck-}U>e*G7~8N@o0`5BOS%eam+Ao|CH}r1ZvW(swFvebz{yW%yRi zVAoskP3o7Z$%>gsp^8h0MEER!%S&*c15uY4bY(u3h}*=Kyz$BPp;B2{vrC&FhE35c zaDa&1WXrBFmMi6lSMT#=!4_1C3Ps>HfMagSg)q0Si_UoYSZax@KWklL1aJD=D#Mh_ z5IMMdpe8X2ZY+EIR4v+u&y4S~jc(`z%kgmD328(g@~-cOng@$~Uvn$*LEsvJk7?Rs zvjH35H{@?}9D37F?!A6@i{zp!PMM;xDYn_zF1f#{(f zX2O%z4i*A$_KtY~=92WS=;botux{MLJFOV9)K74IC859-! z+LH<95VoA&N~!=nCd!O-yw z@!+`imVYbea^M9B{_$<^kqi52PqE@u|e_8LWTRM1V5?QAAVFBPBBXf20tFIxh zXDH2)TB)uTi7kQj?;zB7nFfO~Vk6l$qHwwaA4OMUTNwnfLJSxV{qWdG)Z0wMrgHsN zq6lk}=^~p0H`tndhHAmw`PATrFmDchXLJ>JJEe|zP5kdZ!eWFdog$r;=SY#uE*L4t7Ww-n3_B)U+KD)9JoR7~h5w8$fmar8GB?Z*yjGKc zm+n85m`k1dV~>OMnqm9X#jSF0BLU`Sd^)}Hii+c&baKuKrjdM)iEE+0=ByT1qyF22 zLwTao%x_ZgNU&+i(-t+R?mH=wp

      REJk;D|A~_2Hu60}I%9 zZ^RRlj%cET7rJ(JFqE3cO-ZuooS8f4osfYVRrY;Q4H(MKD*NegVCk4!FT`C6N?jFLmlbKe1I97pD&kPUiQ&$1h(Acrr|t-u0_h&@p#HaJh(^hIy* zTIdsVIt`wb;GGU647%)+uOFO6lrzuZ;`<^1Q?xZ&AsW}KI5Gb#O2U&Q~BQX7#$SmSM zVeHLDnb;cOq!5ZTghq-9A|Eqj`Uh|j{qe_3!z%dd{#BU?C!uNZ!0xXn)3^u)Hqa_a zm)7P%9N`!6o9J&tE~3Aw1MMy(0Zsc?z2;CH-v%|%>)iQQf#`h1XP|H{Mh!sF^4rnk z@2kW@mIerR*-%)2R>G-L^iTa;y*6^=6svz|2Joeh12*t_sOUo$9)qHaiM#|*ymX7Z zKn|r*f(Z%gzf)ifMkF%Gc0E-EHu^xm0j9` zQ7gDS5ix51MLKtrE{gNG3oi*$&sX9tN054EF8xUA*_ru&rqmM@@m!bvFva2aPI4%{ zLdWoXv=@Y%hX)ZQ;bA->GG`4-Q7Lq9XH+_|GCtYnNL^$XrrP=vwZ>_rx0!4dT@v?Q z=MWEQyM*>mW{)#GiY$mJMt9b~&Qi_*p^vxIjtaYk#++Q3(F1hhxT^?(1gO1-E2_%>QFaixOghrw zFaGfwA&?RGL^vw~$wBx#fd8Qu02Z0DpEjcgdI_|5BkGL|NY&_*^kN?aMxi0GeIL4p z@=ZI+&|9jatZGhd7F82Bt%&eXWTZQrEURN_=0j9*zg&t@8__cGU|}7w1Pz4b0p!%r zqv;f*over`Y{~80z4yRbR{2#eJ7zkeg8a@}Z>}qyjqhP#MG}p4GhTJdQZH{0@|I>+ zvqhKl_0QOPA4C}S<*d(ir!Hi7n z8!iI#BFwaa(a_dco={j0K;;RWUa`DEZ&eq*-1wIu)X>=Sxs+idc5X7=a zGcXt1e6eaGP6e-O0pvmO6cmO{p{^-B4q8RY4AC}|8b?hY7I=1;{?^k@W?|#kFks-{ z{0!RrtT4D{ZNsyd8>J>CBy11Q?!HnpdT<2b6%ELn87IIM3l4givwJaAe$5;0hv z8b`?`JhXEKw=Bf^AbUCW#w57QF21B6#Lof8MczhtTU@B204%9-1ZM2I;#m?`3cV@? zFKS5bq1nva@zG7-(m)`>5LiGVdmbp;HSqsaytcl1>foi9khbbBMO!829qg=#^jW8fAWOw-TyAg6H3Jd z5YoaxtQ($HxF}{Wfc6*;i4NR~v;uX*g6_3sJ%(2i^kZ-%XOMFunqcXMPtB;^zNLXe z2_YXONu&i%jeBR*Vp0fQU$ua7`CXHLd*lNC(CA!*!}ftFH3S{W`nukK)>JlE24Yol zUKO>MlC2gZ?bR@EwmNVT5V`>>!VS)Fpq6q<*|E&zvh5waoCDoDTQ>p)e~|uBzTW`o z_D@C<&J(5t0uBlad7iSPv-kBEctb-=)wizKpQ+Be0j~%HHrYGxQ?O4sRckJO_hpqX zsY%5guuah@6ZE_hP%zuqMR#sRY4Q`K` z7he1JlhY@^@%?eww?FLvWB2KT3HuDHpI-~xqQ8FdmMA5IMSHgBh36SBDk(GW|Es3y znBAYR&|cBQ(&1s&X9tm4t-f^nu@NKo??3d)$;@zK&ZfYehT`VmTfCDS@Mb5|r4v7? 
zWttA;e4`0lGbCrkp<9!GsUA6Y+&6)XM+G6}ibx1|SYC*tm(ijvC$C@ZoJVt@*5go# zRS;WPUv2WZ`DL`j+tRbzP7e1|ohKjRAVnc?0ZFrlayvzxn7edb)TJ*%vrR5meKzWUzXeaxqQ{+8?%orPUo&bY@D&ajIg6T43$w*8su^s z*LU^i2vi~O3l;b3rk4)|P!Sg7BfYX51|2JV)kg6pWaTG&sfz^9b8D85`?QwO397Br zX{oJHHuRzMhG`b$pU%yzlk;6Lg4w62Bel|a9y5W+IIuTDXsE1fC~zAiTB6301)mqg zH(5>fi&8@J#z$8e34tDsbKb_qT45Jk4eQW7&~$n^d!4@^Feh%5N%Xn3{BP1v8#W#fgNA&TGks>wq{J;4kkbv?`nXhqX-cwpk(C9b@ zym3RjrlXdQm+|nR^LY@un16F!Wn~e=wOYya#DWoucm`0wD=_LHH<=GDORActCJja5 zhkV6J2_e00EK@r$O4tLsvvc!cGJvUdH`Nz@$`d;KrW-Wk|7+1aD-dQtMe&u-CSxYjU{2&31#?8M z81D%w!E4S|Tt$Zl%pl{)OWeG5Yhg*GIl?dr4>3ZAM-z)j4g$Kcv-S1l9P;E8c+iJW zg&2Q|JFupvW~kNc*RRW=efOdo3HdB5f&;(nz91)6Z-v+P(B6cYBE6~|l&zqQ2%cAa zYt3XuMMip}{9ZUa9`|KF0CPdrg-U|uK!j=HWFTSvz*=a}+;m-D!T#@fo@f>spk{B% ziZS#V9y{_PP`)e&&#)5obkx~gn~GYAY|}G)kmF3VbX6FRev(JzZfZO zP)Al*nDdA8mzOl|)UhgY)&Eb%+naqp$_rWZ`9~gsiys#_Mjyk=5yL*3+Gp_n22tTP zY}`k}`TcUQ!<^l1g}M!iM*#EnZ?%$S@;xSMo|c=~qKhFEKtBzK*P-_tO{D!-6fnL` zi%0aK7)wc<{Js3XUH=}yZ@a zH((uBhyFihkL+~B2MybNN96&*#l!vV@iW#I-K$e(&EIi4r@-*VY18sox2lP=N7kIX zQZ?kHlAsF4-hQs+?uo^t?%UgyO?z$*6*a5(C(oYEx3{lV4HV50aW@{YVcDod^&sFB z?FP%D{6LeNMyiCPXJ5i-LDtXacQ?Wy^Y2DQhOnL!2i}jpMIZiOWp) zB|Pq^Aa#NWEeYo(PwxbY*!#VRWr1%EiuO$I`RUkXek>NP$o)5PY`6ddJ%GJZQ71k= zaKp8)xRdBLtPx!#pI{u%hK!G+TT|tu_z_Kf3TH(3PG%@y)Go2{B*eoQ(FHiG1f6V= z>-VCUR62u~oVlgy2RvbtID|6Ve3U=(It-TshAOK(bZ)x64~c$rxoj-*N3fEt^3XdQ z{hPmo^m;zTR>Kt)4=m8YRmA*j6*x-5cZeo2%4F)`FnnhZKy8`lgw}Er5EGkYqQ}tv z4)?CGYu~|IK@~>@2;e0p<+P8d6RT$Zxy5^aKZ*gUw1KdN0~}VMx@c`}y@#wY7I-vG zToR=YxEqHX^epb{sX?bH{@$B=6HY@u6@y&f+!H2gfM*JKPK$_v&VrGzRqIiF3Yvyz zs>y@h7S(`s!uIbanKN3k9Oi^AY`jgjq;m)eiaxt(^%PRVM608W3{2&)2{aRZDQVi%&`+zX0ztze7pL3lK)#0QC9;>jfYMH*kRQ7(QVF z0l*-jiqzvFqOGLgo$jk3gKA5Na#?qG*bv~EOu%EW$0rfLdo2^Zw`G2g=7e#$y?i5ELCzazl!b8z5`!!`0dOgXQ!th^ zESZ8DEL|4Vc(<^f_T8T<hBc$P6+rj2eTcS>Q2a} zDjbdP3VbdP586oL7N)wGTn5BLfsl19Z5D)WP+W}NxC|C*p?n7{7BkdCkhL&S7D;S+ zl{dTJeR*|$m3uq6dE6dX^1{PYlj~<3SfFS0Ws=gL4JOreaL|o(50rk@5X%53@ZNl7 
z>sY#@=m>!LHki8o#HMC+?fcX_TVkYis{jo1oh&LHY@<*1z%rsFb&UCzL*78p#sO$((wE!3w7ukmY>?hv725}cmrZpg1QU-)UC>FKzmW7 zLO;lav>~$c)u`$@d7W|39+=5~GbhIzv!AQvkJ^<(qq-0AmoZYv(#>m&iqy!H3|CiI zM?iyb7+hNGt<{%zzrKv#VwCxiq|WI1?V!UO*SGI_fG@nBIqUSp2x;J(^un$}hcwyd z?}Ho{{FQSnD{Bv0We zHqxEL39Y~m``MQ*g${FKPyr+o6J}IHoUintwR;K7ZOyx+P^^}`!ugkk+1Q6zy|XM; zS*m4}45=9d`gz0Yw4)mUbH4@52u0;Rz#sykf|Rk{kVvAJVkaF`fE{4MjBseX!Bo`YN)7N;1{YItA6CxcpT!Y^7bc>E`!D`!@_kb;cMJ`Lbi1(zb6 zIVtmIa{v*{f7Y=C3G&18``x($^^av9g!$nUW{4^xkDH;jJ(P;TlITXM6_*Ox+peyz zDoZsjnO#oBAf0D~9@b1Ys!gI^2e$nf92gQsHMMPf){cEKsug^Y?L8yWC7W>NstZQR z$;S@gH|_MmQKwEjIywptFGx)_mM&d7-u%im90tc^wY^&$?sgD7<@VjxQP$- z1SgQ2tICpJtS2TuIS3P>S}b>bDk8CIXKk;;{8L)HdgWjlDP1`ikrd!6o<>t?SZ%*S zodHDf*lUL`IWjr;=-E>jRiJdD5V#O(c+Ln9ZG$vdmWmj95rS(cycpF*8)yCakz^Wb z`vt~_QeQ=S{oS28;wB=~_TM3Vf$8m@cs*4~6FRaqY&hn#KV9)c8fT|K9-Mj~Nhcmd zKlxgF;7wYRi=Wm)+9EA)P|e4Z!9CXuJ}xT!AHKT|)YW5b@noEwC{*xBGjNrzi7nn* zBWDn}umQF*e$z6*w^M9b2b9ZeOHFh{UD?T<_;|Aw%dtc}jvecAi2-~@8QhA!eOElB z$&`22*bYaQW$i;m4&F~zR#YVN1#rz|DliYY9uhUhba(93N1YM}7omrZF)98#DihA< z;75`|TXeqwv8LEZxE zADacYNn^N$=Hw$0we=yN;aX*e;Md}A&jxLm9fIJmEqmHH$yh#?OxDNG<+ll=`*@PL z5In=iH#?vT+{4bF5-0s;58GhCkd}E+h?ze<0p@T`9*N)yRq6n~yo{;Vd_KPWIX~Ht z!3Au#d3LHJNRFqV-pvk#+IwJorp=%n>)Q+=tWMEtZimc>R3b-aaX2S4Ad*V=X)**^ zYx7WXCG~3NsN*lwnXk=Q^T*NM@8sT<$+~wQis&geIP$I460wuB4)wy;-MAQsTo8)d z6?jAKh9g5vHtjamxz@cytQWEFF8EQ^xFc6#V9fNzYE zPRbaBQuuVS7|nXYlN5k?=&<&l3V&oM%&S!}cX)dnYP9etqqd!Tmbnexm{lHkn>)2x zI116%Ok(2KIU7KWOJ+f|Ll>w2E18H4vLU!A76|n{ zSKQ8-<1M$Axe}kTu2Vrd#96+*?hw&j^I2{n3Nxy=SHXEWvbL3qbqDJ-iQ%! 
z!TDVXQ%0dRyd0l#AMrG*DPaa;+Nc_O5xyFc|w?YvC)cA-i6 z+du}Wuwm*|(~8XEteq*4tQT7m1)6HDLu=@~DH1pI%e&Rqv;DdC~3HN=)m>d^qKGLqoW2Br4N1Tpk71s~?(R z@DTLx5Fo)&h#~iI$JQa4z*pS=6wU0hEleNf2}5is;VcQ#5Jp(@rBMbMl>~KPjMS~8 zaUgg9S;qa#kFl+fslEScY%A<8Hp;3YKQ2)`;DHnQ;-w7Hd*8H)#r#cdP~Zpv?}t14 zc(gctq~yGqkCoT+;rHWn{630_i_1a*D0-z~r~S$vB~P$*`o&qbc%bOCT;4YyION%@ z4PpBijL`YLsdq=5rbYKJx=TX z*mHU>26cZBnb?I(u{9jxbO`&T#sW&&g+MqUSF*AsvnV%$YR~viUs2VaH|UdE-;;QH z>Y-XEw2hWm3zwSycz9B&kq)b^7rl+mXFZy=b#R_=?>!An43h#DjG!*BK4@si7=wsk zSKJZ9s%zBo^LbdS)JhCTKWzgPVlTWGNdv_R#vuqv9ruu9?odDgW1I!5+kT^HofUqA z*YN|Dh3~cO@(q^RP7Bhp8Y)*)%`(c;AN-)Gt zIZd>MF~~NtJ`-iZ|}*WQ*UAD zhIbKepHG5#01vQhiNc=^?9Hz^*qCd*a^HrxhP>?f&@g620M{)INLy7G)nJX?eR5Uod`B z9vIHt0#1a86Q3O?(=&0(*zBefZbE!<9~P=0@|2+~eYedn_*|;ydK@wecpF)IWB~sR z-?Ulujsvgut_?ss#DMQ+y_#E$W{P8z^7hsQl;$RHJkKntjj&D%;g1*yU$k?r=y#7U zNKIl9D-2_V!OG$WKxysXaSr?Ojwo^kAU6lOz#)cFrE!BqyvfP*z8jRUqGVEF_71W{ zf=#bB0qmmiPL`@Jc!Rc^>=P;Rm#_aa!Qlx*?IEE0G2MHp#j5joSkD&VVFcjTpV2DB zL>cjKn034RQ_95t1#uMk<1bie(sQS%IQyo6HIgyFb4It;04gq!026m9UGy-A=*LyR zONvKrF9cs|tSOczTc1aFJF~2HDXUisI;!MFJN-#N>BLWK0Z{CrsN@9Z5TJ{$>=lQY z%(8t9MqtHbw@I|J^41pVr@BBMwGmNt8Pr`NL1UcZd4{$NBcch{+XRSqgO$$b%|0t8 zP0bFr7@Z133?~0sqfkH~gIgmbEvp8Kf9JAJ_@}k>kkR7K@kNknW_=mwaDL-?mImH{ zVJ5h?Tpzl2mS~O7%@4)C!ww#(Kp*O6DH6$B+1``&27JF{;F;aI0yLT3^ogPizvAmp zYx$+i{Y>mVU)(?EKd!J8U@0fgmE&af1}ScrwL(-R6)uo`l>k>(mY~)K#gZjw>j1Lu z?Mo~5J%l=-qK}&}UqlX`{7hxeqfS&7Y0T{(>e}*~=hY^_IL6piZ=xt?ZnUs|f%FIj^a2N*TtH@|3zk7VFR^z-@|(H0Y)n&K5dOcB zgWGtv;pPVPj+?K+`F8ZrlF|71S036xKn{%sCtq>U^boXaSPzJRy(-AP0D+l)v>ukv zQBi#4it;BNCaE7j|NL{yo<(XAm$y{+BVLGCNS3Y@t*TCK!VFJ z&y)DXhg) zo8jCSHnyoQs~`5W6)m%qHe~(RDDwYJ$K`+C_QU=Be^CDLpNssTi;OGq zzfw{2Us+P~0LRG$90mR^C~DAVGq`aom}De<38TpvZm2K44DJ&}(NL`$-ndIdcL%X9L>Qyr(xq;0@VF`Tg+XQph=l&@jkG z4AW20iNyT-GzQ0n;iGYMI>AOp;QCzdV$J|PFIvp8-bX6|zJs4Z7n)dkBGAmYgK+6s zn6lZWsT^F+f!~089h*g``DBWb^rAqLGHI!Nw0i48VogjQx&f?-^ZFrVeqh z@&b3U1v7&=ReDtdbc+ZQ^r9p>xxJ+BY(7g}LbQLiSumCYd|nK?>ErzV=bPVh@q!#@-pu$xC1iCeqLPIy5otGYOA3-5Yg7`jSC>A)e~rj$Ok43G$r 
z&*4Zo1ovf$*U*?F?(JJ`q4VF18r-%anQBnt9Fnb^GK>szoyZQp0_SW>44~BnqBr(I zC!ffmt$1GtRnwH8(dkzZs0!yi8U}zxc6`{UWn|{w`mFfj(-Rnu;Uz`>==;=01dF z_BtL-`3xYsTpLEAAYLHiwu=Bn{_w+@d^RhNWu?hi0S7BU9A%BB&6#+}U)iEszjJ=- zal0z=S%7+Cn71?>d&V^@23rQHf^(eQ?DM9RSL1Ohr6|;sNFi0?;B-0hP`Vc79^2wV z8Jm&^9w}PWYA}rYl{3@>#M@7B-wjp)Z6Kwm*dX+x_;fT-=>03&k&t*%oI|*L244%U zidEiLB$7Z%fc_VhOn^Z{^^t=Dkq zet}_TIIL2eg2C962Q)B&Zxx4-!@V6iQSOA~;}Od%t$b)9mlU5yw-ZDVPJcBq)r`HW zc0l)=APIvv0<4e0j#TcE!A%o$I>VkPjbz6O@kU5YgXn#WxZT)VfG|3-0`mQQ(C06h zpyM~tFRH3k9HJzX$z>NNxE@lAws;j`z+-L#n~+%3u(~GJ=p`X&uU5F;xH=t=SNhJ zusZfANfTO0T4?{uae|@+nw}v}>y9`>j&2gUN%b78G{Xm!0r({!`B#nz*zH`zHv`Z6 z?lGe*3;lG|-RYdtO7XaaAvZS|RQ5C(UshAtN|;VUs&rn51 z)#+thsJY}=Xu9(&A|Qm>9NnRs59%CD#!QZfSQOp` z0k>fOL4MU>4pX2gPnZG=qdcX36;GRBPe|h%PM_{-AR(7N6aQ~qg7Lpm0Q!YGm`I;LaTKe1RcM;klhfeR*ICvnC$<4BYehJAx4dtDg zU(3iLD$t0k;eZ+6@DM0a1|4?Pc2f7J(`Er_F0AhsX^dk9Lbv)z&erOJ3ADWlu~66~ zeRD;Rc1sk`fhrZSkDwX7zIAFl|8h7gD%>)cr5=yC54MI|MC_YvJW{ADwO}+1 z4;CF!wmCd1ME`hv>>*h6F`pp!U+H-M7g`to^R^#u(SHDv4_87)=6^~t@c)w~CCBL# znxN^i(V_(9#!)rIb6`38;-xNiOv=Z`qgyguDA|lgfVmZ}ulvy|?9U$X{7vQlPf-AN zL=Cj0LAvLbQngDa$ly5ZDr(H=$)fKI?q*L$k#ICKXL#d?H_RO!dKg6*<)gaQ3?qUn z&}c%mk0m-5ra2QnpxG6Uy&-_e@1a4a;Or&f5XP)(p%@Ey&T9eDPyt3|H@3R@7zWL8 zstn|yvn>xbZg}k;MxOzEA5;B*K?7!i~f287MKK zDp7^Wx)f7XfN7Xm-vL^VW@5e+Z4>&>U!pd`Y=0`SNJzWU z)X+eOsPuLl?ZG&gyjfXjq%oimwT3s45`*o69`j_=tq)R}lV^pInh1#UVobtqdY5t2 zuHaZF4|b{enH$_dg8z|v*R=_=%TwY*zJm~feW~DdfO4U-Hd=+rI(XA%64rto7o~c+ zj?z9>70h2R#8s%^Pzh8tQ#+Asaqh38_yPulP+AA=qrEW20fl}#Q7$^^8L3_=)s^Q< zf1%#;GohSdju}BCC`a~`QfQ_JI|ggjN|yvwR1Pa)TF_Nu%I-6Eu^?QQGDS8XC6s=rdzuge`G(7)Y7wBBgo#2sHv&N47VWuJ0TyE!_sV?Z*hkxIMSi=g?_HpFAFc!<4W8>lnduHhBik5 z_;M^c%`!g&wK{eXTqK67`BpdhJ%tl`!5vhhIn=Lw4)aN1bQ#;&%GQPF zRg4tDUVU-VCf2v2*B?bGdvF2G1y7!=>8v}6=VN*kwxv8L9GjNeoWK+8nsACkozM{` z+n10SG>C03JyfTDVKShQ0OKfl4>>@rPvCK!$OQ3YDaKzkyLtaF-!4o#0q0z34*K*7 z01(QsBRlxHF7r83p)D!3EV>+y71&9)>+CBa>6X=V~kYq z8C!H`amPIG(&wGW+r19}snK{`V)LAsGlVVwvbK{71lx8=5Rf?5mArX~w53p;wbDFd 
zCQfE>{Y`f08Re<^S%TU+*oT9gso<8j!Fi?ByyQ8DTu9sKOJi)sEPr67aiuMP@1v@( zr|08z+~41@dZz|}NpM#r2m*SaRXwr;}nCncs z%(@ytNl39XBR#Mg%(pRv&Iz1D=5~9q3yV#1j!lowo1*6GaNkA2$rOiAXd^xxl}nW; zkh_Rc8ZnK;@wMMsd?;n*)s$KO$C5uQP+u-e8iIX>zsS;-)#p^(!k^e#Ps39t}Q2oyAGLE^#^&d+0CC^m2 zGH`|Eh3FW1B{>LA&|KlS% zof{trz&=>Q5o&k@1|#cU*z1jJXllVlJg?Ipfo-)ywQ=t+C6yOmG+c& z1s9-frRdFaRx?KQ{rEH|Fwl4?| zQmPgtL~G&!un0F)=Hk7xJ&K@zp)7SZPTPPITw7@iHoU<+E-*$Q&>KyO!`2+`)?^S_ zb1N#+UQ>u2x8nAc950X=(}+*QjKp{0#Ord}$ZPE2zj|Ba;I;=9mEkL0zZ{&^xdjXZ zJUB=HH!K_LpI=e%$_W*?%RF3La3OW-0J9TjV~93f%An=wo))fS47R*^Rxnb`uuH-) z?mmH+F(qPE%d5w-vckbH!~ua}gnx24@kO3l9QPOcjL!?6koi)1f=NmPx~QrKbE137 zXj~W~SwoKu7b3f!L;B>~Bw%$H54}IOY-zE@d3MAAI_E67UeO*3+$YF3a?IqWy}->v z1wIfe>?JijICqu7Wng$+qGxXdXD3*KfgA3@Rh&jYC0e9OYi{nxFt7!l&Zso{3RoyR z22QFPH)rERys}@MJrs4mjbXNK@K{uEROQ-0&c|Z0Y#kqHcy@`y3I|+gP?5ck(>O6X zr}z;k^3y z>ezq$ebc|PFRHC&m+fXMmZLMm5`$+AHuHM#=a@<)-tz0x`0EMgR~yI zsfV5ZUc09CLSeq7s_|^R zht=gQ<&;@C-KxSdevwW{9+|Xhb0$DuG)l>o3M^cX8`5tZ6yUlHk%I;;nBMaTR=FN~ zc;04!{ps|T(r|_C^#=C~Ve}G)4=-@;q^OvR)YxRGCWb4c(Ep}|dowMxXO{(ComwpJ5QN8fOE?40g5)mta?15whh$V+bobyuL6t5|wK|mZ-w0Yq*BaW|L&0C8^@-FHA30`rZeY(-Gj4TLAUJgvN&qo2W^z;%%xz#?vsYzoeT`Iq@X4|Q4*FuQ8pCY$QLpH8C-Ie9-T$c#`U|B&Ri$qX z8eaD-&JtQX1X522ApmbSJkE8wrClX;uKvAJ;t|zUir<@teK80$9miwecy#s83QRLt zG}{e0kQUIQ#CC^)r3kdR=jSRUDxTcc_d{`rQ$^9P1ZKUPx5jq48I(=iK1B9{aJ&Tk z^xM2Ez=T4#p!KA?f1;r`R!BN14j%&rY~6`g9*zwY8g{kSFcbQ&1*=mlUuRUvS zK~*;_=WC8_LcgksEf52P-zV7lIPXFk5G+}A*-|*kLpt8?)h9or+(mln%v=W}RTtOm zH4g#?qFa-;A*gP_h*|&k7L~$< zv`t4d!g$;N?zEakH$?LGbZG2CtEE$Gvjp(>#?pz z8KNJu3}+wvceCB0}COlapj-cwBOeF&BvIMk5R6A;uMx|Sg zhFz0#jHHxVJcxoz)+SqTPf=yNfXR$wVzaZd!hIBS)wXgFC1PoOjNKp*;;3W)^>x@e zkH(?LJQVMA0%EVJjY*pjj>V=yduJp;p$D8`=R3~O&*>?S8SP?2KTgwr`7m0y8r!lEkVqfMm!3qPo7*E-Of-rEW2frZ79j7Jw3cyduJ28r$Spbh5fbT9sk9n zDgcUThYyB0jvjKy8s#Y9$uZ}=+VG1-_c)?JajD>6l2d}w!WAdOLuxwtm7L8s!&eU0)qA&sABpfP4KgrvEB!CS&5FZm7xhU1;+7l9q{ ziCRRu@Qcr_8Sx%A4aT0~$)UHf?}ar1Y|5ZDA7cJR&RbEkFdRz=x(AW2ozLVhR1z*n 
zT<~QhV}|f7g(H^p5>IUHyA$w;0x;j^3e;*@UuxCS<4h3oT43}nB5^VD#4dW(SOBkp zfmvasD|S}pnup*H!t$(;b7O4dP0t@Q;4@gkahxbgjNsC#>Y%<8;GsSZYMk?;&Lpvc z&^Nm21Yj_;#g&)89H;3%Jojfg%(VX6*UD)WRv_kQSw7o-4(2wv^Amg-hk@&9ylswq zqq}svWwdiPnv1+@5DUtz#M!unH4z*In0&yP$(S_-UAfo{*v08o;|ue_$!rkJ4(`TE z2a1d9l0LKXVtqy6{!9V(i1E!)?!h~erTYWG?*-u{E!XxQD4Uw%Ggw9@HOw-+e4lRZ zg7685CKP;{N57O&swIs?XG>5r%ZpW_c&`bug+a^tQqsHU8GS$BF<~q)G7}Xl1}g z?IWwSx&SrqY&d<6@6?(gRq!Pi z4ITS>6VP2?V%i|~C)lBaXR8xuMjN2&T$=*;k)x$-I@t10Ol~RfwNsJn`s4_e>AF=t zxAEU)3cc1xCXT{YO%4H6ZA?=YTMQ0~n0&1UEjNRfPMX5$BaHXbg1Tc$X}}y8CxJHu zz3Df=z7E|nDJT7b_TC;GIjs}*?DmmEy)Iitqf4hzUXYIhpGhY~+~;}??%5s=dFONm z=!fIAC?CZ!u@TapwYS@d#6yBH0agH0j*0K!mY71Zn}p1Np{a%nmAk% zha&dQt|pmb0TR$I)NfOlgU4q+LYZQai^5P10x~{B@X9EW`Y_urDy4G<>1BfOq^B|3 zwjba}PKVOhj@pgB;fr$2rrYb$RqBKt;!PoxJ#nprDup}+pva^%`1@VnN$uZyU)1jU z;_ZmJ&vVa&oILIEiR$-yXI4iQ{#vxH|Gij=rGmlu1f`Jbu#oi*330J2O}6C6p8hrU zkKP8d-!K1tLEXq5Zf-48s(<=@@w}Bi6Y8onw^Sb6kuyDK&8S1u59M6pFnll!ss2w?A2V<&PNs4IO-=_#GzEn zH#Vzy_R|i_5U=*9C}Ry@yP&;%5Ktsk&%a)N{o(D-cZPt7B8NS)hWs;HXnzFF51mZ_ z1{foVPQ>}}sK|B{VngOeeh@5WR&ooYJQo>6c*-3|*^gOta0CGvpBN5-#8!TD)w^Gs zDEmGsHFG33Dvshk*hJqUw&I*sni0zgGo@2NFDuIjoqb@IoB%8&YP_dB#>4}`)jBu= zGlt-!9ER@TXwAW2myxR)*?b(aEE*z7rly~= zi4&aW?u`p!nkOJpWQk*_=P3Zj2LkJ^*y1jwcD?IA=1%9|WAi>(gWSxOfBuBMcpVzs zxGdgFN00bxA}7cGzW?1%^|!Ex0xKd@u&ueQZB1U$8CSlH`!UsRrN^e#Z<}vLR&vNKHI+aY`;$utM70k6HKSoJ+)*uBBZ*{>fzU6*TxR^Ct{+%= z;u+{GZ#L1WdB&qWVv~ge_7~>ot^nSUIez*x&XG39+$Qd1=s$+VL3m$?lPQQq(9tO1 zosmdA;e<*J4U)@*qrEb=0dS$1M;Ix>$b4e6kku|fWi0c^O7uMWvhZP&l;QTB*yG9} z8dh#jKq=3JqYC`V8?Gupr`tRN{7oGfVtYJytx-P@5SMoLt0a6F`XMF{xeTQglkJO9 zW`)mMv>kGk3RG=-ffwC?4ccg|N>nx}70@G5uUe4amBysSdl%qDUi}scNM5}12S+SS zj6mbR@t$UXPDLH6AEr0fus1F0rkn3+%F5mYf_AVjtAbJ7sE;RN4maCV+Hr)esb@Xh z11qiG_42}6FSyO&pbQvJfy6bOvXq^mj!7RgIg>|2bwo10E29zd|JoODhKa&>gehbWP|F4<_$wy z8Ih#7wBD#7#jT40;|niiL2*zfzisA5gqdtNg{DhB#~GO=>1c?DE7Bn)q$MY$quh>q z(vGTlEZFAby>dR6`Ny$`51zW~CCLqDlvY%5{DGG@J#Ac)OjbCUEQymU5kW_A$FDpU zHPV2g2zL@(U(k~Tl!qC;)r!Ao`8C-*iic8PhTSc^OaTwdkI5PI{162v^s&$aCG6@~ 
zkFFAH*FZ*{b{mkU!Tn7){LCkT{Dp=F0dc14s6;lhqJR61KWc0BO^PW8G`eEUCC=^;_WMsyC*Ii|}H{J!NWbC;1 zjfV%RVUnNHNzmO;G9Ub=1b;j*;+An}JzBxNKrzprad~oJ{@SUvWKR)kNr{}PQF0|(SG5NjKXc!tmxs|$x~`)tL@W3c0;vT^`ec_nW$o~yuq zL&X68#Olr2-(R;>*31N#GdX9}cQU~rYD4S+zvghAu+#QQ2CAnVbO9GhKndOe>F7cf zqU#G>6Xn)q^zx(|z0#K;e>pe_7{Hn*A)$oA@5=llSU_Yq3M=KU+08n zf8jb`XC%eF2b++*-4C)mqjBqdoPYdYHNykx*F8kD1fK=;6J)Tcez3)v zOk7jY05H=8N2ueSR`y20CHO{;do5a9;EYsdzIo*I%-0lk#Nf%z@XCnX+3z)`4f?FF z@pU^=Xu^k8*9eAxc(@jly{GPPqyckX{FJ?8-)&Wp- zgsxUpezewHnL4l1s zH!O>ri=pX8O(IZ~vQPH5Xg&P;#DSS8Z*2-95~b50LG z!y_*#0m}5Ha+J_Nz59?amyeW#cjx-B!^i}*BrJ1|HRU;h__n1&&jN#*G`Ht{Xb9I*2$tcmp_sh;)OihVM%8b&rJZD&L%R-Ra`59QEM z;Kn0skc;cH{%t=KTUp##TezjW#=KAfde4PQL$gi>y$1X@rtnEEeRq_&7J#0lnuXb* z!RDnzPw|X!kqL!66Z_$W*nF#!xkLmf>N&3=wi-v@a6YjKjRhtZi0EECeTpmSG?K&A zky2}(nLtdODcb&dj_69xnRdbipos#gfnmGE9{+YK~|a+X2MZ602yPZ-v2})$J%+GR>alST&jFA6vhw zQktc3aGwX|IH&;=&LrX13(>Wxzy%#HXo*$Wz^U0tqr;7Q3JlCr7s#=|$%Z>VZ8}oO z4o}|R!id@gCbm+18Qevc0=yZKx9f3+?M3MAXl8d)UdDk!6k!rwCCF_3xQWthE}w%< zz)z1m_G{&vFgwcz;3-CNb1p5zY?Yc_KvWoyty!Fn6zY@@BnQ6!eog3sJz(!27KR_l zRRgTSDWHba*H)%BWm;{{Nz1fSu~7K&Rp7{lI<=EbY%gfO#UyJl(0b=?HxTZEhLW0Y zU?zKMoD_!X;clKu#nraswxbjz>Q1NpcGoNE*YlBioc5 zO+NjoNm^rsFJ2B@bkU1(2&TMZb!+LtHIkmihI968)BL1GR+eBTr?NqR8gQ>3Q(L@g z#jh%m1mp&Ojj^MF*-ue`s4Hb!RdC{eBNn2JjAcy1>Y5SKGo z2LNWuJ%Md3vEV3hcgWyRpE({Fn35U&yP*cQ-QoX@z4wf&^4z*Wv7cy+v0y{7AxaSh z1Vs@{RFJBIG-)bL1VlhUq#C25Qj{VfEefb~=^Zo*TY5mHY??}!qVx`TuAP&k-*?Bj zcib`VuX|5^oRbkjHt+j9tIRd$TvD?x{V8D;t96&t2PXS^(CL>0+*A~QJ6!>?r9U;P z1kh6ypzB!NhG!T3b;2)BBHjAv6ppzk3M+J0JWqqk6Yp5&6vnMGMWd{h;7I!>*1@=c z+o!1)Ac}U&Fecphs^SsV3ian~{>}?(^A6%fM1nY)QLK+cI_WzCTdR`P>SBgP>U%Xh z7*=J8`{_6%al}SU<9$Av|G6Xv@!b+=IBi#EX%A5D&@hC?4CJ;)JCextnFb=um+Lu? 
z8y1hD$56Y{3;s)eW)9ukD=4~<PjXD!)etp^Y%EH8RPPb+KSkm{ zf{1k7gf=#>^IG8Q<~Ed`^q)ZRyqt>0$Xp~dRnBaXDI!wTf4b~%2%SJoNP;kR093UD zCq*wq*pA-{TL6`|fy2%qB=YoXFV67quoa*xo~!^>s`4gBFgoc`W41Kl#ZY z;ZJ2?8qK7i>4Ez<-5rC?>4vXqK%@ z7{=r~A_a9zjXT5W0(S2xnapBdA2%c_Cj!YBmG5#)8n)1?i;&om+XD^dZI-xI4B|a4 zC)nGK3`%-&D%vAb_5y=eEgvl@fk-RFaw82mO%fcX*j$so!vR}eDfY0cHC0X^Ta5&F z%QX}GxpKd(_iGyQ_~wCOzvmUNNII~D$P{(Sm3??To*Tt>_{tzT8ytlJX$q86qasyw7w}zARs`1)P~?6{K^Xu@>Ds4U-SuKQL8tAOO_1&j(5=x zsm$`fLZWCEiQpfi+|0Bk2o8^2tBSw{%ANFSbpY*=p}21z`ZAyec+z${4x~Xx1}Z>_ zq_yIz=NOMR#1KyzKzLLNm!TSj#9bpaMneFQpQGF)kS2keG4zo~p+86E*yjd1`%|4f zRPZ#idoGto zmT765m)M4&G%?(qO0Q|Lt%}yE9 zLaXh@pJveSh6AQ#&G-Q5lG1}#%t7bIqdz&+wGuB^uzhY^E|U! z!JDfI<;fMqQsn>c`W*W~^e@sh!K4;b1B1Te000rMF)3rG(!NdSzA6O^_jMxN*esf|~|BKx18 z5vA3MG-C~9&?dKjI7P&rr0GAdtbq971Tk-bFO{Cx_TyFkgd(2|7sz#O>!+uaorrC| zwmmd>C$kFaK~iaLw_u{=thlm)Ef(G-!6#As5Fme}P_4D7iIgBq(>nkO5(^d?ob^%w zjM94|c@o)^6|JgsiOeL9QgPERP)0?0j0PN-BN>OpU87heqS8F6G;P$v#K zU(Q9iHMM}U>`t_0$}!&7lVEu!S-qryk#U#Z{0h$J+otVK{cqREgiP2Zor33T zYu*Cu)vx_-=>@Xd6q>$1;QdWhh1v8qYLTAD#PE8fC{P+Sq)G!1@Oh5q=c-KfESppN z7pZJ%kVaAhs-XzpfV#{R*;@udiv06gyY6tfcx@VKF*r^Lafk`x#PVdfU1_yjNwKsz zOx#IvDXe6;ekXD$t&cmEpnI^TpPCeeKbE^7xqTtw5H$Xn)^MQ#uEj*GTCK|v9gn8_ z#sgy9O2KPE`gEdQ8pM4FGE0(W*%cz)P(6=U3MW`-o6afYaHK*ifL;LmI88tw32&Y4zr+HUZO#$cv%~;4sFE&{e z0XJFt(TE-?u3)*BuNf{*14{2NqxBY2@cgbmhAFqJ4eeeggW`xGg7HCAOh>m~5q&C( zWwlH+pvYMi7w%9voVptK&xd}#P7|shWQ}2E4cHFpAH!;ix+&uIm#G@E<57sKq zF3KU4=-M6{!+Dv4K9`x9J7z>x5O1^;*0#Nagt&dm!;y&KN+5;e&;XkZ$I4C^?-N;W<_K*l`=y}9e}1!6n&8r_HT9G z?=V3lVj3rAR#O-_-K?f%u6o}}|L(3dH>7Mb1e0xstqfN)mDn{hKrL7m_!n zCEInaKhLqVqT+-k2-F}G^KuHQl(nJD%L(a}oFzPI)iny5;$R}jnmB=E6?G|&&r1h^ z1mlx49_>?gNLr7vy+>Og)~5M|-TywG&#wZ0c89vdfT$$3o2f_vaW1%5_|2BNeFlxR zpOZRdq7{-IybL~rvH%fKX|FX!1v2cQ5l!0wO&)MF){~dnyJ%I@R8!(-#w8H=9G{z# zi0BvX>pDq({lLk`*H?VUgy~+=F}TB&5)onV#0ezkEO8W1Bf&ogmQ4#ZS^na@e-8eqUuSz!h}<6Ag%@)f!CG6Nv>udU zf-hIen)o*&rBL-|RfBu3-X|2p5yT~XqvP9WbM??9UIsh9&W zQ51(X-85tOIZJ7gq(PP(~l^MQ9*is&5_;V_qD?tYRUWNOfyH 
zKl95CQw@7Fgxwpj2EN^w=ZWxo%=JV3!%aKh9VTHvz{`l;;O6D_e&Hu|GQp#%p>W6d zmvTO^ zIZ+YRKOF|!F7H=U7u3)uPQ+ii(f&_B#mAf9pjqK(n>GT-6dl|bl7u@^1iDep31P}< zJ&+^*PZ#te-n|Lg^Xj(s6L*y+fkU|nyCyw zW@iLh=p(AZ3NohDb8bI%Ax2le^YnME z`d3DBIgObSIZ{X#!`h;3R2tx32%%jeRkZ}>g7WiJ`vJTPY2^WdZ{eBL`GD$oyo>xn zS%Vq~@BH=~L7T(cL5VmQT9uEm`kFB8%B*is2!!wl3vZ@=0HWYU*2D1_tq@ZR1m7)0 z53e;#E@N%fLq$me)Dc||l{ZwG^J^wWyvrmK39#)mg{CzS;c((q)8v4Kr@BxH3jB`b z%;89!fIPKcqljBe(gteMfiVY&)ew&YG93GHmv5ho?a*R`?>_-$=#%RQrWp@RwQ+>V z>W_r0$ql9~*E;;TeZX&2{&wbr{P{KgA+sM%604UZ&!WE6Z^y~?@;HcrywODE#}HI) zXTwY~f=eNy#Cj@XYchx|Orkp?``)PeZodJYrSe|jY^+{@lo{kI{1{Jqomb<9m^hZu zp6ElUcMG+Ms@bhA+pd*{}U(Oae2 zE^uhRu1S2(b3gz{uZb-z?!c4_V1`SRZCb}jH9EbmhhpYgY8Q~g)kwC2eZN2JISs3l z{{Vd>^;}X~Nh2EmX73?Q3bpE^rX6jR&GzO24X!X;;UJD2(~#zw&`j(g zVhq6z)Xh`NG6rddil+m3!Ym6O0w%(vzjX~~UTp_<*$uG$Z|I%OQ%PyCkL)%)Hv#yC zRo^~XP54$xp%!PkdeT@rsO|d)#)1;WSll(}NqzHxL0{=jt&v*_83a9ETA-|4QSD4? zQQ%cqEgkjk_kUjb^Cjc(%LVM}5yK$$8&+cYDa;_^trrlpy^$7hi=tg7>ZhH)`zE0k z_@+TLj}vyEwrW)NE|i)S}n|PUw=j)O97-i&@(A^)65*IPy-1DX-(+4 z$q@&KXyn>06W@OiJ(`Z@*%`aj&8%+AUXDYB+3@Wv4jEw$$TJeukmsat9s%7z8Hx|8 zqiL?1psgVv|Hp?QoVX1w*lkRmee;O&j=@aHL3KD=kmQfY3Dl>L>)(HeoN!=|kQgr! 
zE4pcjbEBOXBLxiao(?$y1)&6eMnU_U}LJuonrWS11euNlG5UTvzg{3IV$(|2a5a^&fFjIrME&<)?Ty%Hp1d$a<>nAy5bNqM~H7PXnp?3&N; zv#VW3ZkjTR*I_vBZxrP{03<3r>C)MW11_MMYJMD-@59sv zKN7!2aW$G&Gk-jSLq9#2!r%6y=~80==>F7X)*Bjd0T6qUl>!P+N*>@q`ZebEff5n( z)$!Y7doDyj(;ui{l)ev_rjOOPKROexN*Hw(dfe2MJb)+a^dPWP8h?}C1RP}6Kq0WJ zCIB(0fP4sj6vb}S7zq$4VVQm)*TpWQ9aF^Mm!;{vCpv4xgw6O7)#46^@qax_=WHfO zks4obTaf3HG!(}HuAGcQcYIpUa8U!=f1Qux&QXB=DlulV^6Cb{9~rxhjpH}`Wk)gd zO(OV@P1C>v({b0_Eo>YyaPK$B{(W%~BG_{Qa`=q1040@m9wOtu#R@iG>^$+11B~@{ zg79kmnDNS`FG|O`QQKieI*U?RmZzI z;PQ8)ohDq&bqyeF)DwI6M*FwF*wq6t6^9>2pwJV4Zgd9Gn$Eseg(%=&vUQUqc(3}| z|2W??Q;5QZmSjzl79an-K|8(u9pY(r5SIWU2z&f@AeERc4OwW*Z%Q6w|Ko;1U$G}C zncseW*HS#_N^#8c-cFE6i$?Z^){a2iY0&DMN30?I3YmrfV7T~2UlhvWk(Fb9x|aBL zT|hs{czH{UIBmX2$%A8(zK7}ezh@wR&q%Vxh$GhXT^J-iD@{yqu#(R~KHzZzgrP|M zTtBt%78#hiBM$AP6%;gV@Xf>JpygV$?t;&zl-3KuHpdN|kORF*Bn1YPlBRay-*X0h zN>`7g1ZLM+fkRnaFp2qD`yJGornvh;wGXu?1F40fO`?2Ewap-mTO^v zmLnj)h)wMgFe8>&B?Zr|HlvmUS>g;1T6cO5m#nwhnmKjC7$*^oMttdCTU=_@A?ZZ)))gL+%d3sAMJfc>Y2&N$nPV@YV3F(v$#O{LkKRUwuM0_~k~TtP-P7 zAEveactM5H&b!F%4wudYLkdB1rMA&V!1?DNv)cI$XSdVHb(!SYW+KYcUK;$2LNH3# zL(M|$(xZUC)8r8>{{4^ZQtxj{82mT!Er=Tb{2~7T(En1;en)krNc#V-de)Vn2@TAD z*iNOkvc7M@WeBk1pat@pamHX9@C?(sH_f;m>ZgY6cGc&HvunCwTEe3b<~yk{6aJc@ zpidqdB;+J?mPGEbZBuENMC?>4B#x5#v z)cnF;R5RNdYgVm#jjF6>N{*hYsSZp28Y2wq8fza1=GbJI^$hM6q*!2$pIAXnH1( zw$1$Kp7;c}-`ki(!jzrvAlRt4$;qQcbq8GxH8+==1bKgbJ zb)l)*TmX8+IRwe=dPI#^fcB|3LqSZCofMKJcw_m+lS;>rAqp;(5?qYNVAcIIT+j9R zQe&eqI(!Y@;cD`XRtVVrsI&!qL~;ncO>!$|Q0EyK-sJxjJztCq+!0%TOxGg7FjSX$ zA00ZN;`}t&N{(zT>IC$|Paqw#Rua?dA~!cYQx@x-Xktm)>tgkia=kP|AS)sN7nT+^ zo8QO9D%NyF=tY*%rsT6@U^?oPV?BgN#@VY}R}tx&w5__BmAko(Rk+Nogv!c*HV@%- zoS7Cwas(|z@EmSGu`no56GMWuRV>vrmfFxX+RXol?l_#I0$Sl#brB#kiGMX?-BQm& z?#bHbjbe4{R$4oBo7B^29Po(`UHr-`^g_CiG}W{}k1h#qN+QEj(^TTt6D^I!z-&)M zB&9k;g6?Am=1MIfooX#&H)@Azf(M8U{Jr51N|I3S(}a_zr}0^zqp1~S5|z< zQIs(?hV~p0G!X!_E+@%74WnvxYB$Izo?m@oCZGR^kfAtOK`7fh)=#b#GZ+!dERlRN zMT8DI?{du;6X}D5TCViVY%}>DUC77m@?uLvoGcHH=q? 
zbrkC^9zvf|CuRPEM)kx?mnEw|Pt;C^iCe305~YO}ozVf`y7e4Jd#oMOVDNR|_q)fz zJM-fT*QI|VZL!q5I`t?X-PvB=O7L7h$5#Ci83uVis;#b zi@GvwY)-5SiQ|en>QOF!@#$t0eohw~t>rml7K@oPUz4zdMENDG4GCybLLr}Jx$lB- za3 zo{@33`}-uXIY~v5Mz84FKTSce#ZGJh`dkMV%<*My)IbIZk8nbxN+vB-vDfh}-po*h z=^i-)iH3HN1Qd%>8Kf>JCB*hq3pvstzUP7ysvOe=q9>xuP=JZTBV$Dg5(zI~PP<+y z0h32d8!Da$q&qf#m8-jc+M7l<9I z(Y98Uo5a)53-_9Abs?|M-WA2>xc;78vb=jnXk%sf0l3l?i!w;kh^)?j+C?8tMk-4& zK9HOZQu+LbN5R$aCAnA9R1QqOn222HL0;o0g{59qGukxIAjqlorhc2#5l|j))LYk1 zWK`TJevYz$fpOhRQZBiU7 z!SEiE6?Y~fA)bc}DmWrPnx+t|_#C)kv47Qqf*D`Q^m~Nv7YdQ*g@XnI50`d_q1@it zwsH&U8pg=-69vH9 z{o5AWyTRf!B5&}L$sXx_Z4}5(fXK9^LX6kI0yL|n@KPwuDTj_Fn3HW1X(4v4@&?0# zoG>U8v|ZghjFOfUZHEKdp<9w~0>}Zp8snz7yIace2RMeeW~jd97a!7x+!jbdE`!?*3)BFhc5!MpbaMCD=($vDwN?W zBn?RZ$vOAUQHojQBFXXzxR~(~cIK{D#E15zGquAERu$t72zPa=l z6G7iLxy{!4i34B067>K`Xoa%qz3ik+FP;*rxUJg9$f!6Gt%6~~AAYq+fT;OCYJy89 zSC7M|3I??%IZa0*74(%Ti?U^+v%Si4$j3ukjZRW^A@(krnxT@Q zfJD+7B9c<;Oj{_d3LmB#;<&PO;8aY(J?|vA?X|$_SmYL*5JQOy`V$oIl(R^`Z3Z8= z#P@1i6!Lf-9~3D#q4HzJIwJGp<+EZSde?9`4x9v*n3`p4Pn=T{?tsE%PK^;vR#<=& zxsszFTm!1Gp2bX^1Zo?SynzL{8*R&SXmweOzHs@-($3JCdH5(1`Ou_(C zGz(1qH>A#@ZN(s99>rIKH~J5SjqWsYK_X*)Sopd-_HK}7B0$ts?OnXi0fM~Y%W{X< zwDoOmDTukOt(xG5QdvT^21559B8YT0WDcIT=MP2gm_s|SNRo#i_AXXageY2MK}?^? 
zFHiRfYm*LTEf9XTLiG>gDVnX{y%cT@zb3H)#V}b1#5>S_`n@z`cltRgjMp+^wMK+Y z3nh)-lgAwuj7ZdC^vwwV?g%o{aOA9#T=zT)EfYVg0d|@g5co;rF=|usf@Ba|$ymqK zP)Ea`1-W231987e^g+V}BAp^s5q+NyMghUy?dx>+4(+GSETkGCHz!(4Kz}0@5yU_z z<%_g6{W{dLkl%L;$|HA(dswn+@Q|Wvjl4aRCW?tIvCed+z6@o_h>+g!}~FKB;SBQ}!}n z-8@5n2wpBz+afE1UhWkjPCDhiHs?!tA_cTyr68IuiGPl(uo-lqvPU8ueNdseh`oeJ(2M zB8(D3uo*AEw0@cvrV$|K-ku9)yV?OgZoRflU2t~gXyQWm3rh#I+JaGR%c8*WLCbwm z6W#0rmkYCaNdc{}50xW>g<{ck$Shh(>#ms)+k8DW3Q38_l!BD$o@Ri}K8U{JQTW}Y z)4L(n@TOw0h?)|GK;g)S?p=C$5}A1UKH#)HnN}QYQ%8=f0`S*r;WDX}1abdjo%B01 z@v7BdI?=26;PJ-|NMiepKUs(s5i_l@`#2h4^BH7{gm?OYEE1*j>j|u521gz=I`s}c ztq<*bDI%1KxgV|41G2TUhhEnQaC}&we-c<3j8!B0k{;%)ew&QmQ*NGa=dX zAvH{k)4Sqy^`m*b-4^BO!Sf1JeV=R{R;z9bmvDFnXp`Pd@7PA$+AW&k&1Tljz^&!J zSPgZu@HNQ(wPyQD(+McS#=f@F4kY^RD|>U%eeWRea+0;-DV_zUs61 zfrp@C4gm4lLAp(0004h~CBDS~iPY3EmwX#cM*T>y!JL9`o-CQxwc?%2!<^Zh8lfT- z#@+;+zw7Y!8_y3F9UgR|%`HU;g!gHjxeoGnYP)xAlkrr5h9*y~dLt$fBsk^NbG*DqMTdDgO2719t##lb zVq#7FaZ~M<&!mzxF{RBGrZ!~?G$|y2MUCSPUoRiLQO5IUC-V(ZD72lnP@Vmm81z=C zw2gC|p~3b662H%UhFaiVt?5|_q+~eoM`HAv0yO<$Bz0;@MeXpA)gL&Yj1<=OrFP8+ zh@*eE*zEWwWr|UO*FM^?|ELKCDLqW#>$@VIQc;c2-k9R)WO?tfKV`58&+Wr8mIHC~9x%4>22t{hFCxiVpQwxOD4x2m`A+k; z1eo>t=IqwfoHOd@auYUoMOd`Qw`Yx6X|>G@r?$OKGi`z{vY8TV=Jt&UNhNxXH~Tx4 ze$MaF=hZXp(^s>R3KlU|vVsWU!DO1QkBeJA+&^S#9P6}+G9Q*h`}>X`*!e*>3m?>n zOBO?!j1(bKvt(9J{*}25-$nS?_6A2k(ek zPBO`3f4IoCbPAqu#~pr@njU&9!YTMcSIF?79?{jU0R3?xCkhT*Ocz%asgF+B;R+2v z+_)WoZwId)Ep4b#*RqHY9{Yd*c8p@8YG^=IZ0@f|r<-S|^@{hSf{63A-=P#7Kn)IB zT1B;S5p$e2VWQszN|#Mrs#_4WI{{+Lry-aP;WO)74bOl&Ns)sI!qI}#8JMDujnx?@sjHS^d`+@V$8aS6b| z*dL-yA1OXRQ!I=&_X^;Hs)^%Zfv2V!aZ{mAvmhE!u*P{Fir!ol3pF7HyKD6A$-xE()|Sedgr0glL7uDKdqS{hbnHUukirTfK#E~ZwZSf=@@IPl|D75 z1m|O6gb=p2)|{UnV2PCR@%nMd{T#^l3%cZum2lLA2CeY)GXZO=v!Ej8XRWBm$;xC< zej~jFEkTD19n?A0B;YURsrgbywpJumYnfy|BxE)mI*7;wK=^H;#t6`sI8_X^DVgPc z+0-L%xb$KUIsB$K=9XG9>QP4Lc7mGV8**-L zG(jd4f=Uc0$YPsD?5LZ_q8V?H1=NvcM0`!nHs>rC)@%#b2n9s%$2454x1aP<6s~Ed z0xdPuF-a&BMBC(vB9rB@iOT0Vv&qJs7t&FZ`13gVC8~2G4^bQ^h3|l)wIP|(profE 
zef5R%tt!;J(Nu#PAp8+PqLNhX$LkVOaQhHm)nu`Zj3(5Wy`uGQm~-bJNN ztB2@6k)24?s2}Mrv9T_&Y7|dZojJwU9YjvTt{!hQEL*B3FmP9Wlu4$qDS3B(=N1_T zion3wN@W6=JZdjaQ}ePUat#CS$xbpzrXgVg0?Zl1VIX!R}hxN@i0xg$wZMF2Efh*Av6tW8whg|h)IU!X8b@B0AL!Z$~!$e zgEQlSJ6QmraRYUUC=LrrOG4cvZFvQlAn&PzBRYjeP#)+Z3vtAAm{*-g$f%#v}F~Y=?0d$*OEkn zEy;68x7#KSNNU@SJiNTf+!GmDld5?Ays4q|4Yb;vE)3HUd7a>sZAc<(XpOU*8x3** zow9m%=%({qYcWe?pf_Vj1?Y}a#W-aIb*R?oFTWD{MS%;mhRD`0oha>=lRYWMs9Uyf zE%e;TLG~`ZmptTa&>g)&3o$P9f~ zP6kxfGEU_cGa0@{giHg6eJb&UdOX_UQ;RvtW5_v$$b|=?jSyl2>wWu|9UMAAt31gj z!#JlMp>QJ=Y^Pla^hVk5K^9o}?$tQqyD|@whh55K8QMDz>Ut&6oZN%ab7MB8p2RAU8{D;Eer9^sUfBSq% zl)`VD?p?-lDnxKeR{N}@vLocOGILsB>p4(JNINVIe^Hh^6);7B6mVLYyP<$xgEh1! zz;CXG+_;{k*+JrCv_m{~HDtI#a&)?AgySISkR>k3qY7zQAZ!Z&h=qD1X^vtXy7v=I z?s>)!XaZtm9X=#;B0P~79zi9n6{oppyT}`j9H^YZH1W%x!6B?Qc^dN1nCu9cbu3tn z#-+5^nbb&N^Uy6;2~`n5TTBzhTGR$2O)ee3(X&Ph(yJl_iB?JajqLrw^%Qm)9Pb7- zr!48j4-~rasu*|!T0Dbz(7RBGC?|FBCqNq~^pkk&y_61_z_Ibyi)foX?X`<(eC2Nl zdQu`{2m`IyiI3vduZggVJD|{|_11L~$U8X@x2>ns#o8^|O*o(W|xe5iJrWL zrcabe+ls?5vflz?)Nj4vc~XBf2RuP6ZK+q6eOEo0~6t$%G&Yb>aM%S zUW?8gOdMe3sTPKL5e!g<5MiNqJ|31-5l%-mP{LPd6snGw=*@VmO;Yh(U0w1T&r>D`Io)zh|%Kk}> zjjG(_vti6`xJjvrHAy+emei+E8%d;9Lo>9&-k3)EX7-_mRr}1~nt{)NEN+qN=Kw$! 
ztw@o_F|yFIa)feIr7&UYZmgOK zrNX`?tLA3$*reKhabEpdZ!XicTUj^_Zwu2dHeOAgyjOMc6_99d0GClGrrT7T7+(QV zM!e6))PoDXnyqv<-WH!d8bxoEM+pP=%7z*>Ht{*tf?+|_uNR@{NG(o;EZIdF*({~w z5S2SgP^S9smicWvV3=$YBFCg73r5g@Hf4v___#bOYaenE>%xhuLU|eiR74ix z(;Mh97b<4cDu9NqNOQ6<9m*Mr*SLu*em}M2OXNYlJX)ede!>d0B8!Cs(r|O4kkhP}0R6d~S?KE(gYewV<4Teuz5rB7J%0Wfw zeFDjkvZ zaW}D7YkPRN9&`G@R&Qu%xrrAU*n(2u8)EVJcZ$~Bb3Wdge;6oxmF{_3Rx^M&Y-r^N ztiv0P4L2@{1@nud=CwP;KiwoBPKBT^K*5e_AZS6u0?QbYFdBZsk*q-dpxs zsNWorytxc9v~3KP^>(ViZ&a7Jre42uy2j?g$k@u|MF}`t1|ZNFva}V+x| z3c>haqpf=!;I~OaU|aF+WzO~osV^p9fg-$c;hD@+H)%<1CqMYw-gQjd26b!$1F)S`}Z#$Sp(-PN*k<=lSS%8tiwdOryDxwwZ!#W z)ME5jKLS%Zls zz~X!~>rqI=gBJWEx)*pa`8K_XbppI++p)*;(~&kHPu0Uo#uf31)j=tZIrb`$m4jU1 zhwhBq#bs8bB?p6xVt>If*?N>k=JNX=Vz_j4%a$#P@g2Rrak%F@QQdR*;!@Ng)^8|& zNpbX{gq4mr(A|V$KR&IE+kW|N{shtxtfiu#r6DeUMsU4+IyCXNyZbwYV*M($h-1r% ze^f0Klm86q;&Xj^f^0oB7W$5t8^IMS@(U68vu4|-L01$}=KVPlmd|W4E2>qA2^B!)t zn*B}_DMkHX@chtw@xq?vVrjAPgivDmro&Q&>xcAvI%1s_6z0yGC#qlgV+?0~2o|#e zmA5nM1&HKfQxDNs7mDAL3AwqsqOcia=RNi2vo?0D#ly)W$nheM^&_-F8XM<@p2hD~ zj80Lu9^NQ>Ac^ovTeOU`6YcMNd>lol%h<<7e*BtMZT3vQiPYKwTA z`;OK?K6L@OiYVpfAq_4>>-5Wora5P3W=fU zuzK=nVy@r_KH{yS?zsyV*Iu@0JqlJq4VocECT{q_T^i31VK|qRp@aG;#kTi!q^ys$ zH+AOOBNhpCbB2Vz;S$ z@V>;xe)lvF=j1n+i=~!;WsCD(jnLV~vrIiu4q)^0^n^`Jy30vq8esg^F*V2UZN7GPi{cPRV7c(+ z&J#X0xVcrp9Zao*VUo@EhMjUgqvdhz7m9pQYPkT=dFWrJpyx-LM1Xw1bC@~FfvDgiBz-) znyd{-I*~Um8^%0GK%m=gQgUz0^^ZrV0#Fi_>M!BaIg4I8%d$oKovDR|MUYof2OP*u zeU{{_$7@#tT-_DFY1;7~ZhOq#*P)a65Fxr;7cM`l>#SY$f%E?T`xhFsQj7jz0-ZSp zrnRNMno#(rl_C>DvxlX)jsCfXI3laMJ-)2LxcxBBqnHWZ0{43GO-|tg?KLrN%E^@T z7cd`o4lB98?N;53(2OaGSK?FC)5EYAeaLxH}IKmG`QT|AaNfe03o>5 zB%!nBC*>y!T~8Fwwf|s`uc(RsUNXbuK%t}rJWCDLfFJlS_6{8Whi=@u z)eR)`6xz@5JAsW+zMjl5h#Ub+$J{-3C^_ync=NOBvlB1- zdV7b5x>T^As5+vo>E(yX(JwU!K#I)Yu3x7r<1T}77qK;*2!*K@GG@(rjXhW2U~>8% zw3-@J%h1FD`uO!bn|}UdXH^Ln1;4no}hYGYd&UQj!G04O4QU#vOY-^G?2qV)q-lV z0cRZ)wmpvXu!aR-&^d{kCyE?FG6iTz>U*BNK`vS<)$O?tX``-eq%)bl{u2oXD2L}$$y_PS} zuQqnLV;bJvgQyT)=YFqz#k8Y0B6U#38t-=*mioz>OR$Ewclnyg?Iu8xjE?h|pd2Z? 
zo`KdN0gl%tfl>A;ogLSa6?dYUwJj)y)Ud8J@Im<^FkaunGEsqv-B#v7i&sX@8jS)_ z3skIs&5ats$Xq&EaNZ>^kvGc1+sd?3}(B`fwpzxhO(%^0h}*F2@sYOTKxxUQ$vr zS~>C#s89~(&3QVbeT{G725W?9;ekZ@*KyL5J-%x)IeWL4*a112^nYf zcSWJw&bfW4OE|3sSFwFsv7nEQ&FkzJ{WN~_?r28ox&j+6JDIg*cUDeUKCc$enxkJ4 zdmzozY}w+)!hwlO6JOfh;`U}H8k)ECo7{;^;>Qa4}-!kGi z@FRT3X=-8c*6V3}>d?feo^$c5rtK^*Uv?O3E~xsF=rDRQS!VEkf?jjH6Eiy`{MW;W z%a~@n{^c_h)Yg5Jf?PvJ;@i9D6SOnWjXe8`I}pSkt+fp`$5k}(ip;*|`~;vk`+dWc zA>Js6?KPZqEJ#K0J&z;HZSEOd!_{4}D0FJ{j^t#xf#kL=Y@_?^2O_XtVcjqjs4=43 z(~p+jvYm|A?#`vNe|gbl5bW^@-3QYlO_4ggnH zPgmfGm0c)eTiUn@r?iMQ;GL#+Cz)BvvtR6P)2D7L_M3pB14V)6 zrGeL9hFAQC3iWF8FYl{K|Io|KKgcQN9E+(-KnmCk$~gJ91u;6=X_}7V4!=~ng!y$| zKi3<0=rL3fyzuTazg@U>;!G(%a2G~cN{x%ya(;Ad41smqd+eJx7E>O90K(SpnTLSH zj$`*;pllO*c%!B%XrbpF$+?Ya*ZX8Vi~;i@aaCi2u&dBZ5hb$Umct8!Mnm{Dg?%39 ze)w=E``nQ=^@=9jFkMKoZ*S_jvqtb@b((dD(yfb^WsFb7>L1@CuYj8JN07%$odLVSl1Gony#+7>w9ugrXXf5|~K6%Tix{jE;Y-ScI}>gmC@?rzgj`?{uz zwh|+}D5HYYDD*^vUKXsbw0OoS82lKJC$X+|SR>cJ~GT(NX^E9@Ehd}a%f zA5>AeGz9qBbTTxgrEWUAr*<;-6>3hem495(U>w)EpfGXcb?LAUIE|OZJINOw-|T+3 zLPWa-;3b(Vq`1t^JWB(}925ta*ysLFDqzF7ZMd(ozICD-CzKXG&|AclRX=vj?IVAb zWl^L3{%!|Xkx#V~I(X76*9#lCfi85mX7t{g%;W!u<0JfWI`FhYH8kV z2j;*iHq-f@927Byfv^TFzN=JeMbiOnlN%8Z(E*6Eu^bp#ErS4SkJBUG`aeL*S|{NAIkXLzPuDN{hQV zB%Lbid^V#le-&@;vHOP=Y*V-9+OfH60bzOFwxF*(ctOumX_23@yZ>|=;s{0fQ9%LT z`fv)o7PVLVXd9A0tJ;P{5{b>0xIn0cZB;S5T*FF()i2G(BP#tOH}ozBJ; z+D?P*icbqg&J8(e=U!Ds`ESFN_S~sMreAK<-rpvJG$6;_(TXm? z6hjl*GrNN8*dO=q{p^SJ?@+oa-&o9UCJ-vT-@moQzari-a<*Kxy64r#$1Zu`_MIEG z7f0!*v@2lblyt{+jF%4#%g(J1UFPWIRA%%wMUBorO4H;eZ$<`&MZd($gvri7)PTzM z_v!?_O~d;!Q<^~g*cfns_O6l6QYrHB?+;Q|Hq5h8n zZ&4{^-H_p%x0=oKf|oNMV*M5Km8Ks!` zo=c4<%h&(dS9+}+jqR!jjFMO&MbcA6NWqI=Hg=b)Acy zx=7Zuvt6SeieN9W(!NR6>#uVwKpIFzvG21)0e3?MG*)W$wNy{7oYvkhZks2ZRp)@o zVRnnS%20*>wyzK0)HtiO)i=T1F zWa}~1dgSWLw%5>rV?yXbrPrfP`nx8CJS~l+3Xf@Tyx!k7_+t&w5-&viNK8%wccF=S z3JQG7&Daft@YU@`>y47w|LmNvh;kGfFor%><+4Bs*-8!5SSkr|gn64f#)X~3I%^Zd zA*Jbh7clbUJL@$QjcgyZaY|GG)R%Iz$@&X~fb8=KW38<13GnCd2B2Pp>6s!E#ZFJ! 
zTsu2CA8wC&eS7BXMJT#$y6xDEmoBs`crli-^U~gj3Lj7$DK*|o_4#3+!`vTacCTH@ zHq+A7v0yHnuBP-hJ&*l@)&E+yY;TZFY194G>fN7f_YGRAq`A$k<&`mx+sm9$b$I8u z?aNv192e;ue+q81o1xokJlm*Kc0SB=)gg?P6aiGLVrChe73eVb^(=s;&C!1yOj{Sz z^6B8HueGXnuzw53@%@{wR5kMngelE?u^+)sSX?Yf)bt4k+Szc)K(Wv)F`dmmy_Qkw z)xUxDh_Oa9?5qRo@K28I%En#H)R><(?E3?isZ{mc2;@N%y!Q!Unts51Xi!{k#Nntp za5){9u>N_*K}5L;K*6GryT$3{>q0bJiB^6Xt@=@E>4#JMx_>b0`!%0~Ew}?EM>KW; zd2SFtR}~7+b8JER75$Q}iqR~{V^Ny*D3u!T-JTS;e!S2@zu^7rMb%cLt?alrGf6+W zk4DJeljxd0x0@T0llEv`ybn@RG8V2LbWp=(Oc$@lYcG|K0n@$l(bTzfLsz1JmUO+m zzR1#fj>JT1o@bM*5Oc;bx64RSrs-A2m05QID~YQ^Zyp6MZFt#9H~AJONS_9rIHnaj zC9a$qUBGYGFt+C*`>V=zr797}9w9<>0UW8s=S)3Uuo%-2C3F#$xFt)|30T9aNp&2z z0<5NkHfE1@MU}%v!Q+zZM=M^jceITab~=8=2olAItw3}7j@evyPZQ(v_<1jELF^Z{ z&qZ$##HnX?S=$RHy1>qaO!!cc$Q#*4Zd#ddKVip*%Mre*MSR5$B{Si}mO#R6+KSot z8qGhRjI&Qj-OUnc@MtL!akwpm{j7l<$hgc-r^AePFQiZ>fCNt*r|0q8XUSs(m1XzkVs%rp@o`!acysoPdye~cU)@#HE+_7+;^}#~ zi=9o^Cf%Z04UMo3KtG?6RXob)!A{^k@rP@W@MIV`ZIX zN@|IKjsJyYk$U#k!sK6$K6>=%0!mi=Ov8OE<%jA-xEST?()CY{-T$-iO8F{OPH#H4 z2@4PS1vpjvlrKU*r2yc4L2da3B zA^lG`cQ{$x?_^}i!^~pU`t|FFAFjh59l@V8F1^FEBaGd4*OJjXt(5@Db$)D3Rf zVHZFO5G$}>w}>Zq=fv#hi3YYa*?4f#;AjClNtbmvbLJ16h$mZ*-pSw*u|`$2m*}B_K$P#f{nroIV3@`CYHekOYL&~Hr#E9W49gzD&_B&fIqPq=Dty770MnPsAL6pEK07*vuAc~wF*w4?e z0IJm0uDjRuU@H}YDntcGwGzxmx}m8S%}r)vJt(sY7PweE&{p*JJ=(8}155@(h6WSj z`pW*L+grxW|Wzqck=aJNJj{eqIq?v5+%Cd6D z(W~!wNM5Qwb4h+)OE0_nyq1F|!gG#a&428&5EDAx=xZEoANW^cEm*@R%-Qy4TRyiR zG|FB)E@oDHrrPLK#zzH2J~2~l?0%%Oq&{|e?!8%LVrH_jmrf!c&{M>oz!ZCC!P5*7 z{}&{WzC2wi_(`_hWtqGmuVFPt?fakhZ4;QkXi-pEMlN3FKAv{biJ-O9y|I>vXX&ei zBaYqw73i<9iFHIq!D`^Nr*NTC5gwD5@L;OBn_o#}{pA#W#!uPSwRuk2nn6#46CQW; z<305@X4k}Pj5rJEiJ=hjO;0ze*%M!GS}w5(lXpf`$Mepk#`vu14mZ`GcoE zJ62t-;(x{Vp$j4CDQJ}!(06e60!Mj^STl^7YbK9KiEeQ2bSAdUWNlfwyZ>4g{_Wwg zc@J)%=AuG}DucP9L(WIT=HGr9z^1FpdQDqV`1J_o5Q>ATo7o*}Wa&8FTxP^BFz{)T z^h&%i0$HIrKqy>R>&lf3=)yazrlr-ND6C6PLl0WZ-i=;6m3JoQ3?KwCiUL32<}8 zb~%G8F^3Ve>~ooeM#kvVicG^97qXQ>%N~Mn^TU*bj$?ma8tF#p;+65-z-^ASlxDi{ 
zCN9kdfT!H%c$})33Y&YjEtd8QNFNfA}VSST!*A9d;a-U@VdrS;Z6y zA3{LfSZ)Z*Dy+8(kkta;Y6z82NqxN*LCpu!%fqCa&4R|B;FucakK$rB_$d6S`#>V3 z4O4gSgz#leNzzD`I?V?qZd4k~AIhHdV*luuW_1i(QgC2>M!+khp~TG^?Z#e|B1YFI z=gph9H0)pVYxFb?r;8Ou4h#=gubTZ>(# z5=o0zrjo6qeYGVNl}5!Np7)s-+n>+7@5ArGcaobS$|ab>q#)Ow5DG6ynj@ubolLM8m1~duTadab6DqmZODyV%U=1l+UFtK`^lEbCpyVhC8qLp!3*BuXk=B zLL2k*s<8Bsc7D;ZD+{xZ*iRAW#A+Ts&=-D+rRI!Iuybs>y|$^WJzwwm5=Y<8M7G6>CciPHV zDD{tB4Jyvr{NdI^0DCKnFP_ma7&g-?eRPLcw`EYpDdD zcboi0#OTztkpaL;lM#X}5lU`|&7I za{v|$r<>m>END;lZYoSw)(Ki{?(z%G(BelgRZc*n@QaqCq@AvR^N-3EFG! zpCdMud=Jy`PiVPvWxVT)qPwSu^zJ>80G2}?XInOkKj0VENT|PqvQ&BdNN&~TZuNiu z30vfI(r)pFT3)-DKIqX}_2}8s89WvOQ;VoDHIB&ts+5o z@%)SRd61N@QnI|tRaw9<>n9TTHCLdoqq*-zyUcE)*M)jHpw1F` zE+?t!Oo1a6TGmJC#?np|oY<2C<6Bd*3HXG!Y+;Uamo6dD0o@{O=*x`SwG)-X3_uX> z=lGI{v!dw2iQ@GsKpYaFpW}@LONMG??i|dq6Cs3O%jjmJN;uz`&|7og0gmhQWoRrPySl|Iy_rwFSyjMD^<^7@ni#TJZj^H z5;bDbFGZK^eNf}=^P42ha@0=k?Dh=F80o*6o2uUW@zm*$N05QcQg;*rwL+qC0SO)0goS}{y<^18+F+a^iX`oBdtfjalgY1% zV`;Fy&yzFnt-tivXelJa=qVgPpzG&|!^Z?vdE4odo;!S+2skf(w{6v`)u`;>o`+Do z%LUqvzS*ZwpGM-_AXsxbxhQ|%_oZwWSd#N9+N&+gU4%-ss$Vd9@-Lh&^d(Gm_Tp`t zW5-&v%*D-lyMSKldxJa{^*25_dH;t&z9HzSyMO)h&`=w5`RA#>d8}V=kCnl$bMJt3 zfIM!W^Ti#%2N#DDV(oE{Ydn0DYI?eYfGGT4_poW*PUgZ+`&fU~j|N7f+ z!&)>yB2f#iNYBh+UGtZhw{=hrC6F0UnZ_l~V0v6wZetfR^yu#4&LB*7>MX49zvp)z z!Rm^Dk2%XKZ;3UXH$xOSX;rLXt>(d6?0T9j+!a->$Z|TZ^pLla z?G^T(RPBg-c0%>lz((7#6ULACraZNK)1{G6yqwb7`2G*Jf1(R=`iup!$b|Msd@ffk z+dhU2S~SXX3FC#OlzyQevFS*(eeSjJb7{wo7YGHTa?d`FL8UQP=Oke~>eF{!;h=yZe2r+`sf* z3gR&1kD1A_^mSSSuPmsL=f~6$t?j8m+*avYrX%=loboruyVZnZJ1#nKs_)&8O|Y;d5|qDEIN#=ekwq(oH228NgLan& zzcoDkOhWvqK8Jo6C=D4d!u5?kDYV~&jNkkPLo8!WNsdczA2uHCRCb3JW=-Y@r?#!( zIYzHImSv7nlVWJDAhCqA8*9t7z388Li#BK`TSLucDwxX{??+`_-FY!;0iSf%y*v&S zIp4OkdeMPYdcrTkVgZnPg~|b`)%R0UM2(cyC{2n`Hfsqea+ox4R_# zxxE7uB+p9$&xopcuXiyLF&ORLsQTUrA zPCYKV>{jDP)fPfD3*k9qVDKdYpMu|e&Q6S9^K`FN-o)({RDtH5ha`oNZtOaD+Sab? 
zNsdGwK*XH!d+%S14L?X@GQPw;ixS1tMqIX=62fK6j0;mdfyjo&mseNL;X#WZY>a#D zGk9Aa=Lg;rYe7$?V{@%oIE~oto{?#W+pFqrOhj-yAkUYXNWuAJ2$*Se1Se-ZFQ|L|*%1PvtHtpK^LdzkqpZZ|L zoad1qDa7%v&tG)Id6h?~5mwBni4->A=btCDiitFv;$OwWVyQyY zaxZb*1>M(-;5Sc$L%74^I#ohJdXimg&m?f6_elR@$DN~>4YCv)IE3w`3B~UqMGYng% zng;T|UW@SoOgV#Bnnyfrw=bjUl2iFyqU*j5J$m+x;E9A0dwKCbluYWR3Li(x<7Ha@ z_isheUpCPA5gLHUF&1G=xeY+CQjPL%G!D1QO#dtsp)dAocv3d)TgN zdSQiPn{zsoRLA=>Mq*rCHj3QnH_j^_qreH2)hW@e+Id3QK~*{4>>I(PnTe#H**NJ; zkB@If8k|XlwBXv50A+A<01E?>MFFo&@!PxP)&&5k-m^wCYcxk(RZV(wKAXhidko1@ z8oI!mdc^pt_yCrJ+Kr#Tee-5Huifs;)+zL8C@Dx-6|>UDy8AW=0i)*~WTuFCewC{J_$IsmxOI`M z-grLRBGHPB2MxQq*zMuNhuy|`#>rUob))OA(nN3yOgJs+UYyG*fl#j$8bz5!Z0|Q~ zc>t9qoNE{z%Bhcrk6f0eBFm~;XA#nBP4)Sbl@$GLTDKm4ImiRpAcm>x@`PAf52LK6 zt;LQ~lDIyK3dqJVP)B%YGjNb?Sr9>l<4=FR!2#uv)jYhBo^PC^m*4%vIYAH|hC|~* z^MJ;UGX6uzEPLBw{k$!@itmO%cm2tu%PgsDab*WHtPk+?<;~)NH*|8IHhJ=LvR~x5 zFK%I0fkm+Z7@fNp93?N2cP&7rood@=*gD6e3s)oVCcruDYTP=5`ymMoltl&YpkW2s zt{JB4b9AW9IBc7f)#jYOpq>5Z4{T@{Aq&7j5!9$N-mbcmj_d7QNiq>0tbP+I2kx2zF{8R}#o4tb5tosb$ISF4t$+Oa4p}pgO^%T?%i%Bb zhpq0@qLtLW;#s#^l)-)CGkx5p?YNvE!!Y81m`%z?^&6SpNQ=hQwMGG2KjaI)u&vD` zV)`i>x|cV-C^lc!k)_H-sj(d+Bc!*SgUV)-3L}fPH<5_Vbywfz2?-1X)}G569)W3A zGI2|SQ(RcJNyGKpX;9Q#I5Sb4ErP0CAZVq!@xlx|7yk9eMfQSYdAu1ahwF{xU_<{P z+>^_BqWx+l8=`4Z!7XL>{thQwTEfZu{>e?YWoM{g zMdyU^TskLYus#bWpZIbL(@{p%JKPJW@Y05m(flq=n>w|cTxmBtwQu^{(_0Q5AXv*k z(z;vzVwG^lK41LMhYq4$%%D~DZ|VwNfPFvv#o5-}y$Jy-eCO|<>g2hYzK8E9#p+RW zFRAp@$KWX}BDSfWS9wvV!IraGS!;@YCZ6;_cRWPP-5i{GcX>TCOSrYOyy*0}a|9un zSo_AOsaPx#pg@wgjyls)5f$xCdMbH}YEXN#T_|k^sZy_C`v2uCg6%~pU zQR^gDWGTVLv_S-8#L&5r3D?KCy%?r4Gk{}j5zy|0_Fiux)##%7m~#&6JLvwPYt$HPad4SUp!| z5kMqLDXNYudwBoW+^!>*+@U@cp2p`!`#S7mEXtM4;}!vo5O|6lHfX?r>CDN>A>+%4 zI|_6IiR*+B$9u1_*PaHPh@N5W$ObTmz7dBuP{KxXJIKM6v9)4VmQ+X&!>Iz*VGF%Y zYUfQ&ndaMjN*gv|#=|&;mlscp6Z!|ax-*9U=2DR-wqB;sMKsdoupw>vi;?y@%eg^` zkkC6%@9c5F1Rxp}w`t7xpDXD{LYtekA)2!Pq*&w|2fXO~x)8>)8K_Ek=f%-A>-rFK z=ca5-5-(n(Z8j=j{uzVLG9=e%l{a?S2_%9!1S)}cSZYg^GT1qAhjZ{H*v>D6uBA0_ 
zHjzzp>CH|FO`YfCof+x&!jfk=1Ty6S3L!Mj%T@(Z0M$K2{;&xklWB(MwBe5gH zp`-=$5kdv76_{370}6#W6|C7$V@pO52~>=ozPMF8+daL^K9()r%s_>1oRnhv`%zt3 ziWQ9xtFu+l%J~i5l+RktiLln@5#Cb!sE6X2+2*(i5Zfxg7(bKjui9X{c&c8%4-K|s z4JEjR%>K!hQk@riVa#5UGxI~wfGIQLooLLsL?1E1bq4HAtjDqXDbyDykIq;v5jI9l zKc<^6pPRM0`d(5(@s)5q6;hpzO1GzSqUf<5aa@*?ux-o+cs@L1lDta0#Mo`@IhczG znxN8jDo*x+7uWq2yCHxv7{f;K+FCGN3rs?DE@}Kq(|sP8?8f@uaCdgIY!wvV&w@K& z^iCEr;AGq4Z@>AbB+(Ey2<`7Q7^yhXky&`+^5CJ*oHx zyV9jjJ6~Sy=XgJ|da#h3<&}p2$Hk<~Bubc`baX zD;Ez;oy7(<<`HqW@XvaBdI_5jA%WQNp}BldBZJ`v@FC`o;zW~4@uDwF{{zmdtZFa8 zQP+R_N%%ymbtdol(aL%>;aOZ=5?_TdtoDkV11&uBUNEr29*=;z;~4s40=3sn<(#7y z>Z&okFFcMpP(hPzfsy21(4(fOH0TA9YpQXw&jx7Pj&&n03#^rPRW8s&ZD;8*;=lpc zWf&fR%Qjxqs}fQeCNkF&RdExCA5atgR^W0xu-iA%;a|YzS2-f zB3Ovj;utThi=%{7J&QkB4OiE-y2yGnNIVgR#es9}59LjpHbpy@nnQMsIHFrVSKK20 z>BES*B8k*=8z5*dDtHPLD{OeCQQ8N?Ag@{$lXt5xg<7Cd$TE69TR#u`nWPWeJ)GCy zeRFGnaDROgpzLAD%M6`3`V2=Bm;Lwl)U7V)e^zJU+m0njmqA3+IJNEyvdHkv^% z2`rV)h4QLieos59GMy&t$;RChB1Soe0B0&!@qZOW%nt*wm0a3KjCe*h>2_10 zl2TtDTR%r!2M^4iD%m$!b58NtE4;)@=z@WIn-&P8PEzoN@m0d_$<|%%zG0M<)$OT3 zL6p_pvE*g3KT-tJb)|+jInXrZw@IV(_MmG?4KIFa8F0X^96*B`OUj)6OY!lQ&|YSh0MW{gE&Ocfnib_F#C%AJC`7eyyr08J2#Zml$1#aMua8BQvrK#?UUPG zrK+MJ-@YLmcy>nM5ctYy=;1i{g&2q8lK}2&0!|wQ5eSn5O#JxumtWHS{rBL9$U`a= zC{mv6HII9cVxG|fnz4Gy`MI<_Oa@2EdUuuE5fW-C+AHWAf|J*S2G2j11#Yl0&jz z!B7aRB7JZo7&184OQADoN8F*&N8+6n_riqHxbx@J?DNR(DHmtmT77W)_U|MfAy4M< zsFk{P3#Gi2MnQQQ1XVKz(#7fxd4!7N697FLx=(iK#Q=7?tz;ZM;_QU*NO}y- zs7EL1|1t7e$}Y`Kljf=DZZKt_OQ491 zgDfH}t%qXElh_4FmQt@c@fg+W3Q8>F^N>B&!)FDx<|VRta4}bf#Gs{yVcvGjpvmge z+t+pHc?N?uj@9Vk5 zMEH$?_u5gTY9ZvMecJ%WaH#@z6%h4KF2GVgTiEvQ8eb?B4TPtGIxXN%T|c~E&zc+) zs`ooOjqH4iGHC`d`LpOFT)1H}Px}lnOiDOnj$;R@;-3C&YBI-> zC(A@>$lSb&`?;mDt0Xf+bNV?^rJ!6%mZ!cm;gKJ}xLtYu_tdkq)aaS{ML+^Lq>2?W zgXSJ4mz-c_4g!rBmG1jlm(Dj_8hJ#ClO-Gv9>U%pzZB;l2dog ztFTehz;S8;uV?{p&CO<_LbzM~qbZ~jwQ5)DtUL}1#I>lP@crmdmq9Q2$(^Nr&%&N} ziseMBeZH(EnN;1+k?ikgGHQEUp}x0#>^r;_zInYixIY!V*Y#qJ_uFU1e5pv5Y#oF=eyQLb>2}fWK;2cz#5T$9%hj!eSh0-GL|?)s5f) 
z!7E&t1-tVg*{)k-{OZb^LTSr$%bu`#3_tPxECS5}FwCwEx)c~bZF?(>&4v@x2V7X|M@iam5pN3XiKdjrvZ6JmANo~~Se+HHu8mUhPHSeQNbZ#Ixt zLBQkA4K=hN0;%J8g)avu*BZ%V^ zC=2A=F7Nq_%j*oEHnzHP*Q_%W-hbbVJIg8~;ClJtwa95ykL-e7*ec)6M^1fBpa2M~)u}IQ$JHS3C$O z%Tv44(s#|%5-gv-ew=p{s=_pZ>nn$1m7~!pP%AqlP#^%z{o!Z}hHy~p?tJynxsfZy z_0{s#D@$-JQS>emcz*JEY?hZjRj=H#16a(Jo<$ERwE@l<29BtzqET6SFk@%IjW66! zy`I?8FexANa{1?HvfTuZZUwhMAYt8H0`T0!BFL=}>jh&(c(Dd6z zY2azovvbtIzyLWRN+rd?F02HGm*B}1H5(L~c6Zh1x1nIslC9(-#s~bx=}T*|_3pYg zY2w5NHG*SL3+ z59Q1I(DvAbj-UQp;pK`VBqnikxU4@m&g&AZsmrcX+d1@=D)t;}i3oa!3m3Q3dXF`& zJN^Ar3J;e4ApoM~>roooU#G5J?c>7n9?9t#fidUd@WNnabwElj0|Vmz`AG%mAuX9n z{v1~Z5Y@y3nYt8gY7Un<|KfdK{UC@g^&S5%`KJ|sCI0>M)=sST4R2nacz6*xcL31S zCE$n8pFQ@6&r!_U38bieNL)ZJhg<*q9Y@Z8zW47xiZK7^B2`uTqJBr~q)Q2gj?WcZ zr3wr$Hb6So4p5K)u_p>@!@%SVboO5iv%7-EKff{V5l~@gsIIC&&*I9)z!St>JZEyd zfIPEPQx(^HXiVFwfzxOL!&3^8v0S0fLtg8R9BC>w*MGih?MvIE$g^xv0aa4e14)^o zeyg^5>#x67adG_|W!QrCYnV--?t%PB=osJOwo?<$)BAh|V#ihGMZ5EisZ^0h7 zU1#HJCUxOm6Pk|}5chY>JPA^1?PO@p$crs9#K5%qGGzgh1AG;tij$-pT z!qPx`yGGU3O%VnL&gRqt8xB`-PU1(48y5UL{kQR~C3B zcJutwfEagfLo^c+Og#=WQ&MvW=gV9(OaT!lb8J4x^pH ziKD5P<8!d!G*C_8C~?tQ{djx1i)io!kx*n{u{L>ndbTT+G1ssZD_?J1ytrn_CFCRN ztDc+e3E5C`2EE4rwb;R=jg_Sxov^$lNBg#MWZGEl46DG7)>(e_Mmfj$kRKBd$gT8Pu(72sUUDF${PzP$UYt@h3^uhi|*#c)- zSk)vBJSFL^OEJY=qsYBQz|ZVbxCNr zzkV^Di0Bf)t=wu zrW9-N(Q6cwEKa~0tb}tU1URpbS?7dRhYIj~a&yzXZIls=Ls=v8s*DK99sViQdLRCr zMz2)Dz}dSLlSdv6>zMr`s+7=i%vy;`IP5|Q7A>EhyE5hlotYA_CPj7M1m`LoSb1qm z=+u2ZYZ|V07s`XVednamIO}3Fl_hA#0(Hgazsu+#laa(dE0(q60r#cgxiYSBpx|@V*_XJHDE>x zzP7c2(i=0ioqAX6r)Q6uw7h=rL;sZ@)NxNJA7Zhqx7Vm2%pY84oT#Qok?u{*eOfLP zSwuO&ewDo+Nxh=qw9nXFft#LVIBo2Cw6#X*jOI#wUNyUthN_XkX z=6-vu)+!)C$;6RuN83Db;J*tw&1drF^Br+a*g&%{?C0KJm|#szyi^S>m#Q%6P5hXD zP(i&7ivJMxd30Am0iKiz#fzraa$ewcfX&np5Isuolfs-js z0bg4070c=%-}6~Gw$}YAXT*CZYZzfbqIx`APqF~r1ol7z<=8+f&2h~QCe1t}qg&cg z61=qn>i3x?jNUsbTnR$TO^yK(?HOvZiqgLXn*c^m;j_D5^Q8J^Qg|Qf_po1IA|P9u zHHRq2dO#8ai{;kvx%g7&1G#Teb<)h%Mo}@J>u)irl*Z1I97tTOcoXX!K`|^p#%KV3 
ztw)|qZDr*dNQLKIO9_!)kSay_+}$lbhYZ%EVzVjht2!%i?D{z(wWU{b1db?`;KUH& z_ux|geEs^+zJ2?!^0)#g1uMX~=hIDbiIE3~#?^mX4G+wwea8QX29XbW?h*=9D{+?O z;k3#_6U5w;=Hlj3$BU2+sXq@|Oibkxowk;gr92BZab)M=<(0bghe8VlV28pjY4-nC~FVYHo4}l@-qKxA(x{6=GqXU{{!yr^@JJH3Q^ zRD6w`yRwggB&ro}S*^p@>;9ea9g<)YmkVS0bWs>0e;69OtmX7qZ+3}?O+SZq*b*$r zN?V`HuV=p+v(6UpSr3uyQz#NYOFXmSd1-9jAd8XrV^`VeZhT+lEYq*fnumT06TdyKsr~2h?6ghy?%gv^_nZocF3C-l6&be2lx3A|#py|q zJ3d`^ru_A!yBzpeaPm5R<$JnnOeOXWx86vd0pc%O(o}_aLXa)}=d;42&L0B!QWlz` z9x5J6xTrNUxgIG4eiZ5NCrNRvt*FqrW^Gh4{N$!&pNaoQmzPHgG`Yd*& z?P^Y-SW6GAOea*{ReEfYO>$yWZFzUwMe7(jF}VHp$oyUDEnQyV=LW_>rM+D*qKT|h zZ0Zb(x}Vh*tFg)IwP{3(cBKjEtJ0QbTBfcq^A3cr0hoz3QO=L|WxCX|)73vxy5&-h zxI!9Bldic^Y>Jozg;QSiQ+&hyj~SAjx}HtG$Vik>_@3_S?>Gmsfz@8hM>oIz7{`0{ zL+8#qsv9Q03N}dA^v<82zexWrU*lEaB@=$^H?vEHj*QUYvJZgG$ssf7$-F~ZxRxu? zx;dg&uHr2Us6&YzMZk<>A;wQE(e@P1ood47jCR)B*A|(yR^n8__!d!D{b3t>XtUuuF7guEanhLWRBnt*N`L$H; z9N4=fwLX_(q1Bh)6pcOnH-T{QGx+Fbw1h$k+3Vtpau_TqcSE7=2};3I2cTF8W&)a? zgbjFxjI5qj)F^r}zq{l~jf^;hddP^shkCf&VzVpSp5&@j3gRJA+V{d_s+G~M0}y_O z0a|yRy|0796x;5u=x&gngQHqrwPAWqHnX-Y#V`SURXoH~uG=~#vPsHY2xql~*v7_2 z&6OPpany-D3^Sd`JDLuw=yzC@T)#(S-x~dHm_^zs4_;o-Q(4*0CCD=^E9dDY0D?=v z-{oldlz$?*w^lB(PJn%cBN{4$QtSlIK>uHaB{n-T@oQN$QXZ}8xTgjvG2|9J$(VvK)PpC>= zTHmbY-gjK^r!6p4^>byPc9xwOLg;Rvfh%l>glk^5xI#8?BjWpQ0el99gYZiV27TS6 zz54NN(9K}=I{Z`Rk$ulnj&7~#6qhiI8d|Y5)}i>vj3a61aF%wDp*f(q@vC2+jr;h1 z4N62;b%b-j)cU?vSdX1;-_b@;aG^Zs#gqDUsbw+;-FqF6^o53XVZ2wCwEj=l--{%-43 zv$XEDRT{XYXd~HRn{DYJRL`w7FhaX4WP?6s21Yl0_5u0eZY zFOKOnX@7dF2`r%Y88H?V^opgNQhthepXM^Eej4jfv9#hvo?wE8yDaC;8Bi7*sKJDxhgaIDeVY8)B$v&cse zi*hz;wC?V*Y(h<#IuX;(5e3Kf=HQB>&lpKUN5M#IIsaPQQI7@xXVa+Ur56zzw=Eu1 zVR`5@xi8htz2`oB?U7pJtE7K~P2>t&s+Cn;xomBKfnfhfT0U=10C2JBj9ke<7k4Dd zfcQk{BwQ-DJ!cywO;#uX6G0>VPkJp--DIkyIpD@tH>ZyI;enLsCoxD&f4k~nK77-1AcAnQf|9`) z^ORPyFolQ^f$FBOS8woZA)x_*H6?Oi&{H2-a}y)2E|73NEJ(B>#aV$dyC6U98LRu zIdn=~bK+11@?j9T7dNwTDJA}CkjqMxOu+~SAJpp`wf40or{=RW*7B@UD?woSY-%Y} zIiE$1ur|;p*)#9yNHU8{!0?u3aV2ce=LOy~8}l8|!h(`2AJ>C^4(|U3Hl@APw2Z0Q 
zT?%*my86zEEtD|bJ6By3QGKYZr)7WKu!7<6*d0e6NLXuA?|NtRnIpb|n%h(T4)4u+ zqH=#<1~F|ptb#OMVZ2a&{#F!vZUVh+o&Fr)y%{i`D>jjGI{iqsZZUYIfulm7k=@nH zXL;&oRZJu($PEDn48`8yBDt5REn`&WSh=6|krgb!Gr|jou%_!j#Gz{O&g)$~Je&BZ)Fw7!aNVsrGYzyK zY!aptvmoD;_QY|hO|orKMx4 zG_i(2EM*Nt8E5nNxzyUHA!jf5GWc|f(?3)D>u!i$Q}hQApmN0q?Abd`?S2`Wz}!r! z%YZTQV-6fzAoe>CB4a@EP-1dw6N0#omrcDfh*;(=?CCMa9O@}@&^1a1I-*UbD5k#F z+4LdNCu;3c$SlFU?^mtA(P^OR^l#g%+(8*WPMuS%R3VipV#@)sjp2fn_7|V1Pk`S0 zU#41N87E$E5n`9psk?fLRLHcCP*i0&E?##A-%T|Kv)*CHF18*@72i^8vmY?Vm{CTX zq2})IXUL&;HdKaPoM_ZFgxI~Ibk;+>Y^hB!0^E_X%MLPQH}|!Y4epsR!~pR#LIMSG z>Z$?BKbOkeVXsj3GS^4^?9_qQzn!!FrbEwgUQ~LV5x+~Y7_Lg8@yYFhbC?Rm*11Vl zot%6vwn|V3WmTb){iHbr7%{PW(IO^{w2+`IK-k~BAT8+t0PxG$Ljgq09#p^iDdaxL z|16ApuR8@9=|_`)da)_q1td#r!lso_i4R9Ga8@3xY%Cat6&9Wbzt+{85nUBa9fwSg z-g@Tn!PVYNDVroDN*-m+i&+D-Q$UE9!8Ukd%0smX4yND`>C+T$G2Ln&M`G;~yFhv+ zc+eQG+6?FXt|>31tJwKxT}w5n3~7HA#zp++_mg$2*TfV5%ac3`L$!d5W)ySLuFuWI z;*f6j2?i&2OO!c^pT4vVVC^VRS6NvzpuJM-_BSpM*%7ryS4DDZSIXS5AiB_`PtvV( ztieUCb$ya&1nNY$m_=Ew2E&I`Dm)igA6*h}5tC$p6UYo@OT)ZHFzt0vL94H%`;3Y4 z!7S`+#@8a@a_;a9Rrb zn8_opRKld}<_?@YRIgi=Xt(Fhx}&c#r7(O# zYKD~n46&jRzkaA_S3_=*w(^Q$pF)TCn_ZtHdi@Oek-_Rz=6h0S_R^<&NYlaH{6tKD z#kshpw=D7?982rfwu)mqt~VcY6kg%p8CJ1W}2oN793qCYU;N9ngP+VN_H z8Xc_Z#TR06tz2pbR7y+b)?28Vk1sV}kuB9odziFnJp2>R=@}u3DO%bF8S8OA2!qBw z!=B3604*cy4F|GbF^xZu5NlVzLxnrsSs|tpz8>}ew_X@TijRpK%vM6J`x(jf%7tdg z9its7E)`RQ(_FSvm2{0kPRUoR546&Fsm!Ry?MH*brHmDuV`*urvh33a{Ql!- zWZi24-?lG(DRqF$q3_6l0-vF1j-^I2U@ny6Q%fboYOvBCor)W+qz42@ptARsH5_TX zQdQjzk1u&?8L&wJ#pmV)<8d9MqQ_yqiz5+6D~BXoIPYP|=`M}`+>dkVQ&2O%x!A4J zMO`0&w04GyTJ7F77Du1LhfBL9zo!_>6NEf#oY&a$_`Kij_;)!@Wnhi;t-JiiQA*0& zipTF~-`W)=btTgr-yS>&}V{b6iVJ?*WSq0l_P~^yv<%e3?RyW1hV^q2=ib4F> zFX+;GheEej>jPxR;>cbMH@wz9V_OY^*}|gQx;oY8F+9kGKeYg0XJK%jI|n>{RIL@e z_Ne?tYUH%tT^>5Ov$)XE^+5Q$ze}uMcAvAZ^JPfN6>!Yz_p@oe9;_^8Q2IM7SFdkB z>;N;PQ;QD+(O50t5G!>0a#);dKmQx{q7NRxgxcN0*=`LP=VD(wXcg46vPg z_N<-lw7;^#6Sye;i}Du30*4%5udUgWmhZ9vL44P{;^>u!%3Y+F7>9F7Qqmv*$?Xfe 
z)DXNKtN+Imx>6}4g5B?}p9acgmclOUi~D$mVqv-%_nw^Xd}HGe8fvY(7#8vP z``eD&&@i2Tb?Hb{xt(=+aOUDf_JLzr+}()7vS8`;kxAFn=4klP`IXwlBSq5seUYAe%}w%!`E_Sta|mw!d_3~5INvFK<%0a_!I+7}#E zpRaQUL|;%N^&OLB{h~P#+yZ{bT`Ps+w|myTW(rE_$8bycWXu^Z_Y+8A+I~k{8)W>o z`O9xxw3(Vby7W)iu`;VzhdFF*O0%fN=?rP?BcEs&{KlsZzx{ zpSqMX>8>b#m}#wg)S>OH$T__uz4$~fVkS?r^uY75B^12U6(z(JFH2ix6^lpt+kt}i z%+u-QH$mrBJOmq^9+bV~2e2a*WtMX>Hp@%k2>(aoWEP_tRD#OQThm!WBIlwAU49xZRcVbOaSdL|3~yzPlHH|TB+dsJLUKwu2V1?9-TVpM5NhgbQo(`+tob% z+p97B)Mth=0EOZh8zLA7vJB%AJt*^62VmCR%d5}OuNuKDSPM|#g}7R-kp_W*-$rds zf!#4IBORhi2v+nUbjXN?7-MJVjDiDG9Gq^yD zkDN_uZ^jxT?Xj58neK(oK%%O2cb>~a~bQvZ;^IS zYI|Fg-Go!=#h}{R|8RU?vRSlKc}ev;@>9&BcT`WlSO!J!_M@d#Y~pOeZX07dz`hPo zKC`%1P6|Py7ITwrkGPH$)kqxkSZG2r8y;Lr-amN%&W3~cc42cA+qSeR$W~(s3cskO~?3cp#(qWZ%^pDfBfJjK14Xmd{)WHl0cWELN1a6Timj0b0!m$^LXQq zJJDYav0jLtG0ZdP*OZhL3hYPD5iMCk;kTAS+@CUpMIk!RNZ50hry&E5#FoKe!3Z&} z!zoA)qcA?$x?yLlS=J)hBN_#R!H;yhV)Z9%ryQ%{^0M^GkE}(!p=pn1%4y?s@Nt=; zsUYSWi0%%opjsFrefr{XA=2I6`JiKeqi`az35T=Vsk~P%sN(n>;itiKQAXJ5UDxrVsHRuOm!WON-^WzaDo?T`Hm+ zrO@I%tizUO0=2)=g6K5|bsy&RHI39tGKMjj&zH<13Kcw&F}ynZQ?Tgq&@>d5ej-Fv zY?MH?)3jzSXiLmbqLC3G&^%kqIWUwrW*Bo3Ie%s=<7z`wD$gM@2wa6x$a|^r(Hh#{ z4D1>9iuTe2R1nLwCjCXtBzlGn0@$HxMB~;X4-|Aw&ag=RqfV4-p0FqutnWTDP#RIg z6u2f$0P~YQtz_;2Bv89!)pr1t@D0KanZFN;WH}|#Z?*JILI9pOXrn!vQ>lDhsY|;Q z3W%E<<6vL!IgD+E?xfFhnT`JH%i#CEOnyX>EEKHG6K0p*e(|26iQ~ipTaAJizJ6{C z8Arg7YE2|%$*q+3Qld)(KhIHk7e>Gy=YLhdO^JLPoW3?%j#H6wFT}WI zE`fZsJ;xEdrJtuwe`9l_g-32`Te0V{0j(PI{&Sc%M{}6~VOV;a=>S5FVf9uiiS5fa z>ZZHl65-NG>+qEQ(&Pmez8e(B1L~%|`n1We%W%4h5!A8sMA=A{aa;Pi(y< ztOjysezomjLch4j2r|!Zk2Itxl5uW@P%8dd1QRB;4D28 zS{ed+mL3oS50@B&{4U38iLZ%bX)mm#+RJV?{ETIR&2OKiwPkKW5T6avwntJtsQ z%V_dH`iH`ymALSq0CstQY7_zL6fO-!0pj?jS9j%gOO>8LycqB=s^oQ^u(nWxdn`X%t}b5|h%l zM~u~;NbWabDF^xlfGkRe5uif{M0keMi6{i5?(oG@#!ZN0n=XFCZ{ddAh0iZa9M=aB* zg$(R?aqFP3zW&-C=0ZN#@ax~uE`#AXS~G37jZ*KPQ@B*Mc_A~@d0^C*X{Up5Qzw^dsMXNp!Yu6wzV zarNV6f(#4Em)E=P+kNy#SRm<`Q6Yny_iKNV=NY3QFR1TI{O+2z$sd40DY`Lw6L 
z>V4|g+pNJxURDy787FdwP_dRF8S@j@84GYDUkOIQ6flsXb9{^`1eht3WL9@|XHq`y z0evy5NOv5zF8ML^AgQ4+4k>;Ay_6fIof#riVgV3TDDyXJjqd*>v_6?lz_5^Ra7$_j@!!8(_tobN`0ro#Z0X(l_s{ZQ z-qVi%gV+E4hX+6W???XoH2(Kl{`)Kx6#jP+{nyj@-`(?H4}*~;A37X6d0p4~%N|DZ P_xe+eCPz$M_@Dm+xVMqi diff --git a/tools/post_processing_grid/post_processing_grid.py b/tools/post_processing_grid/post_processing_grid.py index 7684e4aa3..bb860c1d0 100644 --- a/tools/post_processing_grid/post_processing_grid.py +++ b/tools/post_processing_grid/post_processing_grid.py @@ -2,14 +2,11 @@ from pathlib import Path import pandas as pd import toml -import seaborn as sns -import matplotlib.pyplot as plt import re import numpy as np -import matplotlib.colors as mcolors -import matplotlib.cm as cm -from matplotlib.ticker import FuncFormatter import csv +import ast +from typing import Tuple, Dict, List, Any def load_grid_cases(grid_dir: Path): @@ -75,83 +72,6 @@ def load_grid_cases(grid_dir: Path): print('-----------------------------------------------------------') return combined_data -def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): - """ - Plot the status of simulation from the PROTEUS grid. - - Parameters - ---------- - cases_data : list - List of dictionaries containing the status of all simulation from the grid. - - plot_dir : Path - Path to the plots directory - - status_colors : dict, optional - A dictionary mapping statuses to specific colors. If None, a default palette is used. - - Returns - ------- - Plot saved to the specified directory. 
- """ - - # Extract and clean statuses - statuses = [case.get('status', 'unknown') or 'unknown' for case in cases_data] - status_counts = pd.Series(statuses).value_counts().sort_values(ascending=False) - # print("Unique statuses found:", pd.Series(statuses).unique()) - - # Set colors for the bars - if status_colors: - palette = {str(status): status_colors.get(str(status), 'gray') for status in status_counts.index} - else: - palette = sns.color_palette("Accent", len(status_counts)) - palette = dict(zip(status_counts.index, palette)) - - # Prepare dataframe for plotting - plot_df = pd.DataFrame({ - 'Status': status_counts.index, - 'Count': status_counts.values - }) - - #sns.set(style="white") - plt.figure(figsize=(10, 7)) - ax = sns.barplot( - data=plot_df, - x='Status', - y='Count', - hue='Status', # required to apply the palette - palette=palette, - dodge=False, - edgecolor='black' # edge color added here - ) - - # Remove legend if it was created - if ax.legend_: - ax.legend_.remove() - - # Add text on top of bars - total_simulations = len(cases_data) - for i, count in enumerate(status_counts.values): - percentage = (count / total_simulations) * 100 - ax.text( - i, count + 1, - f"{count} ({percentage:.1f}%)", - ha='center', va='bottom', fontsize=10 - ) - - - plt.title(f"Total number of simulations: {total_simulations}", fontsize=16) - plt.xlabel("Simulation status", fontsize=16) - plt.ylabel("Number of simulations", fontsize=16) - plt.yticks(fontsize=12) - plt.xticks(fontsize=12) - plt.tight_layout() - output_path = plot_dir+'grid_status_summary.png' - plt.savefig(output_path, dpi=300) - plt.close() - - print(f"Plot grid_status_summary.png saved to {output_path}") - def get_grid_parameters(grid_dir: str): """ Extracts grid parameters names and values from the manager.log file @@ -166,46 +86,51 @@ def get_grid_parameters(grid_dir: str): dict A dictionary where each key is the parameter name and the value is a list of parameter values. 
""" - # Dictionary to store the parameters and their values - parameters_dict = {} - - # Open and read the log file log_file = os.path.join(grid_dir, 'manager.log') + + if not os.path.exists(log_file): + print(f"Error: manager.log not found at {log_file}") + return {}, {} + with open(log_file, 'r') as file: lines = file.readlines() - # Regular expression patterns to find the relevant lines - param_pattern = re.compile(r'-- dimension: (.+)') - value_pattern = re.compile(r'values\s+:\s+\[([^\]]+)\]') + param_grid = {} + case_params = {} - current_dimension = None + dimension_pattern = re.compile(r"parameter:\s*(\S+)") + values_pattern = re.compile(r"values\s*:\s*\[(.*?)\]") + case_line_pattern = re.compile(r"\]\s+(\d+):\s+(\{.*\})") # Fixed pattern + + current_param = None for line in lines: - # Look for dimension lines to start processing the parameters - dimension_match = param_pattern.search(line) - if dimension_match: - current_dimension = dimension_match.group(1).strip() - - # If we have found a dimension, look for the values line - if current_dimension and 'values' in line: - value_match = value_pattern.search(line) - if value_match: - # Extract the values from the line and convert them into a list of floats or strings - values_str = value_match.group(1).strip() - values = [eval(value.strip()) for value in values_str.split(',')] - - # Store the values under the current dimension - parameters_dict[current_dimension] = values - current_dimension = None # Reset current dimension after processing - - # Print the extracted parameters - print('-----------------------------------------------------------') - print("Extracted Parameters:") - print("-----------------------------------------------------------") - for param, values in parameters_dict.items(): - print(f"{param}: {values}") - print("-----------------------------------------------------------") - return parameters_dict + line = line.strip() + + dim_match = dimension_pattern.search(line) + if dim_match: + 
current_param = dim_match.group(1) + continue + + val_match = values_pattern.search(line) + if val_match and current_param: + try: + val_list = ast.literal_eval(f"[{val_match.group(1)}]") + param_grid[current_param] = val_list + current_param = None + except Exception as e: + print(f"Error parsing values for {current_param}: {e}") + + case_match = case_line_pattern.search(line) + if case_match: + case_num = int(case_match.group(1)) + case_dict_str = case_match.group(2) + try: + case_params[case_num] = ast.literal_eval(case_dict_str) + except Exception as e: + print(f"Error parsing case {case_num}: {e}") + + return param_grid, case_params def extract_grid_output(cases_data, parameter_name): """ @@ -314,7 +239,7 @@ def extract_solidification_time(cases_data, phi_crit): return solidification_times -def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, extracted_value, output_to_extract, phi_crit, output_dir: Path): +def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_params, extracted_value, output_to_extract, phi_crit, output_dir: Path): """ Save all simulation information (status, grid parameters, output values, solidification times) into CSV files for later analysis (doing plots). @@ -330,6 +255,9 @@ def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, extracted_valu grid_parameters : dict Dictionary containing the grid parameters. + case_params : Dict[int, Dict[str, Any]] + Dictionary containing each case number with the name and values of the tested parameters in this grid. + extracted_value : dict Dictionary containing the extracted output values for each parameter. @@ -342,20 +270,20 @@ def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, extracted_valu output_dir : Path Directory where the CSV file will be saved. 
""" - # Ensure the output directory exists output_dir = Path(output_dir) output_dir.mkdir(parents=True, exist_ok=True) - # Path and name to save the CSV file - csv_file = output_dir / f"{grid_name}_extracted_data.csv" + # CSV file path + csv_file = output_dir / f"{grid_name}_extracted_data.csv" with open(csv_file, 'w', newline='') as csvfile: writer = csv.writer(csvfile) - # Write the header + # Header block writer.writerow(["#############################################################################################################"]) writer.writerow([f"Grid name: {grid_name}"]) writer.writerow([f"Total number of cases: {len(cases_data)}"]) + writer.writerow([f"Dimension of the grid: {len(grid_parameters)}"]) writer.writerow([f"phi_crit: {phi_crit}"]) writer.writerow(["----------------------------------------------------------"]) writer.writerow([" Grid Parameters"]) @@ -366,33 +294,43 @@ def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, extracted_valu values_str = f"[{', '.join(map(str, values))}]" writer.writerow([f"{aligned_param}: {values_str}"]) writer.writerow(["----------------------------------------------------------"]) - writer.writerow(["This file contains the following columns:"]) - writer.writerow([f"| Case number | Status | {' | '.join(extracted_value.keys())} |"]) + writer.writerow(["Extracted output values:" f"[{', '.join(extracted_value.keys())}]"]) + writer.writerow(["----------------------------------------------------------"]) + writer.writerow([f"| Case number | Status | {' | '.join(grid_parameters.keys())} | {' | '.join(extracted_value.keys())} |"]) writer.writerow(["#############################################################################################################"]) writer.writerow([]) - # Write the data for each case of the grid - statuses = [case.get('status', 'unknown') or 'unknown' for case in cases_data] + # CSV table header + writer.writerow(["Case number", "Status"] + list(grid_parameters.keys()) + 
list(extracted_value.keys())) + + # Data rows for case_index, case_data in enumerate(cases_data): - row = [case_index, f"'{statuses[case_index]}'"] + status = case_data.get('status', 'unknown') or 'unknown' + row = [case_index, f"'{status}'"] + + # Use case_params (from get_grid_parameters) to pull values for each param + case_param_values = case_params.get(case_index, {}) + + for param in grid_parameters.keys(): + row.append(case_param_values.get(param, 'NA')) + + # Add extracted output values for param in extracted_value.keys(): - row.append(extracted_value[param][case_index]) + value_list = extracted_value.get(param, []) + row.append(value_list[case_index] if case_index < len(value_list) else 'NA') + writer.writerow(row) - print(f"Data has been successfully saved to {csv_file}.") + print(f"Extracted data has been successfully saved to {csv_file}.") print('-----------------------------------------------------------') - if __name__ == '__main__': - # Paths to the grid and the plot folder + # Paths to the grid folder grid_name = 'escape_grid_4_params_Pxuv_a_epsilon_fO2' - grid_path = '/home2/p315557/outputs_Norma2/good_grids/' + grid_name + '/' - plots_path = '/home2/p315557/PROTEUS/tools/post_processing_grid/plots/' + grid_name + '/' - data_dir = '/home2/p315557/PROTEUS/tools/post_processing_grid/processed_data/' + grid_name + '/' - + grid_path = f'/home2/p315557/outputs_Norma2/good_grids/{grid_name}/' + data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' # User choose the parameters to post-process the grid - plot=True # True or False output_to_extract = ['esc_rate_total', # List of output values to extract from the runtime_helpfile 'Phi_global', 'P_surf', @@ -402,16 +340,14 @@ def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, extracted_valu # Post-processing the grid cases_data = load_grid_cases(grid_path) # Load all simulation cases - if plot: - plot_grid_status(cases_data, plots_path) # Plot the 
summary histogram of the grid status - grid_parameters = get_grid_parameters(grid_path) # Extract grid parameters + grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters for param in output_to_extract: - extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values + extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values solidification_times = extract_solidification_time(cases_data, phi_crit) # Extract the solidification time extracted_value['solidification_time'] = solidification_times # Save all the extracted data to a CSV file - save_grid_data_to_csv(grid_name, cases_data, grid_parameters, extracted_value, solidification_times, phi_crit, data_dir) + save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, extracted_value, solidification_times, phi_crit, data_dir) print('-----------------------------------------------------------') print("Post-processing completed. Let's do some plots !") diff --git a/tools/post_processing_grid/processed_data/escape_grid_4_params_Pxuv_a_epsilon_fO2/escape_grid_4_params_Pxuv_a_epsilon_fO2_extracted_data.csv b/tools/post_processing_grid/processed_data/escape_grid_4_params_Pxuv_a_epsilon_fO2/escape_grid_4_params_Pxuv_a_epsilon_fO2_extracted_data.csv deleted file mode 100644 index 2df4557f2..000000000 --- a/tools/post_processing_grid/processed_data/escape_grid_4_params_Pxuv_a_epsilon_fO2/escape_grid_4_params_Pxuv_a_epsilon_fO2_extracted_data.csv +++ /dev/null @@ -1,541 +0,0 @@ -############################################################################################################# -Grid name: escape_grid_4_params_Pxuv_a_epsilon_fO2 -Total number of cases: 525 -phi_crit: 0.005 ----------------------------------------------------------- - Grid Parameters ----------------------------------------------------------- -"Redox state : [-6, -4, -2, 0, 2, 4, 6]" -"Pxuv : [1e-05, 0.0001, 0.001, 0.01, 0.1]" -"efficiency : 
[0.1, 0.3, 0.5, 0.7, 1.0]" -"semimajoraxis: [0.1, 1.0, 1.5]" ----------------------------------------------------------- -This file contains the following columns: -| Case number | Status | esc_rate_total | Phi_global | P_surf | atm_kg_per_mol | solidification_time | -############################################################################################################# - -0,'10 Completed (solidified)',67703.72,0.004906388,498.4496,0.004871988,360243.0 -1,'25 Error (died)',10861850.0,0.9488592,472.4397,0.005472131,nan -2,'10 Completed (solidified)',29194.38,0.004841147,498.4835,0.004871882,336653.0 -3,'10 Completed (solidified)',203138.9,0.004995163,498.2177,0.004871985,360018.0 -4,'25 Error (died)',72574700.0,0.929459,353.3335,0.005614977,nan -5,'10 Completed (solidified)',145825.2,0.001995689,498.6418,0.00487306,337353.0 -6,'10 Completed (solidified)',338514.5,0.004965492,497.9852,0.004871975,359911.0 -7,'10 Completed (solidified)',204370.6,0.004841626,498.2091,0.004871895,336598.0 -8,'10 Completed (solidified)',87634.38,0.004907217,498.4127,0.00487195,336537.0 -9,'10 Completed (solidified)',676116.1,0.00204669,497.7406,0.004873107,360814.0 -10,'25 Error (died)',51757670.0,0.9287383,352.2479,0.005615245,nan -11,'10 Completed (solidified)',291979.3,0.004854271,498.0726,0.004871904,336516.0 -12,'25 Error (died)',31047200.0,0.9301765,360.4474,0.005602393,nan -13,'10 Completed (solidified)',473930.3,0.004911005,497.7644,0.004872003,360223.0 -14,'10 Completed (solidified)',28384.19,0.004892344,498.5124,0.004871967,336554.0 -15,'25 Error (died)',103600100.0,0.9295238,350.6526,0.005620765,nan -16,'10 Completed (solidified)',195985.4,0.004977072,498.2225,0.004871963,359873.0 -17,'25 Error (died)',45889640.0,0.9273917,347.1285,0.005622346,nan -18,'10 Completed (solidified)',326606.8,0.004938631,498.001,0.004871962,359834.0 -19,'25 Error (died)',9639414.0,0.9504914,486.1506,0.005459048,nan -20,'25 Error (died)',64554400.0,0.9295432,354.4608,0.005612868,nan -21,'10 
Completed (solidified)',65334.27,0.004977864,498.4477,0.004871976,360257.0 -22,'25 Error (died)',92478120.0,0.9312607,359.3701,0.005607573,nan -23,'25 Error (died)',27585030.0,0.9301442,360.6329,0.005601931,nan -24,'10 Completed (solidified)',142114.3,0.004984716,498.2326,0.004871669,336325.0 -25,'10 Completed (solidified)',85016.01,0.002000602,498.7392,0.004873061,337298.0 -26,'25 Error (died)',8591862.0,0.9514668,494.8517,0.005450983,nan -27,'10 Completed (solidified)',27577.01,0.004861797,498.4816,0.004871814,336639.0 -28,'10 Completed (solidified)',457597.7,0.004984443,497.8171,0.004872081,359926.0 -29,'10 Completed (solidified)',198417.5,0.002036447,498.565,0.004873078,337419.0 -30,'10 Completed (solidified)',283490.0,0.002053718,498.4335,0.004873089,337300.0 -31,'25 Error (died)',24619290.0,0.9279063,351.2857,0.005614975,nan -32,'10 Completed (solidified)',653519.0,0.00491445,497.4775,0.004872056,360122.0 -33,'10 Completed (solidified)',63086.14,0.004989442,498.452,0.004871975,360061.0 -34,'10 Completed (solidified)',189369.4,0.004981859,498.2731,0.004872085,360006.0 -35,'25 Error (died)',41199100.0,0.9293393,355.839,0.005609464,nan -36,'25 Error (died)',57651310.0,0.9277621,347.5743,0.005622412,nan -37,'10 Completed (solidified)',82716.26,0.004841894,498.3983,0.004871881,336651.0 -38,'10 Completed (solidified)',315341.7,0.00499652,498.0089,0.004871932,360229.0 -39,'10 Completed (solidified)',137839.9,0.004825349,498.3056,0.004871866,336473.0 -40,'10 Completed (solidified)',440898.4,0.004835041,497.7451,0.004871772,359952.0 -41,'10 Completed (solidified)',193097.9,0.004891533,498.2431,0.004871946,336480.0 -42,'10 Completed (solidified)',631391.1,0.004997609,497.54,0.004872141,360081.0 -43,'25 Error (died)',7725990.0,0.9520145,499.8614,0.005446439,nan -44,'10 Completed (solidified)',61023.32,0.004967416,498.4514,0.004871958,360135.0 -45,'10 Completed (solidified)',26836.69,0.004827417,498.483,0.00487187,336573.0 -46,'10 Completed 
(solidified)',80567.01,0.004910122,498.4264,0.004871967,336551.0 -47,'25 Error (died)',82541550.0,0.9291519,351.0675,0.005618861,nan -48,'10 Completed (solidified)',275596.2,0.004784428,498.077,0.004871839,336537.0 -49,'10 Completed (solidified)',183225.5,0.00499333,498.2917,0.004872124,359917.0 -50,'25 Error (died)',51965660.0,0.9294277,355.1927,0.005611039,nan -51,'10 Completed (solidified)',427317.1,0.004922584,497.8596,0.004872055,359951.0 -52,'25 Error (died)',22166160.0,0.9295618,358.585,0.005604468,nan -53,'10 Completed (solidified)',610043.1,0.004980424,497.5049,0.004871925,360104.0 -54,'25 Error (died)',37118360.0,0.9301077,359.5539,0.005604008,nan -55,'10 Completed (solidified)',305132.1,0.004983641,498.0408,0.004871976,359874.0 -56,'25 Error (died)',74314770.0,0.929005,351.2263,0.005618121,nan -57,'10 Completed (solidified)',26103.85,0.004817895,498.481,0.004871858,336557.0 -58,'10 Completed (solidified)',187700.5,0.001995293,498.5761,0.004873067,337336.0 -59,'10 Completed (solidified)',134417.3,0.004902077,498.3981,0.004872136,336759.0 -60,'25 Error (died)',20189460.0,0.9349988,384.0395,0.005570524,nan -61,'10 Completed (solidified)',268564.6,0.004878108,498.1333,0.004871977,336434.0 -62,'25 Error (died)',6959020.0,0.9527038,506.2141,0.005440801,nan -63,'25 Error (died)',33380220.0,0.9283625,352.3951,0.005613908,nan -64,'10 Completed (solidified)',58981.55,0.004946749,498.45,0.004871947,360448.0 -65,'25 Error (died)',47069170.0,0.9297849,357.1815,0.005607939,nan -66,'10 Completed (solidified)',176873.7,0.004955614,498.2265,0.004871879,360321.0 -67,'10 Completed (solidified)',78333.43,0.004852214,498.4092,0.004871897,336637.0 -68,'10 Completed (solidified)',295102.3,0.004976322,498.0847,0.004872062,360265.0 -69,'10 Completed (solidified)',261227.4,0.004898281,498.1373,0.004871956,336429.0 -70,'10 Completed (solidified)',182842.3,0.004893463,498.2569,0.004871941,336549.0 -71,'25 Error (died)',7687754.0,0.99306,297.7589,0.008192613,nan -72,'10 Completed 
(solidified)',413206.4,0.004979314,497.8924,0.00487208,359960.0 -73,'25 Error (died)',67322020.0,0.9298588,355.5548,0.005611483,nan -74,'10 Completed (solidified)',130466.9,0.002053214,498.675,0.004873081,337532.0 -75,'10 Completed (solidified)',66245.88,0.004940888,518.5295,0.005025867,2510533.0 -76,'10 Completed (solidified)',589982.1,0.004924449,497.5703,0.004872022,360187.0 -77,'25 Error (died)',27361000.0,0.588702,25.48924,0.01492393,nan -78,'10 Completed (solidified)',28768.78,0.004999425,519.2182,0.00502629,1643457.0 -79,'25 Error (died)',17138840.0,0.8138609,51.71766,0.01368148,nan -80,'10 Completed (solidified)',198848.4,0.004777559,516.8097,0.005025238,2506708.0 -81,'10 Completed (solidified)',463659.6,0.004909083,513.9949,0.005026299,2490477.0 -82,'10 Completed (solidified)',86169.24,0.004923146,518.9528,0.005026685,1646321.0 -83,'25 Error (died)',54745240.0,0.67268,28.95581,0.01547419,nan -84,'10 Completed (solidified)',331048.6,0.004811223,515.3144,0.005025484,2497540.0 -85,'25 Error (died)',38287730.0,0.6234554,26.75746,0.01520114,nan -86,'10 Completed (solidified)',143627.9,0.004781931,517.9856,0.005025047,1642937.0 -87,'25 Error (died)',7361420.0,0.9956705,347.6857,0.007926599,nan -88,'10 Completed (solidified)',201285.4,0.004760575,517.6623,0.005025363,1644929.0 -89,'25 Error (died)',17300130.0,0.9056885,78.74175,0.01212733,nan -90,'10 Completed (solidified)',662338.3,0.004972194,511.9385,0.005027177,2483646.0 -91,'10 Completed (solidified)',287598.9,0.004965348,517.2125,0.005026285,1640685.0 -92,'10 Completed (solidified)',63975.53,0.004932037,519.2157,0.005027694,2518724.0 -93,'10 Completed (solidified)',83907.79,0.004929336,518.627,0.005025754,1645508.0 -94,'25 Error (died)',37069020.0,0.6220283,26.78364,0.01517222,nan -95,'10 Completed (solidified)',139802.9,0.004919013,519.249,0.005028701,1647730.0 -96,'25 Error (died)',26460280.0,0.5797975,24.98092,0.01489373,nan -97,'10 Completed 
(solidified)',27923.66,0.004922845,519.3944,0.005026643,1646244.0 -98,'10 Completed (solidified)',191995.9,0.004922239,517.0608,0.005025934,2499800.0 -99,'10 Completed (solidified)',195622.2,0.004922123,517.7767,0.005025824,1645032.0 -100,'10 Completed (solidified)',319775.6,0.004912448,515.5957,0.005026079,2494834.0 -101,'10 Completed (solidified)',279188.4,0.004916893,517.4807,0.005026813,1643526.0 -102,'25 Error (died)',52935430.0,0.645002,26.93298,0.01550197,nan -103,'10 Completed (solidified)',639325.0,0.004934669,512.4593,0.005028068,2499867.0 -104,'10 Completed (solidified)',447685.0,0.004960772,514.2615,0.005026704,2497176.0 -105,'25 Error (died)',6897345.0,0.9963418,364.8108,0.007849838,nan -106,'10 Completed (solidified)',61868.34,0.004920619,519.053,0.005027163,2521499.0 -107,'25 Error (died)',16013430.0,0.8297413,55.24699,0.01343596,nan -108,'10 Completed (solidified)',185578.3,0.004857488,516.9909,0.005025465,2500237.0 -109,'10 Completed (solidified)',27177.56,0.004922003,519.3047,0.005026381,1647717.0 -110,'25 Error (died)',25649530.0,0.5948271,26.11658,0.01487996,nan -111,'10 Completed (solidified)',136075.4,0.004902185,519.1891,0.005028432,1646688.0 -112,'10 Completed (solidified)',81713.31,0.00495737,518.7098,0.00502597,1646206.0 -113,'10 Completed (solidified)',309366.6,0.004764858,515.578,0.005025519,2502079.0 -114,'25 Error (died)',35836060.0,0.6016311,25.46377,0.01514743,nan -115,'10 Completed (solidified)',190521.7,0.004856817,517.678,0.005025357,1644973.0 -116,'25 Error (died)',51271660.0,0.6456139,27.11219,0.01547038,nan -117,'10 Completed (solidified)',433139.3,0.00487856,514.2132,0.005026031,2498875.0 -118,'10 Completed (solidified)',272113.4,0.00491518,518.1378,0.00502852,1645396.0 -119,'25 Error (died)',6475586.0,0.9969941,384.3886,0.007769235,nan -120,'10 Completed (solidified)',59801.57,0.004850063,518.4074,0.005025206,2505176.0 -121,'10 Completed (solidified)',619260.7,0.004926559,512.2339,0.00502672,2489488.0 -122,'10 Completed 
(solidified)',26486.14,0.004914318,519.0187,0.005025572,1646116.0 -123,'10 Completed (solidified)',179368.0,0.004810605,516.9916,0.005025229,2504555.0 -124,'25 Error (died)',15957080.0,0.9089323,80.449,0.01204281,nan -125,'10 Completed (solidified)',79336.75,0.00478303,518.4546,0.005024975,1643316.0 -126,'10 Completed (solidified)',185059.7,0.004916501,518.1162,0.005026554,1644506.0 -127,'10 Completed (solidified)',132203.3,0.004921325,518.5798,0.005026685,1646051.0 -128,'25 Error (died)',34686270.0,0.599228,25.41568,0.0151188,nan -129,'25 Error (died)',24801250.0,0.5441033,22.83689,0.0147991,nan -130,'10 Completed (solidified)',418442.4,0.004801294,514.2764,0.005025613,2488914.0 -131,'25 Error (died)',49732510.0,0.6484737,27.48453,0.0154303,nan -132,'10 Completed (solidified)',299032.6,0.004901987,515.7688,0.005025969,2501291.0 -133,'10 Completed (solidified)',597881.7,0.004841836,512.2841,0.005026088,2483515.0 -134,'10 Completed (solidified)',264584.0,0.004843391,517.0946,0.005025364,1644180.0 -135,'10 Completed (solidified)',57877.69,0.004796431,518.3761,0.005024988,2512241.0 -136,'10 Completed (solidified)',173418.9,0.004913228,517.4552,0.005026502,2510491.0 -137,'25 Error (died)',6081025.0,0.9975954,406.8123,0.007685172,nan -138,'10 Completed (solidified)',128884.3,0.004925041,518.27,0.005025773,1646205.0 -139,'10 Completed (solidified)',25758.27,0.004926754,519.0519,0.005025668,1645437.0 -140,'25 Error (died)',24314510.0,0.7533137,39.87145,0.01462918,nan -141,'25 Error (died)',15599180.0,0.9340547,96.40169,0.01133938,nan -142,'25 Error (died)',33627120.0,0.5949858,25.23905,0.01509008,nan -143,'10 Completed (solidified)',292053.7,0.02410516,522.4857,0.005075257,nan -144,'10 Completed (solidified)',77240.91,0.00489782,519.2741,0.00502737,1646995.0 -145,'10 Completed (solidified)',407769.2,0.01564291,519.1554,0.005056173,nan -146,'25 Error (died)',55821450.0,0.9789547,176.2164,0.009350899,nan -147,'10 Completed 
(solidified)',582761.0,0.01755161,517.5473,0.005060446,nan -148,'14 Completed (net flux is small)',6281748.0,1.0,636.8511,0.01322981,nan -149,'10 Completed (solidified)',180291.2,0.006135706,518.6227,0.005029766,nan -150,'10 Completed (solidified)',257541.1,0.006126987,518.0101,0.005029759,nan -151,'14 Completed (net flux is small)',18845240.0,1.0,636.8509,0.01322981,nan -152,'10 Completed (solidified)',27041.72,0.01037476,686.702,0.006289473,nan -153,'10 Completed (solidified)',55685.81,0.38437,644.5419,0.00889913,nan -154,'14 Completed (net flux is small)',31408740.0,1.0,636.8508,0.01322981,nan -155,'10 Completed (solidified)',81048.36,0.005861142,682.7729,0.006263289,nan -156,'10 Completed (solidified)',280813.5,0.3475405,637.5932,0.00861631,nan -157,'10 Completed (solidified)',166864.2,0.3969359,638.5655,0.009033908,nan -158,'14 Completed (net flux is small)',62817480.0,1.0,636.8504,0.01322982,nan -159,'10 Completed (solidified)',135192.7,0.01036147,684.415,0.006290697,nan -160,'14 Completed (net flux is small)',43972230.0,1.0,636.8506,0.01322981,nan -161,'14 Completed (net flux is small)',6042665.0,1.0,636.8511,0.01322981,nan -162,'10 Completed (solidified)',189040.1,0.005055634,678.6399,0.006258278,nan -163,'14 Completed (net flux is small)',18127990.0,1.0,636.8509,0.01322981,nan -164,'10 Completed (solidified)',395637.8,0.332262,633.5866,0.008507002,nan -165,'10 Completed (solidified)',554388.0,0.3913497,622.0559,0.009056996,nan -166,'10 Completed (solidified)',79104.78,0.01319483,688.5267,0.006309024,nan -167,'10 Completed (solidified)',270083.2,0.004921809,675.4297,0.006256703,4290414.0 -168,'10 Completed (solidified)',272855.3,0.4007522,633.9868,0.009088741,nan -169,'10 Completed (solidified)',26369.98,0.01323288,689.7768,0.006308739,nan -170,'10 Completed (solidified)',54692.88,0.3845082,644.5896,0.008900205,nan -171,'10 Completed (solidified)',184542.0,0.01037768,683.4735,0.006291594,nan -172,'10 Completed 
(solidified)',164123.2,0.3825995,640.1841,0.008903312,nan -173,'14 Completed (net flux is small)',30213320.0,1.0,636.8508,0.01322981,nan -174,'14 Completed (net flux is small)',60426640.0,1.0,636.8504,0.01322982,nan -175,'10 Completed (solidified)',547575.7,0.3681032,623.6001,0.008846591,nan -176,'10 Completed (solidified)',263480.7,0.01252051,687.5644,0.006311978,nan -177,'10 Completed (solidified)',53725.34,0.3846744,644.6369,0.00890154,nan -178,'14 Completed (net flux is small)',5808209.0,1.0,636.8511,0.01322981,nan -179,'10 Completed (solidified)',25692.14,0.005025768,681.6201,0.006255753,nan -180,'10 Completed (solidified)',131773.3,0.004987555,678.9673,0.006256275,4288583.0 -181,'10 Completed (solidified)',77095.74,0.005801611,682.2432,0.006262239,nan -182,'14 Completed (net flux is small)',42298650.0,1.0,636.8506,0.01322981,nan -183,'14 Completed (net flux is small)',29041040.0,1.0,636.8508,0.01322981,nan -184,'10 Completed (solidified)',380679.6,0.4032842,628.9052,0.009133277,nan -185,'10 Completed (solidified)',128646.4,0.0134422,690.7813,0.006315699,nan -186,'14 Completed (net flux is small)',17424630.0,1.0,636.851,0.01322981,nan -187,'14 Completed (net flux is small)',40657460.0,1.0,636.8507,0.01322981,nan -188,'10 Completed (solidified)',161000.1,0.3991163,638.6232,0.009052969,nan -189,'10 Completed (solidified)',266613.7,0.4271393,632.1903,0.009335179,nan -190,'10 Completed (solidified)',373988.4,0.4080462,628.7416,0.009176319,nan -191,'14 Completed (net flux is small)',58082080.0,1.0,636.8504,0.01322982,nan -192,'10 Completed (solidified)',179958.5,0.004987525,681.2592,0.00626107,4299265.0 -193,'10 Completed (solidified)',556326.2,0.1823797,640.8081,0.007397363,nan -194,'14 Completed (net flux is small)',5595403.0,1.0,636.8511,0.01322981,nan -195,'10 Completed (solidified)',256968.7,0.004873672,675.1354,0.00625585,4289756.0 -196,'14 Completed (net flux is small)',16786210.0,1.0,636.851,0.01322981,nan -197,'14 Completed (net flux is 
small)',27977010.0,1.0,636.8508,0.01322981,nan -198,'10 Completed (solidified)',25074.75,0.004976974,684.2236,0.00625874,4294860.0 -199,'10 Completed (solidified)',52367.77,0.434725,640.1217,0.009366958,nan -200,'10 Completed (solidified)',125479.9,0.0115051,688.8879,0.00630266,nan -201,'10 Completed (solidified)',158169.5,0.3850745,639.9072,0.008924851,nan -202,'10 Completed (solidified)',75186.66,0.005767616,681.7204,0.006261382,nan -203,'14 Completed (net flux is small)',39167820.0,1.0,636.8507,0.01322981,nan -204,'10 Completed (solidified)',262141.8,0.4241077,632.3915,0.009305825,nan -205,'14 Completed (net flux is small)',55954020.0,1.0,636.8505,0.01322982,nan -206,'10 Completed (solidified)',366153.2,0.4303609,627.9795,0.009384989,nan -207,'10 Completed (solidified)',250565.6,0.005067891,677.3555,0.006259366,nan -208,'14 Completed (net flux is small)',16182240.0,1.0,636.851,0.01322981,nan -209,'14 Completed (net flux is small)',5394078.0,1.0,636.8511,0.01322981,nan -210,'10 Completed (solidified)',73440.89,0.00497839,682.449,0.006258441,4303131.0 -211,'10 Completed (solidified)',175412.3,0.005079951,679.1054,0.006258687,nan -212,'10 Completed (solidified)',51834.59,0.3845291,644.6932,0.008899971,nan -213,'10 Completed (solidified)',155430.6,0.3852281,640.0167,0.008925816,nan -214,'10 Completed (solidified)',122392.8,0.004965597,681.2767,0.006258839,4297686.0 -215,'14 Completed (net flux is small)',26970390.0,1.0,636.8508,0.01322981,nan -216,'10 Completed (solidified)',530225.3,0.3445829,625.6743,0.008637771,nan -217,'10 Completed (solidified)',259066.5,0.3856322,635.4155,0.008948891,nan -218,'10 Completed (solidified)',361189.0,0.4027936,629.564,0.009125634,nan -219,'10 Completed (solidified)',171339.4,0.005821539,680.374,0.006263793,nan -220,'10 Completed (solidified)',24471.09,0.005058587,682.0418,0.006256397,nan -221,'14 Completed (net flux is small)',37758550.0,1.0,636.8507,0.01322981,nan -222,'10 Completed 
(solidified)',524649.4,0.2922852,629.895,0.008203519,nan -223,'14 Completed (net flux is small)',15721650.0,1.0,582.7718,0.02587928,nan -224,'10 Completed (solidified)',156203.1,0.05088213,1401.212,0.01309585,nan -225,'10 Completed (solidified)',52043.62,0.04162354,1447.181,0.01294707,nan -226,'10 Completed (solidified)',69723.02,0.004951518,1575.044,0.01255496,3633089.0 -227,'14 Completed (net flux is small)',53940780.0,1.0,636.8505,0.01322982,nan -228,'10 Completed (solidified)',261741.6,0.02323777,1493.604,0.01269353,nan -229,'10 Completed (solidified)',244681.8,0.005070807,677.4687,0.006259382,nan -230,'14 Completed (net flux is small)',5240551.0,1.0,582.7719,0.02587928,nan -231,'10 Completed (solidified)',23226.79,0.00501055,1569.826,0.01256699,nan -232,'14 Completed (net flux is small)',26202750.0,1.0,582.7716,0.02587929,nan -233,'10 Completed (solidified)',116160.5,0.00485124,1563.958,0.01257545,3635707.0 -234,'14 Completed (net flux is small)',36683860.0,1.0,582.7715,0.02587929,nan -235,'14 Completed (net flux is small)',52405510.0,1.0,582.7714,0.02587929,nan -236,'10 Completed (solidified)',162725.2,0.004973301,1563.589,0.01257149,3632383.0 -237,'10 Completed (solidified)',22959.32,0.01200644,1559.69,0.01256556,nan -238,'10 Completed (solidified)',51705.73,0.02638209,1503.474,0.01273755,nan -239,'10 Completed (solidified)',366921.4,0.02297497,1481.911,0.01270009,nan -240,'10 Completed (solidified)',153902.0,0.05059475,1404.908,0.01306375,nan -241,'10 Completed (solidified)',520848.7,0.04461905,1385.468,0.01300435,nan -242,'14 Completed (net flux is small)',5146293.0,1.0,582.7719,0.02587928,nan -243,'10 Completed (solidified)',114630.3,0.004948322,1571.296,0.01255976,3640022.0 -244,'10 Completed (solidified)',232420.1,0.004943558,1568.48,0.01255806,3634708.0 -245,'14 Completed (net flux is small)',36024050.0,1.0,582.7715,0.02587929,nan -246,'14 Completed (net flux is small)',15438880.0,1.0,582.7718,0.02587928,nan -247,'14 Completed (net flux is 
small)',25731460.0,1.0,582.7717,0.02587929,nan -248,'10 Completed (solidified)',257551.2,0.0421414,1422.653,0.01296354,nan -249,'10 Completed (solidified)',354774.1,0.08814272,1249.201,0.01377559,nan -250,'10 Completed (solidified)',68755.91,0.006081422,1564.007,0.01256302,nan -251,'14 Completed (net flux is small)',51462930.0,1.0,582.7714,0.02587929,nan -252,'10 Completed (solidified)',229344.7,0.004968161,1560.732,0.01257309,3636471.0 -253,'14 Completed (net flux is small)',5050936.0,1.0,582.7719,0.02587928,nan -254,'10 Completed (solidified)',511876.7,0.05052878,1364.796,0.0130893,nan -255,'10 Completed (solidified)',22597.26,0.007453225,1563.109,0.01256143,nan -256,'14 Completed (net flux is small)',15152810.0,1.0,582.7718,0.02587928,nan -257,'10 Completed (solidified)',160620.2,0.006012471,1570.154,0.01254376,nan -258,'10 Completed (solidified)',50904.05,0.02656554,1503.327,0.01273731,nan -259,'10 Completed (solidified)',112948.8,0.00613185,1562.824,0.01256223,nan -260,'10 Completed (solidified)',152620.3,0.02935881,1480.806,0.01278314,nan -261,'14 Completed (net flux is small)',35356550.0,1.0,582.7715,0.02587929,nan -262,'10 Completed (solidified)',67782.53,0.004953192,1573.52,0.01255811,3639848.0 -263,'14 Completed (net flux is small)',25254680.0,1.0,582.7717,0.02587929,nan -264,'10 Completed (solidified)',252486.2,0.05704363,1369.792,0.01317493,nan -265,'10 Completed (solidified)',356575.6,0.02308456,1482.762,0.01269699,nan -266,'10 Completed (solidified)',158177.4,0.004936716,1563.117,0.01257309,3634941.0 -267,'10 Completed (solidified)',225916.1,0.007409788,1553.335,0.01256891,nan -268,'14 Completed (net flux is small)',50509360.0,1.0,582.7714,0.02587929,nan -269,'10 Completed (solidified)',150709.3,0.02749152,1490.478,0.01274194,nan -270,'14 Completed (net flux is small)',4958205.0,1.0,582.7719,0.02587928,nan -271,'10 Completed (solidified)',66794.62,0.006168411,1565.098,0.0125602,nan -272,'10 Completed 
(solidified)',50066.37,0.04227327,1445.685,0.01295002,nan -273,'10 Completed (solidified)',250031.4,0.04562959,1410.568,0.01301204,nan -274,'10 Completed (solidified)',22268.19,0.004942285,1575.23,0.01255778,3638922.0 -275,'10 Completed (solidified)',351095.1,0.03019367,1456.184,0.01279586,nan -276,'14 Completed (net flux is small)',14874620.0,1.0,582.7718,0.02587928,nan -277,'14 Completed (net flux is small)',24791030.0,1.0,582.7717,0.02587929,nan -278,'10 Completed (solidified)',506222.0,0.05101607,1361.418,0.0131231,nan -279,'10 Completed (solidified)',111348.1,0.004941277,1572.752,0.012557,3636876.0 -280,'10 Completed (solidified)',155888.4,0.004933169,1563.109,0.01257322,3633856.0 -281,'14 Completed (net flux is small)',34707440.0,1.0,582.7715,0.02587929,nan -282,'10 Completed (solidified)',49072.86,0.07001014,1343.767,0.01340435,nan -283,'14 Completed (net flux is small)',4871428.0,1.0,582.7719,0.02587928,nan -284,'10 Completed (solidified)',222672.0,0.004884666,1559.734,0.01257654,3634339.0 -285,'14 Completed (net flux is small)',14614290.0,1.0,582.7718,0.02587928,nan -286,'10 Completed (solidified)',501450.2,0.02689962,1450.255,0.01276485,nan -287,'14 Completed (net flux is small)',24357140.0,1.0,582.7717,0.02587929,nan -288,'14 Completed (net flux is small)',49582050.0,1.0,582.7714,0.02587929,nan -289,'10 Completed (solidified)',65733.12,0.004933076,1572.239,0.01256112,3639198.0 -290,'10 Completed (solidified)',21911.22,0.00611567,1566.243,0.01256111,nan -291,'10 Completed (solidified)',148363.0,0.02612085,1492.264,0.01274395,nan -292,'10 Completed (solidified)',246130.9,0.05095506,1390.4,0.0131034,nan -293,'10 Completed (solidified)',109553.0,0.00491605,1570.225,0.01256265,3641196.0 -294,'14 Completed (net flux is small)',34100000.0,1.0,582.7716,0.02587929,nan -295,'10 Completed (solidified)',153414.4,0.004893737,1562.646,0.01257481,3635554.0 -296,'10 Completed (solidified)',346002.3,0.03098054,1455.257,0.01279515,nan -297,'14 Completed (net flux is 
small)',48714280.0,1.0,582.7714,0.02587929,nan -298,'10 Completed (solidified)',219140.2,0.004835085,1559.399,0.01257813,3634297.0 -299,'10 Completed (solidified)',14532890.0,0.6840844,450.5894,0.03673918,nan -300,'10 Completed (solidified)',48468.1,0.00493607,2928.445,0.02088377,5347064.0 -301,'10 Completed (solidified)',492741.9,0.04163534,1396.025,0.01297826,nan -302,'14 Completed (net flux is small)',4864114.0,0.7884512,584.9552,0.0362648,nan -303,'10 Completed (solidified)',64357.01,0.004984752,2926.467,0.02086076,2722727.0 -304,'10 Completed (solidified)',21480.17,0.006295242,2874.69,0.02074235,nan -305,'10 Completed (solidified)',145823.1,0.02112701,2490.276,0.02052224,nan -306,'10 Completed (solidified)',339600.2,0.01003618,2710.053,0.02058105,nan -307,'10 Completed (solidified)',24092710.0,0.5212082,264.3165,0.03738803,nan -308,'10 Completed (solidified)',242340.7,0.004949403,2899.974,0.02088001,5338054.0 -309,'10 Completed (solidified)',33742680.0,0.5076395,239.9249,0.03749317,nan -310,'10 Completed (solidified)',150181.1,0.004801359,2930.886,0.02090104,2720981.0 -311,'10 Completed (solidified)',214790.7,0.006278418,2861.521,0.0207438,nan -312,'14 Completed (net flux is small)',4804171.0,0.7956935,589.6805,0.03621893,nan -313,'10 Completed (solidified)',21323.22,0.004917958,2933.341,0.0208742,2724633.0 -314,'10 Completed (solidified)',48194.28,0.007740432,2819.885,0.02067354,nan -315,'10 Completed (solidified)',107246.5,0.004962106,2924.832,0.02086734,2724146.0 -316,'10 Completed (solidified)',23874110.0,0.6077424,351.7703,0.03705713,nan -317,'10 Completed (solidified)',48124760.0,0.4601165,155.9491,0.03785938,nan -318,'10 Completed (solidified)',144567.9,0.007740207,2806.497,0.02067288,nan -319,'10 Completed (solidified)',485143.8,0.007727039,2759.992,0.02067168,nan -320,'10 Completed (solidified)',14349150.0,0.6856179,452.6683,0.03673298,nan -321,'10 Completed (solidified)',337334.7,0.007727914,2780.227,0.02067263,nan -322,'10 Completed 
(solidified)',149333.1,0.004875993,2927.386,0.02089193,2721136.0 -323,'10 Completed (solidified)',47525460.0,0.4545378,152.2052,0.03787207,nan -324,'10 Completed (solidified)',33345260.0,0.5167122,248.8261,0.03745441,nan -325,'10 Completed (solidified)',213568.7,0.006324725,2859.488,0.0207376,nan -326,'14 Completed (net flux is small)',4742044.0,0.7953901,590.0417,0.03622015,nan -327,'10 Completed (solidified)',47834.09,0.0049704,2926.802,0.02087879,5344867.0 -328,'10 Completed (solidified)',63980.57,0.004957385,2928.081,0.02086688,2723757.0 -329,'10 Completed (solidified)',241329.5,0.01486394,2601.978,0.02048336,nan -330,'14 Completed (net flux is small)',14169230.0,0.6870766,454.6751,0.03672702,nan -331,'10 Completed (solidified)',106633.6,0.004963023,2924.511,0.02086447,2722529.0 -332,'10 Completed (solidified)',23535890.0,0.5759003,319.855,0.03717244,nan -333,'10 Completed (solidified)',481467.9,0.00489658,2868.125,0.02088521,5334215.0 -334,'10 Completed (solidified)',106020.5,0.006391562,2867.624,0.02076026,nan -335,'10 Completed (solidified)',21213.13,0.007585733,2830.902,0.02068757,nan -336,'10 Completed (solidified)',32952970.0,0.5252379,257.2904,0.0374183,nan -337,'10 Completed (solidified)',143879.3,0.02114424,2490.076,0.02052214,nan -338,'10 Completed (solidified)',148515.0,0.006294882,2865.48,0.02074172,nan -339,'10 Completed (solidified)',63742.66,0.00884938,2783.344,0.02059398,nan -340,'10 Completed (solidified)',239734.0,0.02588945,2398.32,0.0206245,nan -341,'10 Completed (solidified)',334812.3,0.00495081,2886.39,0.02087891,5341123.0 -342,'14 Completed (net flux is small)',4683937.0,0.7978006,591.7588,0.03620011,nan -343,'10 Completed (solidified)',47527.02,0.007728008,2820.401,0.02067462,nan -344,'10 Completed (solidified)',46985320.0,0.4744931,171.2063,0.0377986,nan -345,'10 Completed (solidified)',142584.3,0.0100219,2736.98,0.02058276,nan -346,'10 Completed (solidified)',478179.1,0.0047717,2874.037,0.0208994,5342406.0 -347,'10 Completed 
(solidified)',23225450.0,0.5382402,282.0932,0.03731647,nan -348,'10 Completed (solidified)',212168.1,0.006291149,2861.025,0.02074182,nan -349,'14 Completed (net flux is small)',13992630.0,0.6913471,459.6805,0.03671188,nan -350,'10 Completed (solidified)',32498900.0,0.4976091,232.2157,0.03752706,nan -351,'10 Completed (solidified)',63198.73,0.00631423,2870.746,0.02073926,nan -352,'10 Completed (solidified)',105216.0,0.0049883,2923.131,0.02085921,2723891.0 -353,'10 Completed (solidified)',21064.1,0.004851198,2938.025,0.02089583,2721881.0 -354,'10 Completed (solidified)',333301.7,0.02121109,2465.133,0.02052373,nan -355,'10 Completed (solidified)',210619.8,0.007586838,2817.265,0.02068675,nan -356,'10 Completed (solidified)',47140.72,0.01004733,2749.131,0.02058125,nan -357,'10 Completed (solidified)',147441.0,0.007600613,2821.283,0.02068579,nan -358,'14 Completed (net flux is small)',13818550.0,0.691652,460.452,0.03670974,nan -359,'10 Completed (solidified)',46381290.0,0.4561991,154.7685,0.03786048,nan -360,'10 Completed (solidified)',141604.6,0.01488137,2614.326,0.02048306,nan -361,'10 Completed (solidified)',475254.7,0.007758182,2759.698,0.02066896,nan -362,'10 Completed (solidified)',20872.07,0.004968373,2930.534,0.02086407,2724138.0 -363,'10 Completed (solidified)',104400.1,0.004919521,2928.48,0.0208861,2721478.0 -364,'14 Completed (net flux is small)',4624273.0,0.7994033,593.0638,0.03618644,nan -365,'10 Completed (solidified)',237424.4,0.004885154,2903.541,0.02088914,5343722.0 -366,'10 Completed (solidified)',22939300.0,0.5492088,293.4287,0.03727222,nan -367,'10 Completed (solidified)',32120730.0,0.5173771,251.0662,0.03744469,nan -368,'10 Completed (solidified)',146113.8,0.004984913,2920.268,0.0208598,2723715.0 -369,'10 Completed (solidified)',471384.0,0.007719571,2761.447,0.0206724,nan -370,'10 Completed (solidified)',62617.4,0.004967652,2927.582,0.02086497,2722952.0 -371,'10 Completed (solidified)',236017.0,0.01473063,2605.362,0.02048411,nan -372,'10 Completed 
(solidified)',330383.5,0.01483217,2590.939,0.02048529,nan -373,'10 Completed (solidified)',45869560.0,0.4736628,172.2059,0.03779366,nan -374,'10 Completed (solidified)',21187.04,0.004921599,3338.838,0.0233587,2314112.0 -375,'10 Completed (solidified)',208943.4,0.006287675,2861.397,0.02074309,nan -376,'10 Completed (solidified)',24180060.0,0.4541279,696.8499,0.04055211,nan -377,'14 Completed (net flux is small)',4844080.0,0.4873124,822.8831,0.04042149,nan -378,'10 Completed (solidified)',47720.08,0.01675422,3040.147,0.02366792,nan -379,'10 Completed (solidified)',14517490.0,0.4861563,758.9068,0.04061354,nan -380,'10 Completed (solidified)',33821370.0,0.4345988,635.9952,0.04060198,nan -381,'10 Completed (solidified)',105984.5,0.006671249,3286.595,0.02339799,nan -382,'10 Completed (solidified)',143192.1,0.0165349,3034.078,0.02366246,nan -383,'10 Completed (solidified)',63590.41,0.00755527,3266.616,0.02342117,nan -384,'10 Completed (solidified)',238579.9,0.01696025,3013.321,0.02367612,nan -385,'10 Completed (solidified)',333959.0,0.02201467,2889.833,0.02383268,nan -386,'14 Completed (net flux is small)',4789306.0,0.4879734,823.0368,0.04042658,nan -387,'10 Completed (solidified)',477014.3,0.01970938,2923.675,0.02376149,nan -388,'10 Completed (solidified)',148374.1,0.004860757,3331.547,0.02335773,2313864.0 -389,'10 Completed (solidified)',48262480.0,0.3966001,532.4521,0.0406739,nan -390,'10 Completed (solidified)',211926.7,0.006666193,3279.341,0.02339818,nan -391,'10 Completed (solidified)',47431.95,0.02739904,2807.922,0.02400393,nan -392,'10 Completed (solidified)',237097.5,0.03091905,2716.407,0.0241295,nan -393,'10 Completed (solidified)',14354030.0,0.4848758,760.0076,0.04059962,nan -394,'10 Completed (solidified)',21097.77,0.01652854,3049.797,0.02366129,nan -395,'10 Completed (solidified)',142396.6,0.01943568,2967.914,0.02374857,nan -396,'10 Completed (solidified)',23901710.0,0.4575764,701.5774,0.04056599,nan -397,'10 Completed 
(solidified)',147640.8,0.00666169,3283.919,0.0233979,nan -398,'10 Completed (solidified)',47716040.0,0.4009292,540.9765,0.04067823,nan -399,'10 Completed (solidified)',105452.8,0.004850269,3334.828,0.02335746,2314660.0 -400,'10 Completed (solidified)',63246.47,0.006775808,3286.883,0.02340232,nan -401,'10 Completed (solidified)',33450960.0,0.4428366,646.5121,0.04063666,nan -402,'10 Completed (solidified)',332223.1,0.01682359,3005.411,0.02367307,nan -403,'14 Completed (net flux is small)',4735414.0,0.4878461,823.3765,0.04042443,nan -404,'10 Completed (solidified)',141614.2,0.01667604,3030.951,0.02366662,nan -405,'10 Completed (solidified)',474494.6,0.01953728,2927.643,0.02375605,nan -406,'10 Completed (solidified)',47209.01,0.01675372,3040.199,0.0236679,nan -407,'10 Completed (solidified)',210892.2,0.004985835,3323.604,0.02335842,2315656.0 -408,'10 Completed (solidified)',20975.28,0.004826134,3341.424,0.02335684,2316493.0 -409,'10 Completed (solidified)',104875.6,0.004901433,3333.468,0.02335845,2314943.0 -410,'10 Completed (solidified)',62974.42,0.01707151,3034.298,0.02367722,nan -411,'10 Completed (solidified)',23633260.0,0.4538077,699.7515,0.04053908,nan -412,'10 Completed (solidified)',14188050.0,0.4865574,760.6373,0.04061119,nan -413,'10 Completed (solidified)',235996.2,0.02195,2902.156,0.02382908,nan -414,'10 Completed (solidified)',146831.9,0.004839326,3332.182,0.0233573,2314911.0 -415,'14 Completed (net flux is small)',4680113.0,0.4880372,823.2763,0.04042637,nan -416,'10 Completed (solidified)',33075690.0,0.4368258,642.3609,0.04059799,nan -417,'10 Completed (solidified)',47192460.0,0.4016275,543.4469,0.04067392,nan -418,'10 Completed (solidified)',20854.41,0.004841639,3341.011,0.02335716,2316747.0 -419,'10 Completed (solidified)',329903.5,0.03912106,2551.272,0.02443077,nan -420,'10 Completed (solidified)',62562.1,0.004964929,3334.596,0.02335784,2315720.0 -421,'10 Completed (solidified)',209730.7,0.004865073,3327.042,0.02335791,2315226.0 -422,'10 Completed 
(solidified)',46909.84,0.02730718,2809.814,0.02400071,nan -423,'10 Completed (solidified)',14030460.0,0.4865509,761.2649,0.04060913,nan -424,'10 Completed (solidified)',140768.9,0.01982866,2959.354,0.0237608,nan -425,'10 Completed (solidified)',23373240.0,0.4559052,703.1239,0.04054573,nan -426,'10 Completed (solidified)',472055.5,0.01699228,2985.355,0.02367968,nan -427,'10 Completed (solidified)',104267.3,0.004961295,3331.752,0.02335799,2316395.0 -428,'10 Completed (solidified)',328487.0,0.01687811,3004.472,0.02367468,nan -429,'10 Completed (solidified)',146003.6,0.006659145,3284.051,0.02339788,nan -430,'10 Completed (solidified)',234664.5,0.01700483,3012.598,0.02367743,nan -431,'10 Completed (solidified)',32699940.0,0.4406824,647.5958,0.04061316,nan -432,'10 Completed (solidified)',46645820.0,0.3946382,533.0068,0.04065122,nan -433,'10 Completed (solidified)',469220.5,0.01987327,2920.674,0.0237666,nan -434,'10 Completed (solidified)',20708.39,0.004946114,3338.193,0.02335915,2314708.0 -435,'10 Completed (solidified)',13875620.0,0.4867995,762.1117,0.04060841,nan -436,'10 Completed (solidified)',208560.6,0.006661259,3279.622,0.0233982,nan -437,'10 Completed (solidified)',46601.02,0.027608,2803.759,0.02401124,nan -438,'14 Completed (net flux is small)',4628427.0,0.4875049,824.3976,0.0404184,nan -439,'10 Completed (solidified)',62142.88,0.00665846,3289.94,0.02339763,nan -440,'10 Completed (solidified)',103627.7,0.01197159,3152.977,0.02353358,nan -441,'10 Completed (solidified)',326290.1,0.0168145,3006.089,0.02367277,nan -442,'10 Completed (solidified)',23105610.0,0.4561201,704.4975,0.04054274,nan -443,'10 Completed (solidified)',233057.2,0.01996282,2945.745,0.02376626,nan -444,'10 Completed (solidified)',139843.0,0.01694099,3025.057,0.02367448,nan -445,'10 Completed (solidified)',145004.7,0.01866726,2992.243,0.02372498,nan -446,'10 Completed (solidified)',47679.4,0.00873239,3299.324,0.02383681,nan -447,'10 Completed 
(solidified)',207150.6,0.006665511,3279.575,0.02339822,nan -448,'10 Completed (solidified)',32340440.0,0.4396562,647.655,0.04060352,nan -449,'10 Completed (solidified)',46148360.0,0.4003576,545.2065,0.04065335,nan -450,'10 Completed (solidified)',466101.6,0.02003548,2917.369,0.02377172,nan -451,'10 Completed (solidified)',21141.59,0.004822528,3390.522,0.02365002,3149218.0 -452,'10 Completed (solidified)',238370.6,0.008707927,3270.149,0.02383755,nan -453,'10 Completed (solidified)',24003040.0,0.3675011,1058.69,0.04423333,nan -454,'10 Completed (solidified)',14404990.0,0.3790924,1117.791,0.0442381,nan -455,'14 Completed (net flux is small)',4803248.0,0.3828601,1176.859,0.0441251,nan -456,'10 Completed (solidified)',148079.2,0.004907715,3376.478,0.0236544,3149895.0 -457,'10 Completed (solidified)',143021.3,0.008694152,3285.269,0.02383593,nan -458,'10 Completed (solidified)',105754.7,0.006259765,3350.581,0.02371791,nan -459,'10 Completed (solidified)',211548.3,0.006279695,3340.067,0.02371928,nan -460,'10 Completed (solidified)',33578400.0,0.3654014,1002.995,0.04437669,nan -461,'10 Completed (solidified)',63428.58,0.004923662,3384.22,0.02365487,3147178.0 -462,'10 Completed (solidified)',47947360.0,0.34641,907.9769,0.0443877,nan -463,'10 Completed (solidified)',476753.0,0.007655478,3255.57,0.02378863,nan -464,'14 Completed (net flux is small)',4752958.0,0.3828743,1177.163,0.04412445,nan -465,'10 Completed (solidified)',142200.5,0.01324471,3189.579,0.02406176,nan -466,'10 Completed (solidified)',333506.5,0.01523243,3120.053,0.0241665,nan -467,'10 Completed (solidified)',237127.4,0.004921308,3352.664,0.02365585,5222404.0 -468,'10 Completed (solidified)',47417.2,0.01342845,3200.245,0.02406954,nan -469,'10 Completed (solidified)',21051.15,0.004950212,3387.655,0.02365577,3146448.0 -470,'10 Completed (solidified)',23753140.0,0.3688087,1060.277,0.04425027,nan -471,'10 Completed (solidified)',63136.32,0.004868617,3385.466,0.02365231,3146571.0 -472,'10 Completed 
(solidified)',14254930.0,0.3791044,1118.687,0.04423563,nan -473,'10 Completed (solidified)',331928.7,0.007691312,3277.208,0.02378907,nan -474,'10 Completed (solidified)',105218.9,0.004929378,3380.069,0.02365527,3148950.0 -475,'10 Completed (solidified)',473790.7,0.01756286,3052.445,0.02429109,nan -476,'10 Completed (solidified)',33231250.0,0.3655402,1004.745,0.04437333,nan -477,'10 Completed (solidified)',147277.3,0.007550437,3318.406,0.0237802,nan -478,'14 Completed (net flux is small)',4703430.0,0.3831222,1177.318,0.04412807,nan -479,'10 Completed (solidified)',210507.3,0.0049124,3370.327,0.02365465,3151154.0 -480,'10 Completed (solidified)',141489.8,0.008686349,3285.612,0.02383554,nan -481,'10 Completed (solidified)',47453870.0,0.3467873,910.6069,0.0443849,nan -482,'10 Completed (solidified)',62799.03,0.004877834,3385.275,0.02365274,3146811.0 -483,'10 Completed (solidified)',47166.7,0.004899471,3383.3,0.02365379,5232110.0 -484,'10 Completed (solidified)',20932.29,0.004933644,3388.05,0.02365519,3147529.0 -485,'10 Completed (solidified)',104680.2,0.01032782,3262.73,0.02391516,nan -486,'10 Completed (solidified)',14107170.0,0.3792016,1119.517,0.04423475,nan -487,'10 Completed (solidified)',146535.1,0.007589329,3317.592,0.02378206,nan -488,'10 Completed (solidified)',23507820.0,0.3664666,1060.851,0.04421021,nan -489,'10 Completed (solidified)',235861.6,0.004956563,3352.018,0.02365749,5219676.0 -490,'10 Completed (solidified)',330183.6,0.004969667,3336.658,0.02365863,5225622.0 -491,'10 Completed (solidified)',46899.71,0.007677762,3322.209,0.02378591,nan -492,'10 Completed (solidified)',46968230.0,0.3540134,921.0528,0.04446798,nan -493,'10 Completed (solidified)',32903150.0,0.3668937,1006.74,0.04438909,nan -494,'10 Completed (solidified)',140699.1,0.004812783,3370.296,0.02365025,5221471.0 -495,'10 Completed (solidified)',471692.0,0.004798292,3318.109,0.02365137,5211433.0 -496,'14 Completed (net flux is small)',4654696.0,0.3833212,1177.511,0.04413078,nan -497,'10 
Completed (solidified)',209348.3,0.004923127,3370.173,0.02365533,3148973.0 -498,'10 Completed (solidified)',234408.1,0.01260851,3188.435,0.02403099,nan -499,'10 Completed (solidified)',104108.2,0.006267687,3350.485,0.02371829,nan -500,'10 Completed (solidified)',20822.84,0.004819431,3390.608,0.02364988,3147459.0 -501,'10 Completed (solidified)',328330.4,0.004943317,3337.527,0.02365739,5213876.0 -502,'10 Completed (solidified)',13961810.0,0.377148,1120.683,0.04419877,nan -503,'10 Completed (solidified)',62448.42,0.004897034,3384.859,0.02365363,3148790.0 -504,'10 Completed (solidified)',145755.3,0.004944177,3375.735,0.02365589,3153403.0 -505,'10 Completed (solidified)',23265610.0,0.3778076,1065.303,0.04437592,nan -506,'10 Completed (solidified)',32566500.0,0.3587405,1004.02,0.04426482,nan -507,'14 Completed (net flux is small)',4608635.0,0.3864842,1176.152,0.04418389,nan -508,'10 Completed (solidified)',469025.2,0.007692249,3255.536,0.02379036,nan -509,'10 Completed (solidified)',208153.0,0.004920105,3370.297,0.02365518,3148415.0 -510,'10 Completed (solidified)',46490860.0,0.3502183,920.2849,0.04440759,nan -511,'10 Completed (solidified)',62020.04,0.007612334,3325.092,0.02378272,nan -512,'10 Completed (solidified)',232900.8,0.007650887,3293.302,0.02378628,nan -513,'10 Completed (solidified)',46585.03,0.004914878,3383.014,0.02365451,5225288.0 -514,'10 Completed (solidified)',103361.7,0.007569635,3322.06,0.02378089,nan -515,'10 Completed (solidified)',20671.62,0.007591161,3329.516,0.02378149,nan -516,'10 Completed (solidified)',139793.6,0.004936675,3367.602,0.02365582,5218234.0 -517,'10 Completed (solidified)',23027390.0,0.3667052,1063.736,0.04420518,nan -518,'10 Completed (solidified)',326092.6,0.007677765,3278.048,0.02378839,nan -519,'10 Completed (solidified)',465944.0,0.004942834,3315.133,0.02365799,5228914.0 -520,'10 Completed (solidified)',206809.5,0.004833624,3372.284,0.02365115,3147951.0 -521,'10 Completed 
(solidified)',13818380.0,0.378271,1121.13,0.04421587,nan -522,'10 Completed (solidified)',32233840.0,0.3596164,1005.862,0.04427376,nan -523,'10 Completed (solidified)',144699.7,0.007571091,3318.073,0.02378118,nan -524,'10 Completed (solidified)',46018950.0,0.3476971,918.7744,0.04437178,nan From 2bacac719025f8bddb9e0fe9e25c86fcfc25ffff Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 22 Apr 2025 11:10:46 +0200 Subject: [PATCH 013/105] try for grid plot. need to fix log single plots --- tools/post_processing_grid/plot_grid.py | 707 ++++++++++++++++++++++-- 1 file changed, 649 insertions(+), 58 deletions(-) diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index 15fd63082..2bac0e4b9 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -6,6 +6,7 @@ import matplotlib.pyplot as plt import matplotlib.colors as mcolors import matplotlib.cm as cm +import matplotlib.ticker as ticker from matplotlib.ticker import LogFormatterMathtext def load_extracted_data(csv_path): @@ -234,7 +235,7 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): print(f"Plot grid_status_summary.png saved to {output_path}") -def kde_cumulative(values, color, ax, bw_adjust=0.3, **kwargs): +def kde_cumulative_linear(values, color, ax, bw_adjust=0.3, **kwargs): """ Plot a cumulative KDE curve on the given x-axis. @@ -268,7 +269,47 @@ def kde_cumulative(values, color, ax, bw_adjust=0.3, **kwargs): **kwargs ) -def plot_kde_cumulative(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None, vmax=None, key_label="Parameter", ax=None, save_path=None, bw_adjust=0.3, tick_values=None, norm=None): +def kde_cumulative_log(values, color, ax, bw_adjust=0.3, **kwargs): + """ + Plot a cumulative KDE curve on the given x-axis with a log scale for the x-axis. + + Parameters + ---------- + values : array-like + Data points on the x-axis used to calculate the KDE. 
+ + color : str or matplotlib color + The color of the KDE curve. + + ax : matplotlib.axes.Axes + The axis on which to plot the KDE. + + bw_adjust : float, optional, default=0.3 + Bandwidth adjustment factor : controls the smoothness of the KDE. + Smaller values make the KDE more sensitive to data, while larger values smooth it out. + + **kwargs : keyword arguments, optional + Additional arguments passed to `sns.kdeplot`, such as `label`, `linewidth`, etc. + """ + # Plot the cumulative KDE + sns.kdeplot( + values, + cumulative=True, + bw_adjust=bw_adjust, + clip=(np.min(values), np.max(values)), + common_grid=True, + color=color, + ax=ax, + **kwargs + ) + + # Set x-axis to log scale + ax.set_xscale('log') + + # Format the x-axis to show powers of 10 + ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(lambda x, _: r'$10^{{{}}}$'.format(int(np.log10(x))))) + +def plot_kde_cumulative_linear(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None, vmax=None, key_label="Parameter", ax=None, save_path=None, bw_adjust=0.3, tick_values=None, norm=None): """ Plot cumulative KDE curves for one of the output parameters of the grid (like esc_rate_total, solidification_time) on the x-axis. The different curves correspond to a input parameter from the grid with a color mapped on the right side of the plot. 
@@ -329,7 +370,7 @@ def plot_kde_cumulative(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None if len(values) < 2: continue color = cmap(norm(key)) - kde_cumulative(values, color=color, ax=ax, bw_adjust=bw_adjust, label=str(key)) + kde_cumulative_linear(values, color=color, ax=ax, bw_adjust=bw_adjust, label=str(key)) ax.set_xlabel(xlabel, fontsize=12) ax.set_ylabel(ylabel, fontsize=12) @@ -356,94 +397,341 @@ def plot_kde_cumulative(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None plt.savefig(save_path, dpi=300) plt.close(fig) -if __name__ == '__main__': +def plot_kde_cumulative_linear_clean( + data_dict, + xlabel, + ylabel, + cmap=plt.cm.plasma, + vmin=None, + vmax=None, + key_label="Parameter", + ax=None, + save_path=None, + bw_adjust=0.3, + tick_values=None, + norm=None, + return_cbar=False # New parameter +): + """ + Plot cumulative KDE curves for one of the output parameters of the grid. + Optionally return a colorbar scalar mappable to use for shared colorbar. + """ + if ax is None: + fig, ax = plt.subplots(figsize=(10, 6)) + else: + fig = ax.figure - # Paths to the csv file and plot directory - grid_name = 'escape_grid_4_params_Pxuv_a_epsilon_fO2' - data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/processed_data/{grid_name}/{grid_name}_extracted_data.csv' - plots_path = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_plots/{grid_name}/' + keys = sorted(data_dict.keys()) + if vmin is None: + vmin = min(keys) + if vmax is None: + vmax = max(keys) - # Load the data and check if the plot directory exists - df, grid_params, extracted_outputs = load_extracted_data(data_dir) - plot_dir_exists(plots_path) - # Group extracted outputs by grid parameters - grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) + # Use provided or default normalization + if norm is None: + norm = mcolors.Normalize(vmin=vmin, vmax=vmax) + sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) - # Plot the grid status - 
plot_grid_status(df, plots_path) + # Plot each dataset + for key in keys: + values = np.array(data_dict[key]) + if len(values) < 2: + continue + color = cmap(norm(key)) + kde_cumulative_linear(values, color=color, ax=ax, bw_adjust=bw_adjust, label=str(key)) - # Single plots : normalized cumulative distributions - # Labels and colormaps for each parameter/output - param_label_map = { - "orbit.semimajoraxis": "Semi-major axis [AU]", - "escape.zephyrus.Pxuv": r"$P_{XUV}$ [bar]", - "escape.zephyrus.efficiency": r"Escape efficiency factor, $\epsilon$", - "outgas.fO2_shift_IW":r"$log_{10}$($fO_2$) ($\Delta$ IW)" - } - output_label_map = { - "solidification_time": "Solidification time [yr]", - "esc_rate_total": "Total escape rate [kg/s]", - "Phi_global": "Melt fraction [%]", - "P_surf": "Surface pressure [bar]", - "atm_kg_per_mol": "Atmospheric mass [kg/mol]" - } - colormaps_by_param = { - "orbit.semimajoraxis": cm.plasma, - "escape.zephyrus.Pxuv": cm.cividis, - "escape.zephyrus.efficiency": cm.spring, - "outgas.fO2_shift_IW": cm.coolwarm - } + ax.set_xlabel(xlabel, fontsize=12) + ax.set_ylabel(ylabel, fontsize=12) + ax.set_ylim(0, 1.02) + ax.grid(alpha=0.2) - # List of params/outputs that should be plotted with a log-scale x-axis - log_scale_grid_params = ["escape.zephyrus.Pxuv"] - #log_scale_outputs = ["esc_rate_total", "P_surf", "atm_kg_per_mol"] - log_scale_outputs = [] + # Only return colorbar smappable, don't draw it (caller will handle that) + if return_cbar: + sm.set_array(np.linspace(vmin, vmax, 100)) # Needed for colorbar + sm.key_label = key_label # Store label for later + sm.tick_values = tick_values # Custom attribute to help outside + return sm + + # Otherwise, draw internal colorbar normally + sm.set_array(np.linspace(vmin, vmax, 100)) + cbar = fig.colorbar(sm, ax=ax) + cbar.set_label(key_label, fontsize=12) + + if tick_values is not None: + cbar.set_ticks(tick_values) + if isinstance(norm, mcolors.LogNorm): + 
cbar.set_ticklabels([f"$10^{{{int(np.log10(t))}}}$" for t in tick_values]) + cbar.ax.yaxis.set_major_formatter(LogFormatterMathtext()) + else: + cbar.set_ticklabels([str(t) for t in tick_values]) + + if save_path: + plt.savefig(save_path, dpi=300) + plt.close(fig) + + return None + +def plot_kde_cumulative_log(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None, vmax=None, key_label="Parameter", ax=None, save_path=None, bw_adjust=0.3, tick_values=None, norm=None): + """ + Plot cumulative KDE curves for one of the output parameters of the grid (like esc_rate_total, solidification_time) on the x-axis. + The different curves correspond to a input parameter from the grid with a color mapped on the right side of the plot. + + Parameters + ---------- + data_dict : dict + Dictionary of datasets, where each key represents a unique variable (e.g., semi-major axis, temperature), + and each value is a list or array of values (e.g., solidification times). + + xlabel : str + Label for the x-axis. + + ylabel : str + Label for the y-axis. + + cmap : matplotlib.colors.Colormap + The colormap used to assign colors to each dataset based on their corresponding key value. + + vmin : float + The minimum value of the key variable to normalize the colormap. + + vmax : float + The maximum value of the key variable to normalize the colormap. + + ax : matplotlib.axes.Axes, optional + The axis to plot on. If None, a new figure and axis will be created. + + save_path : str, optional + Path to save the generated plot. If None, the plot will not be saved. + + tick_values : list of float, optional + Values to use as ticks on the colorbar. Useful for discrete parameter steps. 
+ + + Returns + ------- + '""" + if ax is None: + fig, ax = plt.subplots(figsize=(10, 6)) + else: + fig = ax.figure + + keys = sorted(data_dict.keys()) + if vmin is None: + vmin = min(keys) + if vmax is None: + vmax = max(keys) + + # Use provided or default norm + if norm is None: + norm = mcolors.Normalize(vmin=vmin, vmax=vmax) + sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) + + # Plot each group + for key in keys: + values = np.array(data_dict[key]) + if len(values) < 2: + continue + color = cmap(norm(key)) + kde_cumulative_log(values, color=color, ax=ax, bw_adjust=bw_adjust, label=str(key)) + + ax.set_xlabel(xlabel, fontsize=12) + ax.set_ylabel(ylabel, fontsize=12) + ax.set_ylim(0, 1.02) + ax.grid(alpha=0.2) + + # Colorbar setup + sm.set_array(np.linspace(vmin, vmax, 100)) + cbar = fig.colorbar(sm, ax=ax) + cbar.set_label(key_label, fontsize=12) + + # Use grid param values as colorbar ticks + if tick_values is not None: + cbar.set_ticks(tick_values) + if isinstance(norm, mcolors.LogNorm): + # Format log ticks with LaTeX-style math text (e.g., 10⁻³) + cbar.set_ticklabels([f"$10^{{{int(np.log10(t))}}}$" for t in tick_values]) + cbar.ax.yaxis.set_major_formatter(LogFormatterMathtext()) + else: + # Linear scale ticks + cbar.set_ticklabels([str(t) for t in tick_values]) + + if save_path: + plt.savefig(save_path, dpi=300) + plt.close(fig) + +def generate_single_plots_linear(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params): + """ + Generate and save normalized cumulative distribution plots for each output vs. grid parameter. + + Parameters: + ---------- + extracted_outputs : list of str + List of extracted output quantities to plot. + + grouped_data : dict + Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) + + grid_params : dict + Parameter values used in the grid. + + plots_path : str + Directory where plots will be saved. 
+ + param_label_map : dict + Dictionary containing the label of the grid parameter for the plot. + + colormaps_by_param : dict + Dictionary containing the colormap to use for each grid parameter for the plot. - # Loop through all extracted outputs and all grid parameters + output_label_map : dict, optional + Dictionary containing the label of the extracted output quantity for the plot. + + log_scale_grid_params : list of str, optional + Parameters to use log scale for colormap normalization. (like escape.zephyrus.Pxuv) + """ + + if param_label_map is None: + raise ValueError("param_label_map must be provided.") + + if colormaps_by_param is None: + raise ValueError("colormaps_by_param must be provided.") + + if output_label_map is None: + raise ValueError("output_label_map must be provided.") + + if log_scale_grid_params is None: + log_scale_grid_params = [] for output_name in extracted_outputs: for param, cmap in colormaps_by_param.items(): data_key = f"{output_name}_per_{param}" if data_key not in grouped_data: - print(f"WARNING : Skipping {data_key} — not found in grouped_data") + print(f"WARNING: Skipping {data_key} — not found in grouped_data") continue data_dict_raw = grouped_data[data_key] data_dict = data_dict_raw.copy() - # Convert melt fraction to percentage if output_name == "Phi_global": data_dict = {k: [v_i * 100 for v_i in v] for k, v in data_dict.items()} - # Get tick values explicitly from grid params tick_values = grid_params.get(param) vmin = min(tick_values) vmax = max(tick_values) - # Labels xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) ylabel = "Normalized cumulative fraction of simulations" key_label = param_label_map.get(param, param.replace("_", " ").title()) - # Save name and figure setup + save_dir = Path(plots_path) / 'single_plot_linear' + plot_dir_exists(save_dir) + save_name = f"cumulative_{output_name}_vs_{param.replace('.', '_')}.png" - save_path = plots_path + 'single_plot/' + save_name + save_path 
= save_dir / save_name fig, ax = plt.subplots(figsize=(10, 6)) - # Log scale X for specific outputs (not active here unless you add them to log_scale_outputs) - if output_name in log_scale_outputs: - ax.set_xscale("log") + norm = ( + mcolors.LogNorm(vmin=vmin, vmax=vmax) + if param in log_scale_grid_params + else mcolors.Normalize(vmin=vmin, vmax=vmax) + ) - # Color scale norm - if param in log_scale_grid_params: - norm = mcolors.LogNorm(vmin=vmin, vmax=vmax) - else: - norm = mcolors.Normalize(vmin=vmin, vmax=vmax) + plot_kde_cumulative_linear( + data_dict=data_dict, + xlabel=xlabel, + ylabel=ylabel, + cmap=cmap, + vmin=vmin, + vmax=vmax, + key_label=key_label, + tick_values=tick_values, + save_path=save_path, + norm=norm, + ax=ax + ) + plt.close(fig) + + print("All linear-scale single plots saved.") - # Plot - plot_kde_cumulative( +def generate_single_plots_log(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params): + """ + Generate and save normalized cumulative distribution plots for each output vs. grid parameter. The x-axis is in log scale. + + Parameters: + ---------- + extracted_outputs : list of str + List of extracted output quantities to plot. + + grouped_data : dict + Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) + + grid_params : dict + Parameter values used in the grid. + + plots_path : str + Directory where plots will be saved. + + param_label_map : dict + Dictionary containing the label of the grid parameter for the plot. + + colormaps_by_param : dict + Dictionary containing the colormap to use for each grid parameter for the plot. + + output_label_map : dict, optional + Dictionary containing the label of the extracted output quantity for the plot. + + log_scale_grid_params : list of str, optional + Parameters to use log scale for colormap normalization. 
(like escape.zephyrus.Pxuv) + """ + + if param_label_map is None: + raise ValueError("param_label_map must be provided.") + if colormaps_by_param is None: + raise ValueError("colormaps_by_param must be provided.") + if output_label_map is None: + raise ValueError("output_label_map must be provided.") + if log_scale_grid_params is None: + log_scale_grid_params = [] + + for output_name in extracted_outputs: + for param, cmap in colormaps_by_param.items(): + data_key = f"{output_name}_per_{param}" + + if data_key not in grouped_data: + print(f"WARNING: Skipping {data_key} — not found in grouped_data") + continue + + data_dict_raw = grouped_data[data_key] + data_dict = data_dict_raw.copy() + + if output_name == "Phi_global": + data_dict = {k: [v_i * 100 for v_i in v] for k, v in data_dict.items()} + + tick_values = grid_params.get(param) + vmin = min(tick_values) + vmax = max(tick_values) + + xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) + ylabel = "Normalized cumulative fraction of simulations" + key_label = param_label_map.get(param, param.replace("_", " ").title()) + + save_dir = Path(plots_path) / 'single_plot_log' + plot_dir_exists(save_dir) + + save_name = f"cumulative_{output_name}_vs_{param.replace('.', '_')}_log.png" + save_path = save_dir / save_name + + fig, ax = plt.subplots(figsize=(10, 6)) + + norm = ( + mcolors.LogNorm(vmin=vmin, vmax=vmax) + if param in log_scale_grid_params + else mcolors.Normalize(vmin=vmin, vmax=vmax) + ) + + plot_kde_cumulative_log( data_dict=data_dict, xlabel=xlabel, ylabel=ylabel, @@ -456,6 +744,309 @@ def plot_kde_cumulative(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None norm=norm, ax=ax ) + plt.close(fig) - print(f"All single plots saved to {save_path}") \ No newline at end of file + print("All log-scale single plots saved.") + +def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, 
log_scale_grid_params): + """ + Generate and save normalized cumulative distribution plots for each output vs. grid parameters. + This creates a subplot where each column corresponds to one extracted output, and each row corresponds to one grid parameter. + + Parameters: + ---------- + extracted_outputs : list of str + List of extracted output quantities to plot. + + grouped_data : dict + Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) + + grid_params : dict + Parameter values used in the grid. + + plots_path : str + Directory where plots will be saved. + + param_label_map : dict + Dictionary containing the label of the grid parameter for the plot. + + colormaps_by_param : dict + Dictionary containing the colormap to use for each grid parameter for the plot. + + output_label_map : dict, optional + Dictionary containing the label of the extracted output quantity for the plot. + + log_scale_grid_params : list of str, optional + Parameters to use log scale for colormap normalization. (like escape.zephyrus.Pxuv) + """ + + if param_label_map is None: + raise ValueError("param_label_map must be provided.") + if colormaps_by_param is None: + raise ValueError("colormaps_by_param must be provided.") + if output_label_map is None: + raise ValueError("output_label_map must be provided.") + if log_scale_grid_params is None: + log_scale_grid_params = [] + + num_cols = len(extracted_outputs) + num_rows = len(grid_params) + + # Create subplots with the appropriate layout + fig, axes = plt.subplots(num_rows, num_cols, figsize=(num_cols * 16, num_rows * 12)) + plt.subplots_adjust(hspace=0.15, wspace=0.15) + + if num_rows == 1: + axes = np.expand_dims(axes, axis=0) # Make sure it's 2D if only one row. 
+ + for i, output_name in enumerate(extracted_outputs): + for j, (param, cmap) in enumerate(colormaps_by_param.items()): + data_key = f"{output_name}_per_{param}" + + if data_key not in grouped_data: + print(f"WARNING: Skipping {data_key} — not found in grouped_data") + continue + + data_dict_raw = grouped_data[data_key] + data_dict = data_dict_raw.copy() + + if output_name == "Phi_global": + data_dict = {k: [v_i * 100 for v_i in v] for k, v in data_dict.items()} + + tick_values = grid_params.get(param) + vmin = min(tick_values) + vmax = max(tick_values) + + xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) + ylabel = "Normalized cumulative fraction of simulations" + key_label = param_label_map.get(param, param.replace("_", " ").title()) + + # Select the axis for this subplot + ax = axes[j, i] + + norm = ( + mcolors.LogNorm(vmin=vmin, vmax=vmax) + if param in log_scale_grid_params + else mcolors.Normalize(vmin=vmin, vmax=vmax) + ) + + plot_kde_cumulative_linear( + data_dict=data_dict, + xlabel=xlabel, + ylabel=ylabel, + cmap=cmap, + vmin=vmin, + vmax=vmax, + key_label=key_label, + tick_values=tick_values, + save_path=None, # Don't save the plot yet + norm=norm, + ax=ax + ) + + # Set titles for each subplot + if i == 0: + ax.set_ylabel(ylabel, fontsize=12) + if j == num_rows - 1: + ax.set_xlabel(xlabel, fontsize=12) + + # Customize plot with grid and ticks + ax.grid(alpha=0.2) + ax.set_ylim(0, 1.02) + + # Save the complete subplot figure + save_dir = Path(plots_path) / 'grid_plot' + plot_dir_exists(save_dir) + + save_name = "cumulative_grid_plot_linear.png" + save_path = save_dir / save_name + plt.savefig(save_path, dpi=300) + plt.close(fig) + + print(f"All subplot plots saved to {save_path}") + +def generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params): + """ + Generate and save normalized cumulative distribution plots for each 
output vs. grid parameters. + This creates a subplot where each column corresponds to one extracted output, and each row corresponds to one grid parameter. + + Parameters: + ---------- + extracted_outputs : list of str + List of extracted output quantities to plot. + + grouped_data : dict + Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) + + grid_params : dict + Parameter values used in the grid. + + plots_path : str + Directory where plots will be saved. + + param_label_map : dict + Dictionary containing the label of the grid parameter for the plot. + + colormaps_by_param : dict + Dictionary containing the colormap to use for each grid parameter for the plot. + + output_label_map : dict, optional + Dictionary containing the label of the extracted output quantity for the plot. + + log_scale_grid_params : list of str, optional + Parameters to use log scale for colormap normalization. (like escape.zephyrus.Pxuv) + """ + if param_label_map is None or colormaps_by_param is None or output_label_map is None: + raise ValueError("param_label_map, colormaps_by_param, and output_label_map must be provided.") + if log_scale_grid_params is None: + log_scale_grid_params = [] + + num_cols = len(extracted_outputs) + num_rows = len(grid_params) + + fig, axes = plt.subplots(num_rows, num_cols, figsize=(num_cols * 6, num_rows * 5), sharex='col', sharey='row') + plt.subplots_adjust(hspace=0.1, wspace=0.1) + + if num_rows == 1: + axes = np.expand_dims(axes, axis=0) + if num_cols == 1: + axes = np.expand_dims(axes, axis=1) + + param_list = list(grid_params.keys()) + + for i, output_name in enumerate(extracted_outputs): + for j, param in enumerate(param_list): + data_key = f"{output_name}_per_{param}" + cmap = colormaps_by_param[param] + + if data_key not in grouped_data: + print(f"WARNING: Skipping {data_key} — not found in grouped_data") + continue + + data_dict_raw = grouped_data[data_key] + data_dict = {k: [v_i * 100 if output_name == "Phi_global" 
else v_i for v_i in v] for k, v in data_dict_raw.items()} + + tick_values = grid_params.get(param) + vmin, vmax = min(tick_values), max(tick_values) + + xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) + norm = ( + mcolors.LogNorm(vmin=vmin, vmax=vmax) + if param in log_scale_grid_params + else mcolors.Normalize(vmin=vmin, vmax=vmax) + ) + + ax = axes[j, i] + cbar = plot_kde_cumulative_linear_clean( + data_dict=data_dict, + xlabel="", + ylabel="", + cmap=cmap, + vmin=vmin, + vmax=vmax, + key_label=None, + tick_values=tick_values, + save_path=None, + norm=norm, + ax=ax, + return_cbar=True + ) + + if i != 0: + ax.set_ylabel("") + ax.set_yticklabels([]) + if j != num_rows - 1: + ax.set_xlabel("") + ax.set_xticklabels([]) + + ax.grid(alpha=0.2) + ax.set_ylim(0, 1.02) + + # Add shared y-axis label + fig.text(0.04, 0.5, "Normalized cumulative fraction of simulations", va='center', rotation='vertical', fontsize=16) + + # Add x-axis labels under each column + for i, output_name in enumerate(extracted_outputs): + xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) + axes[-1, i].set_xlabel(xlabel, fontsize=12) + + # Create a single colorbar on the right side + fig.subplots_adjust(right=0.87) + cbar_ax = fig.add_axes([0.9, 0.15, 0.015, 0.7]) # [left, bottom, width, height] + fig.colorbar(cbar, cax=cbar_ax, orientation='vertical', label="Parameter value") + + # Save plot + save_dir = Path(plots_path) / 'grid_plot' + plot_dir_exists(save_dir) + save_path = save_dir / "cumulative_grid_plot_linear_clean.png" + plt.savefig(save_path, dpi=300) + plt.close(fig) + + print(f"All subplot plots saved to {save_path}") + +if __name__ == '__main__': + + # Paths to the csv file and plot directory + grid_name = 'escape_grid_4_params_Pxuv_a_epsilon_fO2' + data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/{grid_name}_extracted_data.csv' + plots_path = 
f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_plots/{grid_name}/' + + # Load the data and check if the plot directory exists + df, grid_params, extracted_outputs = load_extracted_data(data_dir) + plot_dir_exists(plots_path) + # Group extracted outputs by grid parameters + grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) + + # Plot the grid status + plot_grid_status(df, plots_path) + + # Single plots + param_label_map = { + "orbit.semimajoraxis": "Semi-major axis [AU]", + "escape.zephyrus.Pxuv": r"$P_{XUV}$ [bar]", + "escape.zephyrus.efficiency": r"Escape efficiency factor, $\epsilon$", + "outgas.fO2_shift_IW":r"$log_{10}$($fO_2$) ($\Delta$ IW)"} + output_label_map = { + "solidification_time": "Solidification time [yr]", + "esc_rate_total": "Total escape rate [kg/s]", + "Phi_global": "Melt fraction [%]", + "P_surf": "Surface pressure [bar]", + "atm_kg_per_mol": "Atmospheric mass [kg/mol]"} + colormaps_by_param = { + "orbit.semimajoraxis": cm.plasma, + "escape.zephyrus.Pxuv": cm.cividis, + "escape.zephyrus.efficiency": cm.spring, + "outgas.fO2_shift_IW": cm.coolwarm} + + log_scale_grid_params = ["escape.zephyrus.Pxuv"] + + # generate_single_plots_linear( + # extracted_outputs=extracted_outputs, + # grouped_data=grouped_data, + # grid_params=grid_params, + # plots_path=plots_path, + # param_label_map=param_label_map, + # colormaps_by_param=colormaps_by_param, + # output_label_map=output_label_map, + # log_scale_grid_params=log_scale_grid_params) + + # generate_single_plots_log( + # extracted_outputs=extracted_outputs, + # grouped_data=grouped_data, + # grid_params=grid_params, + # plots_path=plots_path, + # param_label_map=param_label_map, + # colormaps_by_param=colormaps_by_param, + # output_label_map=output_label_map, + # log_scale_grid_params=log_scale_grid_params) + + generate_grid_plot( + extracted_outputs=extracted_outputs, + grouped_data=grouped_data, + grid_params=grid_params, + plots_path=plots_path, + 
param_label_map=param_label_map, + colormaps_by_param=colormaps_by_param, + output_label_map=output_label_map, + log_scale_grid_params=log_scale_grid_params) \ No newline at end of file From 36858ca5aeb184efebba14cf759341e4a62db363 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 23 Apr 2025 12:12:26 +0200 Subject: [PATCH 014/105] Clean and clear comment in post_processing_grid.py --- .../post_processing_grid.py | 175 +++++++++--------- 1 file changed, 91 insertions(+), 84 deletions(-) diff --git a/tools/post_processing_grid/post_processing_grid.py b/tools/post_processing_grid/post_processing_grid.py index bb860c1d0..0e04ab294 100644 --- a/tools/post_processing_grid/post_processing_grid.py +++ b/tools/post_processing_grid/post_processing_grid.py @@ -1,37 +1,34 @@ -import os from pathlib import Path import pandas as pd import toml +import os import re +import ast import numpy as np import csv -import ast from typing import Tuple, Dict, List, Any def load_grid_cases(grid_dir: Path): """ - Load information for each simulation of a PROTEUS grid. - - Read runtime_helpfile.csv, init_parameters (from toml file), and status + Load information for each simulation of a PROTEUS grid. + Read 'runtime_helpfile.csv', 'init_coupler.toml' and status files for each simulation of the grid. Parameters ---------- - grid_dir : Path - Path to the grid directory containing the case_* folders + grid_dir : Path or str + Path to the grid directory containing the 'case_*' folders Returns ---------- combined_data : list List of dictionaries, each containing: - - 'init_parameters' (dict): - Parameters loaded from init_coupler.toml - - 'output_values' (pandas.DataFrame): - Data from runtime_helpfile.csv - - 'status' (str or None): - Status string from status file (if available) + - 'init_parameters' (dict): Parameters loaded from `init_coupler.toml`. + - 'output_values' (pandas.DataFrame): Data from `runtime_helpfile.csv`. 
+ - 'status' (str): Status string from the `status` file, or 'unknown' if unavailable. """ + combined_data = [] grid_dir = Path(grid_dir) @@ -61,8 +58,10 @@ except Exception as e: print(f"Error processing {case.name}: {e}") + # Count the number of simulations per status statuses = [str(case.get('status', 'unknown')).strip() or 'unknown' for case in combined_data] status_counts = pd.Series(statuses).value_counts().sort_values(ascending=False) + print('-----------------------------------------------------------') print(f"Total number of simulations: {len(statuses)}") print('-----------------------------------------------------------') @@ -74,44 +73,50 @@ def get_grid_parameters(grid_dir: str): """ - Extracts grid parameters names and values from the manager.log file + Extract grid parameter names and values from the 'manager.log' file. Parameters ---------- grid_dir : str - Path to the grid directory + Path to the directory of the PROTEUS grid Returns ------- - dict - A dictionary where each key is the parameter name and the value is a list of parameter values. + param_grid : dict + A dictionary where each key is a parameter name and its value is the list of parameter values used across the entire grid. + + case_params : dict + Dictionary containing each case number with the name and values of the tested parameters in this grid.
""" + log_file = os.path.join(grid_dir, 'manager.log') + # Check if the 'manager.log' file exists if not os.path.exists(log_file): print(f"Error: manager.log not found at {log_file}") return {}, {} + # Read all lines from the 'manager.log' file with open(log_file, 'r') as file: lines = file.readlines() param_grid = {} case_params = {} + # Expressions to match the relevant lines dimension_pattern = re.compile(r"parameter:\s*(\S+)") values_pattern = re.compile(r"values\s*:\s*\[(.*?)\]") - case_line_pattern = re.compile(r"\]\s+(\d+):\s+(\{.*\})") # Fixed pattern + case_line_pattern = re.compile(r"\]\s+(\d+):\s+(\{.*\})") current_param = None - for line in lines: line = line.strip() - + # Check if the line defines a new parameter dim_match = dimension_pattern.search(line) if dim_match: current_param = dim_match.group(1) continue - + # Check if the line defines values for the current parameter val_match = values_pattern.search(line) if val_match and current_param: try: @@ -120,7 +125,7 @@ def get_grid_parameters(grid_dir: str): current_param = None except Exception as e: print(f"Error parsing values for {current_param}: {e}") - + # Check if the line contains case-specific data case_match = case_line_pattern.search(line) if case_match: case_num = int(case_match.group(1)) @@ -132,7 +137,7 @@ def get_grid_parameters(grid_dir: str): return param_grid, case_params -def extract_grid_output(cases_data, parameter_name): +def extract_grid_output(cases_data: list, parameter_name: str): """ Extract a specific parameter from the 'output_values' of each simulation case. @@ -147,14 +152,14 @@ def extract_grid_output(cases_data, parameter_name): Returns ------- parameter_values : list - A list containing the extracted values of the specified parameter for all cases. + A list containing the extracted values of the specified parameter for all cases of the grid. 
""" + parameter_values = [] columns_printed = False # Flag to print columns only once for case in cases_data: df = case['output_values'] - # Check if the parameter exists in the output dataframe if parameter_name in df.columns: # Extract the last value of the parameter from the last row @@ -166,14 +171,15 @@ def extract_grid_output(cases_data, parameter_name): print(f"Available columns in this case: {', '.join(df.columns)}") columns_printed = True - # Print the extracted values + # Print the extracted output values for the specified parameter print(f"Extracted output (at last time step) : {parameter_name} ") return parameter_values -def extract_solidification_time(cases_data, phi_crit): +def extract_solidification_time(cases_data: list, phi_crit: float): """ - Extract the solidification time for planets that reach Phi_global < phi_crit. + Extract the solidification time at the time step where the condition + 'Phi_global' < phi_crit is first satisfied for each planet. Parameters ---------- @@ -181,28 +187,30 @@ def extract_solidification_time(cases_data, phi_crit): List of dictionaries containing simulation data. phi_crit : float - The critical value of melt fraction at which a planet is considered solidified. + The critical melt fraction value below which a planet is considered solidified. + A typical value is 0.005. Returns ------- solidification_times : list A list containing the solidification times for all solidified planets of the grid. - If a planet never solidifies, it will have NaN in the list. + If a planet never solidifies, it will have a NaN in the list. 
""" + solidification_times = [] columns_printed = False for i, case in enumerate(cases_data): df = case['output_values'] - + # Check if the required columns exist in the dataframe if 'Phi_global' in df.columns and 'Time' in df.columns: condition = df['Phi_global'] < phi_crit if condition.any(): - first_index = condition.idxmax() # gives the first True index - solid_time = df.loc[first_index, 'Time'] + first_index = condition.idxmax() + solid_time = df.loc[first_index, 'Time'] # Get the index of the time at which the condition is first satisfied solidification_times.append(solid_time) else: - solidification_times.append(np.nan) # Append NaN if condition is not met + solidification_times.append(np.nan) # Append NaN if condition is not satisfied else: if not columns_printed: print("Warning: 'Phi_global' and/or 'Time' columns not found in some cases.") @@ -210,10 +218,9 @@ def extract_solidification_time(cases_data, phi_crit): columns_printed = True solidification_times.append(np.nan) # Append NaN if columns are missing - # Count the number of cases labeled as 10 Completed (solidified) + # Count the number of cases with a status = '10 Completed (solidified)' status_10_cases = [case for case in cases_data if (case.get('status') or '').strip() == '10 Completed (solidified)'] completed_count = len(status_10_cases) - # Count only valid solidification times (non-NaN) valid_solidification_times = [time for time in solidification_times if not np.isnan(time)] valid_solidified_count = len(valid_solidification_times) @@ -222,27 +229,29 @@ def extract_solidification_time(cases_data, phi_crit): print(f"Extracted solidification times (Phi_global < {phi_crit})") print(f"→ Found {valid_solidified_count} valid solidified cases based on Phi_global") print(f"→ Found {completed_count} cases with status '10 Completed (solidified)' ") - + # Check if the number of valid solidified cases matches the number of cases with status '10 Completed (solidified)' in the grid, to be sure the 
extraction is correct if valid_solidified_count != completed_count: print("WARNING: The number of valid solidified planets does not match the number of planets with status: '10 Completed (solidified)'") - print("\nChecking final Phi_global values for all status '10 Completed (solidified)' cases:") - for i, case in enumerate(status_10_cases): - df = case['output_values'] - if 'Phi_global' in df.columns: - final_phi = df['Phi_global'].iloc[-1] - #print(f"[Status Case {i}] Final Phi_global = {final_phi}") - else: - print(f"[Status Case {i}] Phi_global column missing.") + # To debug, the user can uncomment the following lines to print the solidification times for all plaent with '10 Completed (solidified)' + # print("\nChecking final Phi_global values for all status '10 Completed (solidified)' cases:") + # for i, case in enumerate(status_10_cases): + # df = case['output_values'] + # if 'Phi_global' in df.columns: + # final_phi = df['Phi_global'].iloc[-1] + # print(f"[Status Case {i}] Final Phi_global = {final_phi}") + # else: + # print(f"[Status Case {i}] Phi_global column missing.") else: print("Solidified planets count matches the number of planets with status: '10 Completed (solidified)'.") print('-----------------------------------------------------------') return solidification_times -def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_params, extracted_value, output_to_extract, phi_crit, output_dir: Path): +def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dict, case_params: Dict[int, Dict[str, Any]], + extracted_value: dict, output_to_extract: list, phi_crit: float, output_dir: Path): """ - Save all simulation information (status, grid parameters, output values, solidification times) - into CSV files for later analysis (doing plots). + Save all simulation information (status, grid parameters, output values) into a CSV file + for later analysis (using plot_grid.py to make plots for instance). 
Parameters ---------- @@ -250,32 +259,35 @@ def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_params, e Name of the grid. cases_data : list - List of dictionaries containing the status of all the simulations cases in the grid. + List of dictionaries containing simulation data. grid_parameters : dict - Dictionary containing the grid parameters. + A dictionary where each key is a parameter name, and its corresponding values used for the entire grid is a list. - case_params : Dict[int, Dict[str, Any]] + case_params : dict Dictionary containing each case number with the name and values of the tested parameters in this grid. extracted_value : dict - Dictionary containing the extracted output values for each parameter. - + A list containing the extracted values of the specified parameter for all cases of the grid. + output_to_extract : list List of output values extracted from each simulation in the grid. phi_crit : float - Critical melt fraction value to determine if the planet solidifies. + The critical melt fraction value used to determine if a planet is considered solidified. + A typical value is 0.005. output_dir : Path - Directory where the CSV file will be saved. + The directory where the generated CSV file will be saved. If the directory does not exist, + it will be created. 
""" + # Check if the output directory exist, if not create it output_dir = Path(output_dir) output_dir.mkdir(parents=True, exist_ok=True) - + # CSV file path csv_file = output_dir / f"{grid_name}_extracted_data.csv" - + with open(csv_file, 'w', newline='') as csvfile: writer = csv.writer(csvfile) @@ -303,22 +315,19 @@ def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_params, e # CSV table header writer.writerow(["Case number", "Status"] + list(grid_parameters.keys()) + list(extracted_value.keys())) - # Data rows + # Write data rows for case_index, case_data in enumerate(cases_data): status = case_data.get('status', 'unknown') or 'unknown' row = [case_index, f"'{status}'"] - - # Use case_params (from get_grid_parameters) to pull values for each param + # Add grid parameters values for each case case_param_values = case_params.get(case_index, {}) - for param in grid_parameters.keys(): row.append(case_param_values.get(param, 'NA')) - - # Add extracted output values + # Add extracted output values for each case for param in extracted_value.keys(): value_list = extracted_value.get(param, []) row.append(value_list[case_index] if case_index < len(value_list) else 'NA') - + # Write the row to the CSV file writer.writerow(row) print(f"Extracted data has been successfully saved to {csv_file}.") @@ -326,29 +335,27 @@ def save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_params, e if __name__ == '__main__': - # Paths to the grid folder - grid_name = 'escape_grid_4_params_Pxuv_a_epsilon_fO2' - grid_path = f'/home2/p315557/outputs_Norma2/good_grids/{grid_name}/' - data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' + # User needs to specify paths + grid_name = 'escape_grid_4_params_Pxuv_a_epsilon_fO2' + grid_path = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_grid/{grid_name}/' + data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' + # 
User choose the parameters to post-process the grid - output_to_extract = ['esc_rate_total', # List of output values to extract from the runtime_helpfile - 'Phi_global', - 'P_surf', - 'atm_kg_per_mol'] - phi_crit = 0.005 # Critical melt fraction for solidification - extracted_value = {} - - # Post-processing the grid - cases_data = load_grid_cases(grid_path) # Load all simulation cases - grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters - for param in output_to_extract: - extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values - solidification_times = extract_solidification_time(cases_data, phi_crit) # Extract the solidification time - extracted_value['solidification_time'] = solidification_times + output_to_extract = ['esc_rate_total','Phi_global', 'P_surf','atm_kg_per_mol'] # Output columns to extract from 'runtime_helpfile.csv' of each case + phi_crit = 0.005 # Critical melt fraction for the solidification condition - # Save all the extracted data to a CSV file - save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, extracted_value, solidification_times, phi_crit, data_dir) + # Post-processing the grid + extracted_value = {} # Initialize the dictionary to store extracted values + cases_data = load_grid_cases(grid_path) # Load all simulation cases + grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters + for param in output_to_extract: + extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values + solidification_times = extract_solidification_time(cases_data, phi_crit) # Extract the solidification time + extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values + save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, + extracted_value, solidification_times, phi_crit, data_dir) # Save all the extracted data to a CSV file + # 
Done with the post-processing step :) print('-----------------------------------------------------------') print("Post-processing completed. Let's do some plots !") print('(Please check for any warning messages above before going further.)') From f3dc0677493229b95a5f1eec82baddf326acbdb6 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 23 Apr 2025 16:14:47 +0200 Subject: [PATCH 015/105] clean plot_grid.py --- tools/post_processing_grid/plot_grid.py | 26 ++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index 2bac0e4b9..dffced14e 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -1,6 +1,7 @@ -import pandas as pd -from io import StringIO from pathlib import Path +from io import StringIO + +import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt @@ -987,19 +988,18 @@ def generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots if __name__ == '__main__': - # Paths to the csv file and plot directory - grid_name = 'escape_grid_4_params_Pxuv_a_epsilon_fO2' - data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/{grid_name}_extracted_data.csv' - plots_path = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_plots/{grid_name}/' + # User needs to specify paths + grid_name = 'escape_grid_4_params_Pxuv_a_epsilon_fO2' + data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/{grid_name}_extracted_data.csv' + plots_path = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_plots/{grid_name}/' - # Load the data and check if the plot directory exists - df, grid_params, extracted_outputs = load_extracted_data(data_dir) - plot_dir_exists(plots_path) - # Group extracted outputs by grid parameters - grouped_data = group_output_by_parameter(df, grid_params, 
extracted_outputs) + # Load and organize data before plotting + df, grid_params, extracted_outputs = load_extracted_data(data_dir) # Load the data + plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it. + grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters - # Plot the grid status - plot_grid_status(df, plots_path) + # Plots + plot_grid_status(df, plots_path) # Plot the grid status in an histogram # Single plots param_label_map = { From 62057eaaecdb089f9100e5bdf8101822e568642a Mon Sep 17 00:00:00 2001 From: Emma Postolec Date: Wed, 23 Apr 2025 18:58:28 +0200 Subject: [PATCH 016/105] Minor update to make it work fro another grid --- tools/post_processing_grid/plot_grid.py | 208 ++++++++++-------- .../post_processing_grid.py | 59 +++-- 2 files changed, 142 insertions(+), 125 deletions(-) diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index dffced14e..32d4f01b2 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -1,6 +1,7 @@ from pathlib import Path from io import StringIO +import os import pandas as pd import numpy as np import seaborn as sns @@ -10,15 +11,17 @@ import matplotlib.ticker as ticker from matplotlib.ticker import LogFormatterMathtext -def load_extracted_data(csv_path): +def load_extracted_data(data_path : str | Path, grid_name :str): """ Load extracted data from the CSV file generated with post_processing.py, returning a DataFrame for plotting. Parameters ---------- - csv_path : str or Path - Path to the CSV file containing extracted data. + data_path : str or Path + Path to the directory containing the CSV file with the extracted data. + grid_name : str + Name of the grid Returns ------- @@ -31,7 +34,12 @@ def load_extracted_data(csv_path): extracted_outputs : list of str List of extracted output variable names. 
""" - with open(csv_path, 'r') as f: + + csv_file = os.path.join(data_path, f"{grid_name}_extracted_data.csv") + if not os.path.exists(csv_file): + raise FileNotFoundError(f"CSV file not found at: {csv_file}") + + with open(csv_file, 'r') as f: lines = f.readlines() data_start_idx = None @@ -105,7 +113,7 @@ def is_float(value): """Helper function to check if a string can be converted to a float.""" try: float(value) # Try converting string to float - return True + return True except ValueError: return False # Fail not a valid float @@ -143,7 +151,7 @@ def group_output_by_parameter(df,grid_parameters,outputs): ------- dict Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. - """ + """ grouped = {} for param in grid_parameters: @@ -162,58 +170,59 @@ def group_output_by_parameter(df,grid_parameters,outputs): def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): """ - Plot the status of simulation from the PROTEUS grid. + Plot the status of simulations from the PROTEUS grid with improved x-axis readability. Parameters ---------- - cases_data : list - List of dictionaries containing the status of all simulation from the grid. + cases_data : list or DataFrame + Contains the status of all simulations from the grid. plot_dir : Path - Path to the plots directory + Path to the plots directory. status_colors : dict, optional A dictionary mapping statuses to specific colors. If None, a default palette is used. - - Returns - ------- - Plot saved to the specified directory. 
""" # Extract and clean statuses statuses = df['Status'].fillna('unknown').astype(str) status_counts = statuses.value_counts().sort_values(ascending=False) - + # Set colors for the bars if status_colors: - palette = {str(status): status_colors.get(str(status), 'gray') for status in status_counts.index} + formatted_status_keys = [s.replace(" ", "\n") for s in status_counts.index] + palette = {formatted: status_colors.get(original, 'gray') + for formatted, original in zip(formatted_status_keys, status_counts.index)} else: palette = sns.color_palette("Accent", len(status_counts)) - palette = dict(zip(status_counts.index, palette)) + formatted_status_keys = [s.replace(" ", "\n") for s in status_counts.index] + palette = dict(zip(formatted_status_keys, palette)) + + # Format x-axis labels for better readability + formatted_labels = [s.replace(" ", "\n") for s in status_counts.index] # Prepare dataframe for plotting plot_df = pd.DataFrame({ - 'Status': status_counts.index, + 'Status': formatted_labels, 'Count': status_counts.values }) - #sns.set(style="white") plt.figure(figsize=(10, 7)) ax = sns.barplot( data=plot_df, x='Status', y='Count', - hue='Status', # required to apply the palette + hue='Status', palette=palette, dodge=False, - edgecolor='black' # edge color added here + edgecolor='black' ) - + # Remove legend if it was created if ax.legend_: ax.legend_.remove() - # Add text on top of bars + # Add value labels above bars total_simulations = len(cases_data) for i, count in enumerate(status_counts.values): percentage = (count / total_simulations) * 100 @@ -223,14 +232,13 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): ha='center', va='bottom', fontsize=10 ) - plt.title(f"Total number of simulations: {total_simulations}", fontsize=16) plt.xlabel("Simulation status", fontsize=16) plt.ylabel("Number of simulations", fontsize=16) - plt.yticks(fontsize=12) + plt.yticks(fontsize=12) plt.xticks(fontsize=12) plt.tight_layout() - output_path 
= plot_dir+'grid_status_summary.png' + output_path = plot_dir + 'grid_status_summary.png' plt.savefig(output_path, dpi=300) plt.close() @@ -252,7 +260,7 @@ def kde_cumulative_linear(values, color, ax, bw_adjust=0.3, **kwargs): The axis on which to plot the KDE. bw_adjust : float, optional, default=0.3 - Bandwidth adjustment factor : controls the smoothness of the KDE. + Bandwidth adjustment factor : controls the smoothness of the KDE. Smaller values make the KDE more sensitive to data, while larger values smooth it out. **kwargs : keyword arguments, optional @@ -260,15 +268,14 @@ def kde_cumulative_linear(values, color, ax, bw_adjust=0.3, **kwargs): """ sns.kdeplot( - values, - cumulative=True, - bw_adjust=bw_adjust, - clip=(np.min(values), np.max(values)), - common_grid=True, - color=color, - ax=ax, - **kwargs - ) + values, + cumulative=True, + bw_adjust=bw_adjust, + clip=(np.min(values), np.max(values)), + common_grid=True, + color=color, + ax=ax, + **kwargs) def kde_cumulative_log(values, color, ax, bw_adjust=0.3, **kwargs): """ @@ -286,7 +293,7 @@ def kde_cumulative_log(values, color, ax, bw_adjust=0.3, **kwargs): The axis on which to plot the KDE. bw_adjust : float, optional, default=0.3 - Bandwidth adjustment factor : controls the smoothness of the KDE. + Bandwidth adjustment factor : controls the smoothness of the KDE. Smaller values make the KDE more sensitive to data, while larger values smooth it out. 
**kwargs : keyword arguments, optional @@ -303,7 +310,7 @@ def kde_cumulative_log(values, color, ax, bw_adjust=0.3, **kwargs): ax=ax, **kwargs ) - + # Set x-axis to log scale ax.set_xscale('log') @@ -318,9 +325,9 @@ def plot_kde_cumulative_linear(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vm Parameters ---------- data_dict : dict - Dictionary of datasets, where each key represents a unique variable (e.g., semi-major axis, temperature), + Dictionary of datasets, where each key represents a unique variable (e.g., semi-major axis), and each value is a list or array of values (e.g., solidification times). - + xlabel : str Label for the x-axis. @@ -332,13 +339,13 @@ def plot_kde_cumulative_linear(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vm vmin : float The minimum value of the key variable to normalize the colormap. - + vmax : float The maximum value of the key variable to normalize the colormap. ax : matplotlib.axes.Axes, optional The axis to plot on. If None, a new figure and axis will be created. - + save_path : str, optional Path to save the generated plot. If None, the plot will not be saved. @@ -349,6 +356,7 @@ def plot_kde_cumulative_linear(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vm Returns ------- '""" + if ax is None: fig, ax = plt.subplots(figsize=(10, 6)) else: @@ -482,7 +490,7 @@ def plot_kde_cumulative_log(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin= data_dict : dict Dictionary of datasets, where each key represents a unique variable (e.g., semi-major axis, temperature), and each value is a list or array of values (e.g., solidification times). - + xlabel : str Label for the x-axis. @@ -494,13 +502,13 @@ def plot_kde_cumulative_log(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin= vmin : float The minimum value of the key variable to normalize the colormap. - + vmax : float The maximum value of the key variable to normalize the colormap. ax : matplotlib.axes.Axes, optional The axis to plot on. 
If None, a new figure and axis will be created. - + save_path : str, optional Path to save the generated plot. If None, the plot will not be saved. @@ -759,29 +767,29 @@ def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, ---------- extracted_outputs : list of str List of extracted output quantities to plot. - + grouped_data : dict Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) - + grid_params : dict Parameter values used in the grid. - + plots_path : str Directory where plots will be saved. - + param_label_map : dict Dictionary containing the label of the grid parameter for the plot. - + colormaps_by_param : dict Dictionary containing the colormap to use for each grid parameter for the plot. - + output_label_map : dict, optional Dictionary containing the label of the extracted output quantity for the plot. - + log_scale_grid_params : list of str, optional Parameters to use log scale for colormap normalization. (like escape.zephyrus.Pxuv) """ - + if param_label_map is None: raise ValueError("param_label_map must be provided.") if colormaps_by_param is None: @@ -790,25 +798,25 @@ def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, raise ValueError("output_label_map must be provided.") if log_scale_grid_params is None: log_scale_grid_params = [] - + num_cols = len(extracted_outputs) num_rows = len(grid_params) - + # Create subplots with the appropriate layout fig, axes = plt.subplots(num_rows, num_cols, figsize=(num_cols * 16, num_rows * 12)) plt.subplots_adjust(hspace=0.15, wspace=0.15) - + if num_rows == 1: axes = np.expand_dims(axes, axis=0) # Make sure it's 2D if only one row. 
- + for i, output_name in enumerate(extracted_outputs): for j, (param, cmap) in enumerate(colormaps_by_param.items()): data_key = f"{output_name}_per_{param}" - + if data_key not in grouped_data: print(f"WARNING: Skipping {data_key} — not found in grouped_data") continue - + data_dict_raw = grouped_data[data_key] data_dict = data_dict_raw.copy() @@ -831,7 +839,7 @@ def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, if param in log_scale_grid_params else mcolors.Normalize(vmin=vmin, vmax=vmax) ) - + plot_kde_cumulative_linear( data_dict=data_dict, xlabel=xlabel, @@ -845,26 +853,26 @@ def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, norm=norm, ax=ax ) - + # Set titles for each subplot if i == 0: ax.set_ylabel(ylabel, fontsize=12) if j == num_rows - 1: ax.set_xlabel(xlabel, fontsize=12) - + # Customize plot with grid and ticks ax.grid(alpha=0.2) ax.set_ylim(0, 1.02) - + # Save the complete subplot figure save_dir = Path(plots_path) / 'grid_plot' plot_dir_exists(save_dir) - + save_name = "cumulative_grid_plot_linear.png" save_path = save_dir / save_name plt.savefig(save_path, dpi=300) plt.close(fig) - + print(f"All subplot plots saved to {save_path}") def generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params): @@ -876,25 +884,25 @@ def generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots ---------- extracted_outputs : list of str List of extracted output quantities to plot. - + grouped_data : dict Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) - + grid_params : dict Parameter values used in the grid. - + plots_path : str Directory where plots will be saved. - + param_label_map : dict Dictionary containing the label of the grid parameter for the plot. 
- + colormaps_by_param : dict Dictionary containing the colormap to use for each grid parameter for the plot. - + output_label_map : dict, optional Dictionary containing the label of the extracted output quantity for the plot. - + log_scale_grid_params : list of str, optional Parameters to use log scale for colormap normalization. (like escape.zephyrus.Pxuv) """ @@ -905,7 +913,7 @@ def generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots num_cols = len(extracted_outputs) num_rows = len(grid_params) - + fig, axes = plt.subplots(num_rows, num_cols, figsize=(num_cols * 6, num_rows * 5), sharex='col', sharey='row') plt.subplots_adjust(hspace=0.1, wspace=0.1) @@ -989,24 +997,27 @@ def generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots if __name__ == '__main__': # User needs to specify paths - grid_name = 'escape_grid_4_params_Pxuv_a_epsilon_fO2' - data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/{grid_name}_extracted_data.csv' - plots_path = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_plots/{grid_name}/' + grid_name = 'escape_grid_atm_a_f02_H_Mstar' + data_dir = f'/Users/emmapostolec/Documents/PHD/SCIENCE/CODES/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' + plots_path = f'/Users/emmapostolec/Documents/PHD/SCIENCE/CODES/PROTEUS/tools/post_processing_grid/nogit_plots/{grid_name}/' # Load and organize data before plotting - df, grid_params, extracted_outputs = load_extracted_data(data_dir) # Load the data + df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it. 
grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters # Plots - plot_grid_status(df, plots_path) # Plot the grid status in an histogram + plot_grid_status(df, plots_path) # Plot the grid status in an histogram - # Single plots + # Single plots param_label_map = { "orbit.semimajoraxis": "Semi-major axis [AU]", - "escape.zephyrus.Pxuv": r"$P_{XUV}$ [bar]", - "escape.zephyrus.efficiency": r"Escape efficiency factor, $\epsilon$", - "outgas.fO2_shift_IW":r"$log_{10}$($fO_2$) ($\Delta$ IW)"} + #"escape.zephyrus.Pxuv": r"$P_{XUV}$ [bar]", + #"escape.zephyrus.efficiency": r"Escape efficiency factor, $\epsilon$", + "outgas.fO2_shift_IW":r"$log_{10}$($fO_2$) ($\Delta$ IW)", + #"atmos_clim.module": "Atmospheric module", + "delivery.elements.H_oceans": "[H] [oceans]", + "star.mass": r"Stellar mass [M$_\odot$]"} output_label_map = { "solidification_time": "Solidification time [yr]", "esc_rate_total": "Total escape rate [kg/s]", @@ -1015,21 +1026,24 @@ def generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots "atm_kg_per_mol": "Atmospheric mass [kg/mol]"} colormaps_by_param = { "orbit.semimajoraxis": cm.plasma, - "escape.zephyrus.Pxuv": cm.cividis, - "escape.zephyrus.efficiency": cm.spring, - "outgas.fO2_shift_IW": cm.coolwarm} + #"escape.zephyrus.Pxuv": cm.cividis, + #"escape.zephyrus.efficiency": cm.spring, + "outgas.fO2_shift_IW": cm.coolwarm, + #"atmos_clim.module": cm.Dark2, + "delivery.elements.H_oceans": cm.winter, + "star.mass": cm.RdYlBu} log_scale_grid_params = ["escape.zephyrus.Pxuv"] - # generate_single_plots_linear( - # extracted_outputs=extracted_outputs, - # grouped_data=grouped_data, - # grid_params=grid_params, - # plots_path=plots_path, - # param_label_map=param_label_map, - # colormaps_by_param=colormaps_by_param, - # output_label_map=output_label_map, - # log_scale_grid_params=log_scale_grid_params) + generate_single_plots_linear( + extracted_outputs=extracted_outputs, 
+ grouped_data=grouped_data, + grid_params=grid_params, + plots_path=plots_path, + param_label_map=param_label_map, + colormaps_by_param=colormaps_by_param, + output_label_map=output_label_map, + log_scale_grid_params=log_scale_grid_params) # generate_single_plots_log( # extracted_outputs=extracted_outputs, @@ -1049,4 +1063,8 @@ def generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots param_label_map=param_label_map, colormaps_by_param=colormaps_by_param, output_label_map=output_label_map, - log_scale_grid_params=log_scale_grid_params) \ No newline at end of file + log_scale_grid_params=log_scale_grid_params) + + print('-----------------------------------------------------') + print("All plots completed. Let's do some analyse now :) !") + print('-----------------------------------------------------') diff --git a/tools/post_processing_grid/post_processing_grid.py b/tools/post_processing_grid/post_processing_grid.py index 0e04ab294..9286ef61b 100644 --- a/tools/post_processing_grid/post_processing_grid.py +++ b/tools/post_processing_grid/post_processing_grid.py @@ -11,8 +11,8 @@ def load_grid_cases(grid_dir: Path): """ - Load information for each simulation of a PROTEUS grid. - Read 'runtime_helpfile.csv', 'init_coupler.toml' and status + Load information for each simulation of a PROTEUS grid. + Read 'runtime_helpfile.csv', 'init_coupler.toml' and status files for each simulation of the grid. Parameters @@ -28,7 +28,7 @@ def load_grid_cases(grid_dir: Path): - 'output_values' (pandas.DataFrame): Data from `runtime_helpfile.csv`. - 'status' (str): Status string from the `status` file, or 'unknown' if unavailable. 
""" - + combined_data = [] grid_dir = Path(grid_dir) @@ -61,7 +61,7 @@ def load_grid_cases(grid_dir: Path): # Count the number of simulations per status statuses = [str(case.get('status', 'unknown')).strip() or 'unknown' for case in combined_data] status_counts = pd.Series(statuses).value_counts().sort_values(ascending=False) - + print('-----------------------------------------------------------') print(f"Total number of simulations: {len(statuses)}") print('-----------------------------------------------------------') @@ -74,12 +74,12 @@ def load_grid_cases(grid_dir: Path): def get_grid_parameters(grid_dir: str): """ Extract grid parameter names and values from the 'manager.log' file. - + Parameters ---------- grid_dir : str Path to the directory of the PROTEUS grid - + Returns ------- param_grid : dict @@ -106,7 +106,7 @@ def get_grid_parameters(grid_dir: str): # Expressions to match the relevant lines dimension_pattern = re.compile(r"parameter:\s*(\S+)") values_pattern = re.compile(r"values\s*:\s*\[(.*?)\]") - case_line_pattern = re.compile(r"\]\s+(\d+):\s+(\{.*\})") + case_line_pattern = re.compile(r"\]\s+(\d+):\s+(\{.*\})") current_param = None for line in lines: @@ -170,7 +170,7 @@ def extract_grid_output(cases_data: list, parameter_name: str): print(f"Warning: Parameter '{parameter_name}' does not exist in case '{case['init_parameters'].get('name', 'Unknown')}'") print(f"Available columns in this case: {', '.join(df.columns)}") columns_printed = True - + # Print the extracted output values for the specified parameter print(f"Extracted output (at last time step) : {parameter_name} ") @@ -178,7 +178,7 @@ def extract_grid_output(cases_data: list, parameter_name: str): def extract_solidification_time(cases_data: list, phi_crit: float): """ - Extract the solidification time at the time step where the condition + Extract the solidification time at the time step where the condition 'Phi_global' < phi_crit is first satisfied for each planet. 
Parameters @@ -193,20 +193,20 @@ def extract_solidification_time(cases_data: list, phi_crit: float): Returns ------- solidification_times : list - A list containing the solidification times for all solidified planets of the grid. + A list containing the solidification times for all solidified planets of the grid. If a planet never solidifies, it will have a NaN in the list. """ solidification_times = [] columns_printed = False - + for i, case in enumerate(cases_data): df = case['output_values'] # Check if the required columns exist in the dataframe if 'Phi_global' in df.columns and 'Time' in df.columns: condition = df['Phi_global'] < phi_crit if condition.any(): - first_index = condition.idxmax() + first_index = condition.idxmax() solid_time = df.loc[first_index, 'Time'] # Get the index of the time at which the condition is first satisfied solidification_times.append(solid_time) else: @@ -247,10 +247,10 @@ def extract_solidification_time(cases_data: list, phi_crit: float): return solidification_times -def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dict, case_params: Dict[int, Dict[str, Any]], +def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dict, case_params: Dict[int, Dict[str, Any]], extracted_value: dict, output_to_extract: list, phi_crit: float, output_dir: Path): """ - Save all simulation information (status, grid parameters, output values) into a CSV file + Save all simulation information (status, grid parameters, output values) into a CSV file for later analysis (using plot_grid.py to make plots for instance). Parameters @@ -269,25 +269,25 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic extracted_value : dict A list containing the extracted values of the specified parameter for all cases of the grid. - + output_to_extract : list List of output values extracted from each simulation in the grid. 
- + phi_crit : float The critical melt fraction value used to determine if a planet is considered solidified. A typical value is 0.005. output_dir : Path - The directory where the generated CSV file will be saved. If the directory does not exist, + The directory where the generated CSV file will be saved. If the directory does not exist, it will be created. """ # Check if the output directory exist, if not create it output_dir = Path(output_dir) output_dir.mkdir(parents=True, exist_ok=True) - + # CSV file path csv_file = output_dir / f"{grid_name}_extracted_data.csv" - + with open(csv_file, 'w', newline='') as csvfile: writer = csv.writer(csvfile) @@ -306,7 +306,7 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic values_str = f"[{', '.join(map(str, values))}]" writer.writerow([f"{aligned_param}: {values_str}"]) writer.writerow(["----------------------------------------------------------"]) - writer.writerow(["Extracted output values:" f"[{', '.join(extracted_value.keys())}]"]) + writer.writerow(["Extracted output values:" f"[{', '.join(extracted_value.keys())}]"]) writer.writerow(["----------------------------------------------------------"]) writer.writerow([f"| Case number | Status | {' | '.join(grid_parameters.keys())} | {' | '.join(extracted_value.keys())} |"]) writer.writerow(["#############################################################################################################"]) @@ -336,15 +336,15 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic if __name__ == '__main__': # User needs to specify paths - grid_name = 'escape_grid_4_params_Pxuv_a_epsilon_fO2' - grid_path = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_grid/{grid_name}/' - data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' - + grid_name = 'escape_grid_atm_a_f02_H_Mstar' + grid_path = f'/Users/emmapostolec/Downloads/output_norma2_rsync/{grid_name}/' + data_dir = 
f'/Users/emmapostolec/Documents/PHD/SCIENCE/CODES/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' + # User choose the parameters to post-process the grid output_to_extract = ['esc_rate_total','Phi_global', 'P_surf','atm_kg_per_mol'] # Output columns to extract from 'runtime_helpfile.csv' of each case phi_crit = 0.005 # Critical melt fraction for the solidification condition - - # Post-processing the grid + + # Post-processing the grid extracted_value = {} # Initialize the dictionary to store extracted values cases_data = load_grid_cases(grid_path) # Load all simulation cases grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters @@ -352,11 +352,10 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values solidification_times = extract_solidification_time(cases_data, phi_crit) # Extract the solidification time extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values - save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, - extracted_value, solidification_times, phi_crit, data_dir) # Save all the extracted data to a CSV file + save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, + extracted_value, solidification_times, phi_crit, data_dir) # Save all the extracted data to a CSV file # Done with the post-processing step :) - print('-----------------------------------------------------------') print("Post-processing completed. 
Let's do some plots !") print('(Please check for any warning messages above before going further.)') - print('-----------------------------------------------------------') \ No newline at end of file + print('-----------------------------------------------------------') From 493e68a1e00e526d5f03470dfb11359899e99de3 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Thu, 1 May 2025 18:10:22 +0200 Subject: [PATCH 017/105] maybe i fixed the log scale problem youhou --- .../merged_kde_plotting.ipynb | 155 ++++++ tools/post_processing_grid/plot_grid.py | 467 +++--------------- .../post_processing_grid.py | 94 ++-- 3 files changed, 275 insertions(+), 441 deletions(-) create mode 100644 tools/post_processing_grid/merged_kde_plotting.ipynb diff --git a/tools/post_processing_grid/merged_kde_plotting.ipynb b/tools/post_processing_grid/merged_kde_plotting.ipynb new file mode 100644 index 000000000..dc7d8e39e --- /dev/null +++ b/tools/post_processing_grid/merged_kde_plotting.ipynb @@ -0,0 +1,155 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "id": "648575f5", + "metadata": {}, + "outputs": [], + "source": [ + "# --- Imports ---\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import matplotlib.colors as mcolors\n", + "import seaborn as sns\n", + "from matplotlib.gridspec import GridSpec\n", + "from scipy.stats import gaussian_kde\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "61ff114c", + "metadata": {}, + "outputs": [], + "source": [ + "# --- Your KDE plot function ---\n", + "# (Paste your KDE logic here if it's modularized in plot_grid.py)\n", + "# If not, adapt the snippet below inside the plotting loop\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "2e926860", + "metadata": {}, + "outputs": [], + "source": [ + "# --- Load your input/output data here ---\n", + "# Example:\n", + "# inputs = {'C/H ratio': np.random.rand(10), 'Separation [AU]': np.random.rand(10)}\n", + "# outputs = 
{'Surface pressure [bar]': [np.random.normal(loc=val, size=100) for val in range(10)]}\n", + "inputs = {} # TODO: Fill with your data\n", + "outputs = {} # TODO: Fill with your data\n", + "\n", + "# Choose input parameter for color\n", + "color_key = 'C/H ratio' # Replace with your key\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "f573affd", + "metadata": {}, + "outputs": [], + "source": [ + "# --- Plotting function ---\n", + "def plot_kde_grid(inputs, outputs, color_key):\n", + " n_rows = len(outputs)\n", + " n_cols = len(inputs)\n", + " fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 3.5 * n_rows), sharex=False, sharey=False)\n", + "\n", + " # Normalize color values\n", + " cmap = plt.cm.viridis\n", + " color_vals = inputs[color_key]\n", + " norm = mcolors.LogNorm(vmin=max(np.min(color_vals), 1e-3), vmax=np.max(color_vals))\n", + " sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)\n", + "\n", + " axes = np.array(axes).reshape(n_rows, n_cols)\n", + "\n", + " for i, (out_key, out_vals) in enumerate(outputs.items()):\n", + " for j, (in_key, in_vals) in enumerate(inputs.items()):\n", + " ax = axes[i, j]\n", + " for k in range(len(color_vals)):\n", + " try:\n", + " kde = gaussian_kde(out_vals[k])\n", + " x_vals = np.linspace(np.min(out_vals[k]), np.max(out_vals[k]), 100)\n", + " y_vals = kde(x_vals)\n", + " ax.plot(x_vals, y_vals, color=cmap(norm(color_vals[k])))\n", + " except Exception:\n", + " continue\n", + "\n", + " if i == n_rows - 1:\n", + " ax.set_xlabel(in_key)\n", + " if j == 0:\n", + " ax.set_ylabel(out_key)\n", + "\n", + " # Colorbar\n", + " cbar = fig.colorbar(sm, ax=axes, orientation='vertical', fraction=0.02, pad=0.01)\n", + " cbar.set_label(color_key)\n", + "\n", + " fig.tight_layout()\n", + " plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "12aef78b", + "metadata": {}, + "outputs": [ + { + "ename": "ValueError", + "evalue": "Number of rows must be a positive integer, not 0", + 
"output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mValueError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[6]\u001b[39m\u001b[32m, line 2\u001b[39m\n\u001b[32m 1\u001b[39m \u001b[38;5;66;03m# --- Run the plot ---\u001b[39;00m\n\u001b[32m----> \u001b[39m\u001b[32m2\u001b[39m \u001b[43mplot_kde_grid\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moutputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcolor_key\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 5\u001b[39m, in \u001b[36mplot_kde_grid\u001b[39m\u001b[34m(inputs, outputs, color_key)\u001b[39m\n\u001b[32m 3\u001b[39m n_rows = \u001b[38;5;28mlen\u001b[39m(outputs)\n\u001b[32m 4\u001b[39m n_cols = \u001b[38;5;28mlen\u001b[39m(inputs)\n\u001b[32m----> \u001b[39m\u001b[32m5\u001b[39m fig, axes = \u001b[43mplt\u001b[49m\u001b[43m.\u001b[49m\u001b[43msubplots\u001b[49m\u001b[43m(\u001b[49m\u001b[43mn_rows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_cols\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfigsize\u001b[49m\u001b[43m=\u001b[49m\u001b[43m(\u001b[49m\u001b[32;43m4\u001b[39;49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_cols\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[32;43m3.5\u001b[39;49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_rows\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msharex\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msharey\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[32m 7\u001b[39m \u001b[38;5;66;03m# 
Normalize color values\u001b[39;00m\n\u001b[32m 8\u001b[39m cmap = plt.cm.viridis\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/proteus/lib/python3.12/site-packages/matplotlib/pyplot.py:1770\u001b[39m, in \u001b[36msubplots\u001b[39m\u001b[34m(nrows, ncols, sharex, sharey, squeeze, width_ratios, height_ratios, subplot_kw, gridspec_kw, **fig_kw)\u001b[39m\n\u001b[32m 1625\u001b[39m \u001b[38;5;250m\u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 1626\u001b[39m \u001b[33;03mCreate a figure and a set of subplots.\u001b[39;00m\n\u001b[32m 1627\u001b[39m \n\u001b[32m (...)\u001b[39m\u001b[32m 1767\u001b[39m \n\u001b[32m 1768\u001b[39m \u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 1769\u001b[39m fig = figure(**fig_kw)\n\u001b[32m-> \u001b[39m\u001b[32m1770\u001b[39m axs = \u001b[43mfig\u001b[49m\u001b[43m.\u001b[49m\u001b[43msubplots\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m=\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mncols\u001b[49m\u001b[43m=\u001b[49m\u001b[43mncols\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msharex\u001b[49m\u001b[43m=\u001b[49m\u001b[43msharex\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msharey\u001b[49m\u001b[43m=\u001b[49m\u001b[43msharey\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 1771\u001b[39m \u001b[43m \u001b[49m\u001b[43msqueeze\u001b[49m\u001b[43m=\u001b[49m\u001b[43msqueeze\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msubplot_kw\u001b[49m\u001b[43m=\u001b[49m\u001b[43msubplot_kw\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 1772\u001b[39m \u001b[43m \u001b[49m\u001b[43mgridspec_kw\u001b[49m\u001b[43m=\u001b[49m\u001b[43mgridspec_kw\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mheight_ratios\u001b[49m\u001b[43m=\u001b[49m\u001b[43mheight_ratios\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 1773\u001b[39m \u001b[43m 
\u001b[49m\u001b[43mwidth_ratios\u001b[49m\u001b[43m=\u001b[49m\u001b[43mwidth_ratios\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1774\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m fig, axs\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/proteus/lib/python3.12/site-packages/matplotlib/figure.py:918\u001b[39m, in \u001b[36mFigureBase.subplots\u001b[39m\u001b[34m(self, nrows, ncols, sharex, sharey, squeeze, width_ratios, height_ratios, subplot_kw, gridspec_kw)\u001b[39m\n\u001b[32m 914\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m\u001b[33mwidth_ratios\u001b[39m\u001b[33m'\u001b[39m\u001b[33m must not be defined both as \u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 915\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mparameter and as key in \u001b[39m\u001b[33m'\u001b[39m\u001b[33mgridspec_kw\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m 916\u001b[39m gridspec_kw[\u001b[33m'\u001b[39m\u001b[33mwidth_ratios\u001b[39m\u001b[33m'\u001b[39m] = width_ratios\n\u001b[32m--> \u001b[39m\u001b[32m918\u001b[39m gs = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43madd_gridspec\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mncols\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfigure\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mgridspec_kw\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 919\u001b[39m axs = gs.subplots(sharex=sharex, sharey=sharey, squeeze=squeeze,\n\u001b[32m 920\u001b[39m subplot_kw=subplot_kw)\n\u001b[32m 921\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m axs\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/proteus/lib/python3.12/site-packages/matplotlib/figure.py:1600\u001b[39m, in \u001b[36mFigureBase.add_gridspec\u001b[39m\u001b[34m(self, 
nrows, ncols, **kwargs)\u001b[39m\n\u001b[32m 1557\u001b[39m \u001b[38;5;250m\u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 1558\u001b[39m \u001b[33;03mLow-level API for creating a `.GridSpec` that has this figure as a parent.\u001b[39;00m\n\u001b[32m 1559\u001b[39m \n\u001b[32m (...)\u001b[39m\u001b[32m 1596\u001b[39m \n\u001b[32m 1597\u001b[39m \u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 1599\u001b[39m _ = kwargs.pop(\u001b[33m'\u001b[39m\u001b[33mfigure\u001b[39m\u001b[33m'\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;66;03m# pop in case user has added this...\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1600\u001b[39m gs = \u001b[43mGridSpec\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m=\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mncols\u001b[49m\u001b[43m=\u001b[49m\u001b[43mncols\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfigure\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1601\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m gs\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/proteus/lib/python3.12/site-packages/matplotlib/gridspec.py:363\u001b[39m, in \u001b[36mGridSpec.__init__\u001b[39m\u001b[34m(self, nrows, ncols, figure, left, bottom, right, top, wspace, hspace, width_ratios, height_ratios)\u001b[39m\n\u001b[32m 360\u001b[39m \u001b[38;5;28mself\u001b[39m.hspace = hspace\n\u001b[32m 361\u001b[39m \u001b[38;5;28mself\u001b[39m.figure = figure\n\u001b[32m--> \u001b[39m\u001b[32m363\u001b[39m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m.\u001b[49m\u001b[34;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mncols\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 364\u001b[39m 
\u001b[43m \u001b[49m\u001b[43mwidth_ratios\u001b[49m\u001b[43m=\u001b[49m\u001b[43mwidth_ratios\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 365\u001b[39m \u001b[43m \u001b[49m\u001b[43mheight_ratios\u001b[49m\u001b[43m=\u001b[49m\u001b[43mheight_ratios\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/proteus/lib/python3.12/site-packages/matplotlib/gridspec.py:48\u001b[39m, in \u001b[36mGridSpecBase.__init__\u001b[39m\u001b[34m(self, nrows, ncols, height_ratios, width_ratios)\u001b[39m\n\u001b[32m 33\u001b[39m \u001b[38;5;250m\u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 34\u001b[39m \u001b[33;03mParameters\u001b[39;00m\n\u001b[32m 35\u001b[39m \u001b[33;03m----------\u001b[39;00m\n\u001b[32m (...)\u001b[39m\u001b[32m 45\u001b[39m \u001b[33;03m If not given, all rows will have the same height.\u001b[39;00m\n\u001b[32m 46\u001b[39m \u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 47\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(nrows, Integral) \u001b[38;5;129;01mor\u001b[39;00m nrows <= \u001b[32m0\u001b[39m:\n\u001b[32m---> \u001b[39m\u001b[32m48\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[32m 49\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mNumber of rows must be a positive integer, not \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnrows\u001b[38;5;132;01m!r}\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 50\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(ncols, Integral) \u001b[38;5;129;01mor\u001b[39;00m ncols <= \u001b[32m0\u001b[39m:\n\u001b[32m 51\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[32m 52\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mNumber of columns must be a positive integer, not 
\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mncols\u001b[38;5;132;01m!r}\u001b[39;00m\u001b[33m\"\u001b[39m)\n", + "\u001b[31mValueError\u001b[39m: Number of rows must be a positive integer, not 0" + ] + }, + { + "data": { + "text/plain": [ + "
      " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# --- Run the plot ---\n", + "plot_kde_grid(inputs, outputs, color_key)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "proteus", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index 32d4f01b2..300771df2 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -195,15 +195,12 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): for formatted, original in zip(formatted_status_keys, status_counts.index)} else: palette = sns.color_palette("Accent", len(status_counts)) - formatted_status_keys = [s.replace(" ", "\n") for s in status_counts.index] + formatted_status_keys = [s.replace("d (", "d \n (") for s in status_counts.index] palette = dict(zip(formatted_status_keys, palette)) - # Format x-axis labels for better readability - formatted_labels = [s.replace(" ", "\n") for s in status_counts.index] - # Prepare dataframe for plotting plot_df = pd.DataFrame({ - 'Status': formatted_labels, + 'Status': formatted_status_keys, 'Count': status_counts.values }) @@ -232,7 +229,18 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): ha='center', va='bottom', fontsize=10 ) - plt.title(f"Total number of simulations: {total_simulations}", fontsize=16) + # Boxed total in upper right + plt.gca().text( + 0.97, 0.94, + f"Total number of simulations : {total_simulations}", + transform=plt.gca().transAxes, + ha='right', va='top', + fontsize=14, + #bbox=dict(boxstyle="round,pad=0.5", 
facecolor="white", edgecolor="black") + ) + + plt.grid(alpha=0.2, axis='y') + plt.title(f"Grid status summary : {grid_name}", fontsize=16) plt.xlabel("Simulation status", fontsize=16) plt.ylabel("Number of simulations", fontsize=16) plt.yticks(fontsize=12) @@ -242,9 +250,9 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): plt.savefig(output_path, dpi=300) plt.close() - print(f"Plot grid_status_summary.png saved to {output_path}") + print(f"Grid status histogram successfully saved to {output_path}") -def kde_cumulative_linear(values, color, ax, bw_adjust=0.3, **kwargs): +def kde_cumulative(values, color, ax, bw_adjust=0.3, log_x=False, **kwargs): """ Plot a cumulative KDE curve on the given x-axis. @@ -263,6 +271,9 @@ def kde_cumulative_linear(values, color, ax, bw_adjust=0.3, **kwargs): Bandwidth adjustment factor : controls the smoothness of the KDE. Smaller values make the KDE more sensitive to data, while larger values smooth it out. + log_x : bool, optional + If True, sets the x-axis to logarithmic scale. + **kwargs : keyword arguments, optional Additional arguments passed to `sns.kdeplot`, such as `label`, `linewidth`, etc. @@ -277,7 +288,13 @@ def kde_cumulative_linear(values, color, ax, bw_adjust=0.3, **kwargs): ax=ax, **kwargs) -def kde_cumulative_log(values, color, ax, bw_adjust=0.3, **kwargs): + # Set axis scale : linear or log + if log_x==True: + ax.set_xscale('log') + else: + ax.set_xscale('linear') + + """ Plot a cumulative KDE curve on the given x-axis with a log scale for the x-axis. 
@@ -317,7 +334,7 @@ def kde_cumulative_log(values, color, ax, bw_adjust=0.3, **kwargs): # Format the x-axis to show powers of 10 ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(lambda x, _: r'$10^{{{}}}$'.format(int(np.log10(x))))) -def plot_kde_cumulative_linear(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None, vmax=None, key_label="Parameter", ax=None, save_path=None, bw_adjust=0.3, tick_values=None, norm=None): +def plot_kde_cumulative(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None, vmax=None, key_label="Parameter", ax=None, save_path=None, bw_adjust=0.3, log_x=False, tick_values=None, norm=None): """ Plot cumulative KDE curves for one of the output parameters of the grid (like esc_rate_total, solidification_time) on the x-axis. The different curves correspond to a input parameter from the grid with a color mapped on the right side of the plot. @@ -348,6 +365,14 @@ def plot_kde_cumulative_linear(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vm save_path : str, optional Path to save the generated plot. If None, the plot will not be saved. + + bw_adjust : float, optional, default=0.3 + Bandwidth adjustment factor : controls the smoothness of the KDE. + Smaller values make the KDE more sensitive to data, while larger values smooth it out. + + log_x : bool, optional + If True, sets the x-axis to logarithmic scale. + tick_values : list of float, optional Values to use as ticks on the colorbar. Useful for discrete parameter steps. 
@@ -379,169 +404,9 @@ def plot_kde_cumulative_linear(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vm if len(values) < 2: continue color = cmap(norm(key)) - kde_cumulative_linear(values, color=color, ax=ax, bw_adjust=bw_adjust, label=str(key)) - - ax.set_xlabel(xlabel, fontsize=12) - ax.set_ylabel(ylabel, fontsize=12) - ax.set_ylim(0, 1.02) - ax.grid(alpha=0.2) - - # Colorbar setup - sm.set_array(np.linspace(vmin, vmax, 100)) - cbar = fig.colorbar(sm, ax=ax) - cbar.set_label(key_label, fontsize=12) - - # Use grid param values as colorbar ticks - if tick_values is not None: - cbar.set_ticks(tick_values) - if isinstance(norm, mcolors.LogNorm): - # Format log ticks with LaTeX-style math text (e.g., 10⁻³) - cbar.set_ticklabels([f"$10^{{{int(np.log10(t))}}}$" for t in tick_values]) - cbar.ax.yaxis.set_major_formatter(LogFormatterMathtext()) - else: - # Linear scale ticks - cbar.set_ticklabels([str(t) for t in tick_values]) - - if save_path: - plt.savefig(save_path, dpi=300) - plt.close(fig) - -def plot_kde_cumulative_linear_clean( - data_dict, - xlabel, - ylabel, - cmap=plt.cm.plasma, - vmin=None, - vmax=None, - key_label="Parameter", - ax=None, - save_path=None, - bw_adjust=0.3, - tick_values=None, - norm=None, - return_cbar=False # New parameter -): - """ - Plot cumulative KDE curves for one of the output parameters of the grid. - Optionally return a colorbar scalar mappable to use for shared colorbar. 
- """ - if ax is None: - fig, ax = plt.subplots(figsize=(10, 6)) - else: - fig = ax.figure - - keys = sorted(data_dict.keys()) - if vmin is None: - vmin = min(keys) - if vmax is None: - vmax = max(keys) - - # Use provided or default normalization - if norm is None: - norm = mcolors.Normalize(vmin=vmin, vmax=vmax) - sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) - - # Plot each dataset - for key in keys: - values = np.array(data_dict[key]) - if len(values) < 2: - continue - color = cmap(norm(key)) - kde_cumulative_linear(values, color=color, ax=ax, bw_adjust=bw_adjust, label=str(key)) - - ax.set_xlabel(xlabel, fontsize=12) - ax.set_ylabel(ylabel, fontsize=12) - ax.set_ylim(0, 1.02) - ax.grid(alpha=0.2) - - # Only return colorbar smappable, don't draw it (caller will handle that) - if return_cbar: - sm.set_array(np.linspace(vmin, vmax, 100)) # Needed for colorbar - sm.key_label = key_label # Store label for later - sm.tick_values = tick_values # Custom attribute to help outside - return sm - - # Otherwise, draw internal colorbar normally - sm.set_array(np.linspace(vmin, vmax, 100)) - cbar = fig.colorbar(sm, ax=ax) - cbar.set_label(key_label, fontsize=12) - - if tick_values is not None: - cbar.set_ticks(tick_values) - if isinstance(norm, mcolors.LogNorm): - cbar.set_ticklabels([f"$10^{{{int(np.log10(t))}}}$" for t in tick_values]) - cbar.ax.yaxis.set_major_formatter(LogFormatterMathtext()) - else: - cbar.set_ticklabels([str(t) for t in tick_values]) - if save_path: - plt.savefig(save_path, dpi=300) - plt.close(fig) - - return None - -def plot_kde_cumulative_log(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None, vmax=None, key_label="Parameter", ax=None, save_path=None, bw_adjust=0.3, tick_values=None, norm=None): - """ - Plot cumulative KDE curves for one of the output parameters of the grid (like esc_rate_total, solidification_time) on the x-axis. 
- The different curves correspond to a input parameter from the grid with a color mapped on the right side of the plot. - - Parameters - ---------- - data_dict : dict - Dictionary of datasets, where each key represents a unique variable (e.g., semi-major axis, temperature), - and each value is a list or array of values (e.g., solidification times). - - xlabel : str - Label for the x-axis. - - ylabel : str - Label for the y-axis. - - cmap : matplotlib.colors.Colormap - The colormap used to assign colors to each dataset based on their corresponding key value. - - vmin : float - The minimum value of the key variable to normalize the colormap. - - vmax : float - The maximum value of the key variable to normalize the colormap. - - ax : matplotlib.axes.Axes, optional - The axis to plot on. If None, a new figure and axis will be created. - - save_path : str, optional - Path to save the generated plot. If None, the plot will not be saved. - - tick_values : list of float, optional - Values to use as ticks on the colorbar. Useful for discrete parameter steps. 
- - - Returns - ------- - '""" - if ax is None: - fig, ax = plt.subplots(figsize=(10, 6)) - else: - fig = ax.figure - - keys = sorted(data_dict.keys()) - if vmin is None: - vmin = min(keys) - if vmax is None: - vmax = max(keys) + kde_cumulative(values, color=color, ax=ax, bw_adjust=bw_adjust, log_x=log_x, label=str(key)) - # Use provided or default norm - if norm is None: - norm = mcolors.Normalize(vmin=vmin, vmax=vmax) - sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) - - # Plot each group - for key in keys: - values = np.array(data_dict[key]) - if len(values) < 2: - continue - color = cmap(norm(key)) - kde_cumulative_log(values, color=color, ax=ax, bw_adjust=bw_adjust, label=str(key)) ax.set_xlabel(xlabel, fontsize=12) ax.set_ylabel(ylabel, fontsize=12) @@ -568,7 +433,7 @@ def plot_kde_cumulative_log(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin= plt.savefig(save_path, dpi=300) plt.close(fig) -def generate_single_plots_linear(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params): +def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params, log_x=False): """ Generate and save normalized cumulative distribution plots for each output vs. grid parameter. 
@@ -633,7 +498,7 @@ def generate_single_plots_linear(extracted_outputs, grouped_data, grid_params, p ylabel = "Normalized cumulative fraction of simulations" key_label = param_label_map.get(param, param.replace("_", " ").title()) - save_dir = Path(plots_path) / 'single_plot_linear' + save_dir = Path(plots_path) / 'single_plot' plot_dir_exists(save_dir) save_name = f"cumulative_{output_name}_vs_{param.replace('.', '_')}.png" @@ -647,7 +512,7 @@ def generate_single_plots_linear(extracted_outputs, grouped_data, grid_params, p else mcolors.Normalize(vmin=vmin, vmax=vmax) ) - plot_kde_cumulative_linear( + plot_kde_cumulative( data_dict=data_dict, xlabel=xlabel, ylabel=ylabel, @@ -655,6 +520,7 @@ def generate_single_plots_linear(extracted_outputs, grouped_data, grid_params, p vmin=vmin, vmax=vmax, key_label=key_label, + log_x=log_x, tick_values=tick_values, save_path=save_path, norm=norm, @@ -664,101 +530,7 @@ def generate_single_plots_linear(extracted_outputs, grouped_data, grid_params, p print("All linear-scale single plots saved.") -def generate_single_plots_log(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params): - """ - Generate and save normalized cumulative distribution plots for each output vs. grid parameter. The x-axis is in log scale. - - Parameters: - ---------- - extracted_outputs : list of str - List of extracted output quantities to plot. - - grouped_data : dict - Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) - - grid_params : dict - Parameter values used in the grid. - - plots_path : str - Directory where plots will be saved. - - param_label_map : dict - Dictionary containing the label of the grid parameter for the plot. - - colormaps_by_param : dict - Dictionary containing the colormap to use for each grid parameter for the plot. 
- - output_label_map : dict, optional - Dictionary containing the label of the extracted output quantity for the plot. - - log_scale_grid_params : list of str, optional - Parameters to use log scale for colormap normalization. (like escape.zephyrus.Pxuv) - """ - - if param_label_map is None: - raise ValueError("param_label_map must be provided.") - if colormaps_by_param is None: - raise ValueError("colormaps_by_param must be provided.") - if output_label_map is None: - raise ValueError("output_label_map must be provided.") - if log_scale_grid_params is None: - log_scale_grid_params = [] - - for output_name in extracted_outputs: - for param, cmap in colormaps_by_param.items(): - data_key = f"{output_name}_per_{param}" - - if data_key not in grouped_data: - print(f"WARNING: Skipping {data_key} — not found in grouped_data") - continue - - data_dict_raw = grouped_data[data_key] - data_dict = data_dict_raw.copy() - - if output_name == "Phi_global": - data_dict = {k: [v_i * 100 for v_i in v] for k, v in data_dict.items()} - - tick_values = grid_params.get(param) - vmin = min(tick_values) - vmax = max(tick_values) - - xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) - ylabel = "Normalized cumulative fraction of simulations" - key_label = param_label_map.get(param, param.replace("_", " ").title()) - - save_dir = Path(plots_path) / 'single_plot_log' - plot_dir_exists(save_dir) - - save_name = f"cumulative_{output_name}_vs_{param.replace('.', '_')}_log.png" - save_path = save_dir / save_name - - fig, ax = plt.subplots(figsize=(10, 6)) - - norm = ( - mcolors.LogNorm(vmin=vmin, vmax=vmax) - if param in log_scale_grid_params - else mcolors.Normalize(vmin=vmin, vmax=vmax) - ) - - plot_kde_cumulative_log( - data_dict=data_dict, - xlabel=xlabel, - ylabel=ylabel, - cmap=cmap, - vmin=vmin, - vmax=vmax, - key_label=key_label, - tick_values=tick_values, - save_path=save_path, - norm=norm, - ax=ax - ) - - plt.close(fig) - - print("All log-scale single 
plots saved.") - -def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params): +def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params, log_x=False): """ Generate and save normalized cumulative distribution plots for each output vs. grid parameters. This creates a subplot where each column corresponds to one extracted output, and each row corresponds to one grid parameter. @@ -840,7 +612,7 @@ def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, else mcolors.Normalize(vmin=vmin, vmax=vmax) ) - plot_kde_cumulative_linear( + plot_kde_cumulative( data_dict=data_dict, xlabel=xlabel, ylabel=ylabel, @@ -848,6 +620,7 @@ def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, vmin=vmin, vmax=vmax, key_label=key_label, + log_x=log_x, tick_values=tick_values, save_path=None, # Don't save the plot yet norm=norm, @@ -868,141 +641,22 @@ def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, save_dir = Path(plots_path) / 'grid_plot' plot_dir_exists(save_dir) - save_name = "cumulative_grid_plot_linear.png" + save_name = "cumulative_grid_plot.png" save_path = save_dir / save_name plt.savefig(save_path, dpi=300) plt.close(fig) print(f"All subplot plots saved to {save_path}") -def generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params): - """ - Generate and save normalized cumulative distribution plots for each output vs. grid parameters. - This creates a subplot where each column corresponds to one extracted output, and each row corresponds to one grid parameter. - - Parameters: - ---------- - extracted_outputs : list of str - List of extracted output quantities to plot. 
- - grouped_data : dict - Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) - - grid_params : dict - Parameter values used in the grid. - - plots_path : str - Directory where plots will be saved. - - param_label_map : dict - Dictionary containing the label of the grid parameter for the plot. - - colormaps_by_param : dict - Dictionary containing the colormap to use for each grid parameter for the plot. - - output_label_map : dict, optional - Dictionary containing the label of the extracted output quantity for the plot. - - log_scale_grid_params : list of str, optional - Parameters to use log scale for colormap normalization. (like escape.zephyrus.Pxuv) - """ - if param_label_map is None or colormaps_by_param is None or output_label_map is None: - raise ValueError("param_label_map, colormaps_by_param, and output_label_map must be provided.") - if log_scale_grid_params is None: - log_scale_grid_params = [] - - num_cols = len(extracted_outputs) - num_rows = len(grid_params) - - fig, axes = plt.subplots(num_rows, num_cols, figsize=(num_cols * 6, num_rows * 5), sharex='col', sharey='row') - plt.subplots_adjust(hspace=0.1, wspace=0.1) - - if num_rows == 1: - axes = np.expand_dims(axes, axis=0) - if num_cols == 1: - axes = np.expand_dims(axes, axis=1) - - param_list = list(grid_params.keys()) - - for i, output_name in enumerate(extracted_outputs): - for j, param in enumerate(param_list): - data_key = f"{output_name}_per_{param}" - cmap = colormaps_by_param[param] - - if data_key not in grouped_data: - print(f"WARNING: Skipping {data_key} — not found in grouped_data") - continue - - data_dict_raw = grouped_data[data_key] - data_dict = {k: [v_i * 100 if output_name == "Phi_global" else v_i for v_i in v] for k, v in data_dict_raw.items()} - - tick_values = grid_params.get(param) - vmin, vmax = min(tick_values), max(tick_values) - - xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) - norm = ( - 
mcolors.LogNorm(vmin=vmin, vmax=vmax) - if param in log_scale_grid_params - else mcolors.Normalize(vmin=vmin, vmax=vmax) - ) - - ax = axes[j, i] - cbar = plot_kde_cumulative_linear_clean( - data_dict=data_dict, - xlabel="", - ylabel="", - cmap=cmap, - vmin=vmin, - vmax=vmax, - key_label=None, - tick_values=tick_values, - save_path=None, - norm=norm, - ax=ax, - return_cbar=True - ) - - if i != 0: - ax.set_ylabel("") - ax.set_yticklabels([]) - if j != num_rows - 1: - ax.set_xlabel("") - ax.set_xticklabels([]) - - ax.grid(alpha=0.2) - ax.set_ylim(0, 1.02) - - # Add shared y-axis label - fig.text(0.04, 0.5, "Normalized cumulative fraction of simulations", va='center', rotation='vertical', fontsize=16) - - # Add x-axis labels under each column - for i, output_name in enumerate(extracted_outputs): - xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) - axes[-1, i].set_xlabel(xlabel, fontsize=12) - - # Create a single colorbar on the right side - fig.subplots_adjust(right=0.87) - cbar_ax = fig.add_axes([0.9, 0.15, 0.015, 0.7]) # [left, bottom, width, height] - fig.colorbar(cbar, cax=cbar_ax, orientation='vertical', label="Parameter value") - - # Save plot - save_dir = Path(plots_path) / 'grid_plot' - plot_dir_exists(save_dir) - save_path = save_dir / "cumulative_grid_plot_linear_clean.png" - plt.savefig(save_path, dpi=300) - plt.close(fig) - - print(f"All subplot plots saved to {save_path}") - if __name__ == '__main__': # User needs to specify paths - grid_name = 'escape_grid_atm_a_f02_H_Mstar' - data_dir = f'/Users/emmapostolec/Documents/PHD/SCIENCE/CODES/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' - plots_path = f'/Users/emmapostolec/Documents/PHD/SCIENCE/CODES/PROTEUS/tools/post_processing_grid/nogit_plots/{grid_name}/' + grid_name = 'escape_grid_habrok_6_params_1Msun_agni' + data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' + plots_path = 
f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_plots/{grid_name}/' # Load and organize data before plotting - df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data + df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it. grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters @@ -1012,10 +666,11 @@ def generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots # Single plots param_label_map = { "orbit.semimajoraxis": "Semi-major axis [AU]", - #"escape.zephyrus.Pxuv": r"$P_{XUV}$ [bar]", - #"escape.zephyrus.efficiency": r"Escape efficiency factor, $\epsilon$", + "escape.zephyrus.Pxuv": r"$P_{XUV}$ [bar]", + "escape.zephyrus.efficiency": r"Escape efficiency factor, $\epsilon$", "outgas.fO2_shift_IW":r"$log_{10}$($fO_2$) ($\Delta$ IW)", #"atmos_clim.module": "Atmospheric module", + "delivery.elements.CH_ratio": "C/H ratio", "delivery.elements.H_oceans": "[H] [oceans]", "star.mass": r"Stellar mass [M$_\odot$]"} output_label_map = { @@ -1026,16 +681,18 @@ def generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots "atm_kg_per_mol": "Atmospheric mass [kg/mol]"} colormaps_by_param = { "orbit.semimajoraxis": cm.plasma, - #"escape.zephyrus.Pxuv": cm.cividis, - #"escape.zephyrus.efficiency": cm.spring, + "escape.zephyrus.Pxuv": cm.cividis, + "escape.zephyrus.efficiency": cm.spring, "outgas.fO2_shift_IW": cm.coolwarm, + "delivery.elements.CH_ratio":cm.PuOr, #"atmos_clim.module": cm.Dark2, "delivery.elements.H_oceans": cm.winter, - "star.mass": cm.RdYlBu} + #"star.mass": cm.RdYlBu + } log_scale_grid_params = ["escape.zephyrus.Pxuv"] - generate_single_plots_linear( + generate_single_plots( extracted_outputs=extracted_outputs, grouped_data=grouped_data, grid_params=grid_params, @@ -1045,16 +702,6 @@ def 
generate_grid_plot_clean(extracted_outputs, grouped_data, grid_params, plots output_label_map=output_label_map, log_scale_grid_params=log_scale_grid_params) - # generate_single_plots_log( - # extracted_outputs=extracted_outputs, - # grouped_data=grouped_data, - # grid_params=grid_params, - # plots_path=plots_path, - # param_label_map=param_label_map, - # colormaps_by_param=colormaps_by_param, - # output_label_map=output_label_map, - # log_scale_grid_params=log_scale_grid_params) - generate_grid_plot( extracted_outputs=extracted_outputs, grouped_data=grouped_data, diff --git a/tools/post_processing_grid/post_processing_grid.py b/tools/post_processing_grid/post_processing_grid.py index 9286ef61b..9b4ae286b 100644 --- a/tools/post_processing_grid/post_processing_grid.py +++ b/tools/post_processing_grid/post_processing_grid.py @@ -26,49 +26,77 @@ def load_grid_cases(grid_dir: Path): List of dictionaries, each containing: - 'init_parameters' (dict): Parameters loaded from `init_coupler.toml`. - 'output_values' (pandas.DataFrame): Data from `runtime_helpfile.csv`. - - 'status' (str): Status string from the `status` file, or 'unknown' if unavailable. + - 'status' (str): Status string from the `status` file, or 'Unknown' if unavailable. 
""" combined_data = [] grid_dir = Path(grid_dir) - # Load all cases from the grid - for case in grid_dir.glob('case_*'): + # Collect and sort the case directories + case_dirs = list(grid_dir.glob('case_*')) + case_dirs.sort(key=lambda p: int(p.name.split('_')[1])) + + for case in case_dirs: runtime_file = case / 'runtime_helpfile.csv' init_file = case / 'init_coupler.toml' status_file = case / 'status' - if runtime_file.exists() and init_file.exists(): + # Load init parameters + init_params = {} + if init_file.exists(): try: - df = pd.read_csv(runtime_file, sep='\t') init_params = toml.load(open(init_file)) + except Exception as e: + print(f"Error reading init file in {case.name}: {e}") - # Read status file if it exists, otherwise set to None - status = None - if status_file.exists(): - with open(status_file, 'r') as sf: - status = sf.read().replace('\n', ' ').strip() - - combined_data.append({ - 'init_parameters': init_params, - 'output_values': df, - 'status': status - }) + # Read runtime_helpfile.csv if available + df = None + if runtime_file.exists(): + try: + df = pd.read_csv(runtime_file, sep='\t') + except Exception as e: + print(f"WARNING : Error reading runtime_helpfile.csv for {case.name}: {e}") + # Read status file if available + status = 'Unknown' + if status_file.exists(): + try: + raw_lines = [ln.strip() for ln in status_file.read_text(encoding='utf-8').splitlines() if ln.strip()] + if len(raw_lines) >= 2: + status = raw_lines[1] + elif raw_lines: + status = raw_lines[0] + else: + status = 'Empty' except Exception as e: - print(f"Error processing {case.name}: {e}") + print(f"WARNING : Error reading status file in {case.name}: {e}") + else: + print(f"WARNING : Missing status file in {case.name}") + + # # THIS IS ONLY FOR MY CURRENT GRID ON HABROK + # if status in ('Unknown', 'Empty'): + # status = 'Disk quota exceeded' - # Count the number of simulations per status - statuses = [str(case.get('status', 'unknown')).strip() or 'unknown' for case in 
combined_data] + combined_data.append({ + 'init_parameters': init_params, + 'output_values' : df, + 'status' : status + }) + + # --- summary printout --- + statuses = [c['status'] for c in combined_data] status_counts = pd.Series(statuses).value_counts().sort_values(ascending=False) print('-----------------------------------------------------------') print(f"Total number of simulations: {len(statuses)}") print('-----------------------------------------------------------') print("Number of simulations per status:") - for status, count in status_counts.items(): - print(f" - '{status}': {count}") + for st, count in status_counts.items(): + print(f" - {st:<45} : {count}") print('-----------------------------------------------------------') + + return combined_data + return combined_data def get_grid_parameters(grid_dir: str): @@ -160,9 +188,9 @@ def extract_grid_output(cases_data: list, parameter_name: str): for case in cases_data: df = case['output_values'] - # Check if the parameter exists in the output dataframe + if df is None: + continue # Skip cases with no output if parameter_name in df.columns: - # Extract the last value of the parameter from the last row parameter_value = df[parameter_name].iloc[-1] parameter_values.append(parameter_value) else: @@ -203,6 +231,10 @@ def extract_solidification_time(cases_data: list, phi_crit: float): for i, case in enumerate(cases_data): df = case['output_values'] # Check if the required columns exist in the dataframe + if df is None: + solidification_times.append(np.nan) + continue + if 'Phi_global' in df.columns and 'Time' in df.columns: condition = df['Phi_global'] < phi_crit if condition.any(): @@ -219,7 +251,7 @@ def extract_solidification_time(cases_data: list, phi_crit: float): solidification_times.append(np.nan) # Append NaN if columns are missing # Count the number of cases with a status = '10 Completed (solidified)' - status_10_cases = [case for case in cases_data if (case.get('status') or '').strip() == '10 Completed 
(solidified)'] + status_10_cases = [case for case in cases_data if (case.get('status') or '').strip() == 'Completed (solidified)'] completed_count = len(status_10_cases) # Count only valid solidification times (non-NaN) valid_solidification_times = [time for time in solidification_times if not np.isnan(time)] @@ -228,7 +260,7 @@ def extract_solidification_time(cases_data: list, phi_crit: float): print('-----------------------------------------------------------') print(f"Extracted solidification times (Phi_global < {phi_crit})") print(f"→ Found {valid_solidified_count} valid solidified cases based on Phi_global") - print(f"→ Found {completed_count} cases with status '10 Completed (solidified)' ") + print(f"→ Found {completed_count} cases with status 'Completed (solidified)' ") # Check if the number of valid solidified cases matches the number of cases with status '10 Completed (solidified)' in the grid, to be sure the extraction is correct if valid_solidified_count != completed_count: print("WARNING: The number of valid solidified planets does not match the number of planets with status: '10 Completed (solidified)'") @@ -242,7 +274,7 @@ def extract_solidification_time(cases_data: list, phi_crit: float): # else: # print(f"[Status Case {i}] Phi_global column missing.") else: - print("Solidified planets count matches the number of planets with status: '10 Completed (solidified)'.") + print("Solidified planets count matches the number of planets with status: 'Completed (solidified)'.") print('-----------------------------------------------------------') return solidification_times @@ -317,7 +349,7 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic # Write data rows for case_index, case_data in enumerate(cases_data): - status = case_data.get('status', 'unknown') or 'unknown' + status = case_data.get('status', 'Unknown') or 'Unknown' row = [case_index, f"'{status}'"] # Add grid parameters values for each case case_param_values = 
case_params.get(case_index, {}) @@ -336,9 +368,9 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic if __name__ == '__main__': # User needs to specify paths - grid_name = 'escape_grid_atm_a_f02_H_Mstar' - grid_path = f'/Users/emmapostolec/Downloads/output_norma2_rsync/{grid_name}/' - data_dir = f'/Users/emmapostolec/Documents/PHD/SCIENCE/CODES/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' + grid_name = 'escape_grid_habrok_6_params_1Msun_agni' + grid_path = f'//projects/p315557/{grid_name}/' + data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' # User choose the parameters to post-process the grid output_to_extract = ['esc_rate_total','Phi_global', 'P_surf','atm_kg_per_mol'] # Output columns to extract from 'runtime_helpfile.csv' of each case @@ -357,5 +389,5 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic # Done with the post-processing step :) print("Post-processing completed. 
Let's do some plots !") - print('(Please check for any warning messages above before going further.)') + print('(Please check for any WARNING messages above before going further.)') print('-----------------------------------------------------------') From 63b30226efaf516cab563f9a295ab0c9934d1d8f Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 2 May 2025 16:42:42 +0200 Subject: [PATCH 018/105] Almost there with log plots still needs to fix pb --- .../merged_kde_plotting.ipynb | 155 ------- tools/post_processing_grid/plot_grid.py | 420 +++++++++--------- 2 files changed, 215 insertions(+), 360 deletions(-) delete mode 100644 tools/post_processing_grid/merged_kde_plotting.ipynb diff --git a/tools/post_processing_grid/merged_kde_plotting.ipynb b/tools/post_processing_grid/merged_kde_plotting.ipynb deleted file mode 100644 index dc7d8e39e..000000000 --- a/tools/post_processing_grid/merged_kde_plotting.ipynb +++ /dev/null @@ -1,155 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 2, - "id": "648575f5", - "metadata": {}, - "outputs": [], - "source": [ - "# --- Imports ---\n", - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "import matplotlib.colors as mcolors\n", - "import seaborn as sns\n", - "from matplotlib.gridspec import GridSpec\n", - "from scipy.stats import gaussian_kde\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "61ff114c", - "metadata": {}, - "outputs": [], - "source": [ - "# --- Your KDE plot function ---\n", - "# (Paste your KDE logic here if it's modularized in plot_grid.py)\n", - "# If not, adapt the snippet below inside the plotting loop\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "2e926860", - "metadata": {}, - "outputs": [], - "source": [ - "# --- Load your input/output data here ---\n", - "# Example:\n", - "# inputs = {'C/H ratio': np.random.rand(10), 'Separation [AU]': np.random.rand(10)}\n", - "# outputs = {'Surface pressure [bar]': 
[np.random.normal(loc=val, size=100) for val in range(10)]}\n", - "inputs = {} # TODO: Fill with your data\n", - "outputs = {} # TODO: Fill with your data\n", - "\n", - "# Choose input parameter for color\n", - "color_key = 'C/H ratio' # Replace with your key\n" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "f573affd", - "metadata": {}, - "outputs": [], - "source": [ - "# --- Plotting function ---\n", - "def plot_kde_grid(inputs, outputs, color_key):\n", - " n_rows = len(outputs)\n", - " n_cols = len(inputs)\n", - " fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 3.5 * n_rows), sharex=False, sharey=False)\n", - "\n", - " # Normalize color values\n", - " cmap = plt.cm.viridis\n", - " color_vals = inputs[color_key]\n", - " norm = mcolors.LogNorm(vmin=max(np.min(color_vals), 1e-3), vmax=np.max(color_vals))\n", - " sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)\n", - "\n", - " axes = np.array(axes).reshape(n_rows, n_cols)\n", - "\n", - " for i, (out_key, out_vals) in enumerate(outputs.items()):\n", - " for j, (in_key, in_vals) in enumerate(inputs.items()):\n", - " ax = axes[i, j]\n", - " for k in range(len(color_vals)):\n", - " try:\n", - " kde = gaussian_kde(out_vals[k])\n", - " x_vals = np.linspace(np.min(out_vals[k]), np.max(out_vals[k]), 100)\n", - " y_vals = kde(x_vals)\n", - " ax.plot(x_vals, y_vals, color=cmap(norm(color_vals[k])))\n", - " except Exception:\n", - " continue\n", - "\n", - " if i == n_rows - 1:\n", - " ax.set_xlabel(in_key)\n", - " if j == 0:\n", - " ax.set_ylabel(out_key)\n", - "\n", - " # Colorbar\n", - " cbar = fig.colorbar(sm, ax=axes, orientation='vertical', fraction=0.02, pad=0.01)\n", - " cbar.set_label(color_key)\n", - "\n", - " fig.tight_layout()\n", - " plt.show()\n" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "12aef78b", - "metadata": {}, - "outputs": [ - { - "ename": "ValueError", - "evalue": "Number of rows must be a positive integer, not 0", - "output_type": "error", - 
"traceback": [ - "\u001b[31m---------------------------------------------------------------------------\u001b[39m", - "\u001b[31mValueError\u001b[39m Traceback (most recent call last)", - "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[6]\u001b[39m\u001b[32m, line 2\u001b[39m\n\u001b[32m 1\u001b[39m \u001b[38;5;66;03m# --- Run the plot ---\u001b[39;00m\n\u001b[32m----> \u001b[39m\u001b[32m2\u001b[39m \u001b[43mplot_kde_grid\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moutputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcolor_key\u001b[49m\u001b[43m)\u001b[49m\n", - "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 5\u001b[39m, in \u001b[36mplot_kde_grid\u001b[39m\u001b[34m(inputs, outputs, color_key)\u001b[39m\n\u001b[32m 3\u001b[39m n_rows = \u001b[38;5;28mlen\u001b[39m(outputs)\n\u001b[32m 4\u001b[39m n_cols = \u001b[38;5;28mlen\u001b[39m(inputs)\n\u001b[32m----> \u001b[39m\u001b[32m5\u001b[39m fig, axes = \u001b[43mplt\u001b[49m\u001b[43m.\u001b[49m\u001b[43msubplots\u001b[49m\u001b[43m(\u001b[49m\u001b[43mn_rows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_cols\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfigsize\u001b[49m\u001b[43m=\u001b[49m\u001b[43m(\u001b[49m\u001b[32;43m4\u001b[39;49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_cols\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[32;43m3.5\u001b[39;49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m \u001b[49m\u001b[43mn_rows\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msharex\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msharey\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[32m 7\u001b[39m \u001b[38;5;66;03m# Normalize color 
values\u001b[39;00m\n\u001b[32m 8\u001b[39m cmap = plt.cm.viridis\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/proteus/lib/python3.12/site-packages/matplotlib/pyplot.py:1770\u001b[39m, in \u001b[36msubplots\u001b[39m\u001b[34m(nrows, ncols, sharex, sharey, squeeze, width_ratios, height_ratios, subplot_kw, gridspec_kw, **fig_kw)\u001b[39m\n\u001b[32m 1625\u001b[39m \u001b[38;5;250m\u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 1626\u001b[39m \u001b[33;03mCreate a figure and a set of subplots.\u001b[39;00m\n\u001b[32m 1627\u001b[39m \n\u001b[32m (...)\u001b[39m\u001b[32m 1767\u001b[39m \n\u001b[32m 1768\u001b[39m \u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 1769\u001b[39m fig = figure(**fig_kw)\n\u001b[32m-> \u001b[39m\u001b[32m1770\u001b[39m axs = \u001b[43mfig\u001b[49m\u001b[43m.\u001b[49m\u001b[43msubplots\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m=\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mncols\u001b[49m\u001b[43m=\u001b[49m\u001b[43mncols\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msharex\u001b[49m\u001b[43m=\u001b[49m\u001b[43msharex\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msharey\u001b[49m\u001b[43m=\u001b[49m\u001b[43msharey\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 1771\u001b[39m \u001b[43m \u001b[49m\u001b[43msqueeze\u001b[49m\u001b[43m=\u001b[49m\u001b[43msqueeze\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msubplot_kw\u001b[49m\u001b[43m=\u001b[49m\u001b[43msubplot_kw\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 1772\u001b[39m \u001b[43m \u001b[49m\u001b[43mgridspec_kw\u001b[49m\u001b[43m=\u001b[49m\u001b[43mgridspec_kw\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mheight_ratios\u001b[49m\u001b[43m=\u001b[49m\u001b[43mheight_ratios\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 1773\u001b[39m \u001b[43m 
\u001b[49m\u001b[43mwidth_ratios\u001b[49m\u001b[43m=\u001b[49m\u001b[43mwidth_ratios\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1774\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m fig, axs\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/proteus/lib/python3.12/site-packages/matplotlib/figure.py:918\u001b[39m, in \u001b[36mFigureBase.subplots\u001b[39m\u001b[34m(self, nrows, ncols, sharex, sharey, squeeze, width_ratios, height_ratios, subplot_kw, gridspec_kw)\u001b[39m\n\u001b[32m 914\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m\u001b[33mwidth_ratios\u001b[39m\u001b[33m'\u001b[39m\u001b[33m must not be defined both as \u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 915\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mparameter and as key in \u001b[39m\u001b[33m'\u001b[39m\u001b[33mgridspec_kw\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m 916\u001b[39m gridspec_kw[\u001b[33m'\u001b[39m\u001b[33mwidth_ratios\u001b[39m\u001b[33m'\u001b[39m] = width_ratios\n\u001b[32m--> \u001b[39m\u001b[32m918\u001b[39m gs = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43madd_gridspec\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mncols\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfigure\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mgridspec_kw\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 919\u001b[39m axs = gs.subplots(sharex=sharex, sharey=sharey, squeeze=squeeze,\n\u001b[32m 920\u001b[39m subplot_kw=subplot_kw)\n\u001b[32m 921\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m axs\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/proteus/lib/python3.12/site-packages/matplotlib/figure.py:1600\u001b[39m, in \u001b[36mFigureBase.add_gridspec\u001b[39m\u001b[34m(self, 
nrows, ncols, **kwargs)\u001b[39m\n\u001b[32m 1557\u001b[39m \u001b[38;5;250m\u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 1558\u001b[39m \u001b[33;03mLow-level API for creating a `.GridSpec` that has this figure as a parent.\u001b[39;00m\n\u001b[32m 1559\u001b[39m \n\u001b[32m (...)\u001b[39m\u001b[32m 1596\u001b[39m \n\u001b[32m 1597\u001b[39m \u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 1599\u001b[39m _ = kwargs.pop(\u001b[33m'\u001b[39m\u001b[33mfigure\u001b[39m\u001b[33m'\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;66;03m# pop in case user has added this...\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1600\u001b[39m gs = \u001b[43mGridSpec\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m=\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mncols\u001b[49m\u001b[43m=\u001b[49m\u001b[43mncols\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfigure\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1601\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m gs\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/proteus/lib/python3.12/site-packages/matplotlib/gridspec.py:363\u001b[39m, in \u001b[36mGridSpec.__init__\u001b[39m\u001b[34m(self, nrows, ncols, figure, left, bottom, right, top, wspace, hspace, width_ratios, height_ratios)\u001b[39m\n\u001b[32m 360\u001b[39m \u001b[38;5;28mself\u001b[39m.hspace = hspace\n\u001b[32m 361\u001b[39m \u001b[38;5;28mself\u001b[39m.figure = figure\n\u001b[32m--> \u001b[39m\u001b[32m363\u001b[39m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m.\u001b[49m\u001b[34;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mnrows\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mncols\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 364\u001b[39m 
\u001b[43m \u001b[49m\u001b[43mwidth_ratios\u001b[49m\u001b[43m=\u001b[49m\u001b[43mwidth_ratios\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 365\u001b[39m \u001b[43m \u001b[49m\u001b[43mheight_ratios\u001b[49m\u001b[43m=\u001b[49m\u001b[43mheight_ratios\u001b[49m\u001b[43m)\u001b[49m\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/proteus/lib/python3.12/site-packages/matplotlib/gridspec.py:48\u001b[39m, in \u001b[36mGridSpecBase.__init__\u001b[39m\u001b[34m(self, nrows, ncols, height_ratios, width_ratios)\u001b[39m\n\u001b[32m 33\u001b[39m \u001b[38;5;250m\u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 34\u001b[39m \u001b[33;03mParameters\u001b[39;00m\n\u001b[32m 35\u001b[39m \u001b[33;03m----------\u001b[39;00m\n\u001b[32m (...)\u001b[39m\u001b[32m 45\u001b[39m \u001b[33;03m If not given, all rows will have the same height.\u001b[39;00m\n\u001b[32m 46\u001b[39m \u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 47\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(nrows, Integral) \u001b[38;5;129;01mor\u001b[39;00m nrows <= \u001b[32m0\u001b[39m:\n\u001b[32m---> \u001b[39m\u001b[32m48\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[32m 49\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mNumber of rows must be a positive integer, not \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnrows\u001b[38;5;132;01m!r}\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 50\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(ncols, Integral) \u001b[38;5;129;01mor\u001b[39;00m ncols <= \u001b[32m0\u001b[39m:\n\u001b[32m 51\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[32m 52\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mNumber of columns must be a positive integer, not 
\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mncols\u001b[38;5;132;01m!r}\u001b[39;00m\u001b[33m\"\u001b[39m)\n", - "\u001b[31mValueError\u001b[39m: Number of rows must be a positive integer, not 0" - ] - }, - { - "data": { - "text/plain": [ - "
      " - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# --- Run the plot ---\n", - "plot_kde_grid(inputs, outputs, color_key)\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "proteus", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index 300771df2..049f69a6c 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -158,10 +158,12 @@ def group_output_by_parameter(df,grid_parameters,outputs): for output in outputs: key_name = f"{output}_per_{param}" value_dict = {} - for param_value in df[param].dropna().unique(): subset = df[df[param] == param_value] - output_values = subset[output].dropna().tolist() + output_values = subset[output].replace([np.inf, -np.inf], np.nan) + output_values = output_values.dropna() + output_values = output_values[output_values > 0] # Remove zeros and negatives + value_dict[param_value] = output_values grouped[key_name] = value_dict @@ -252,89 +254,80 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): print(f"Grid status histogram successfully saved to {output_path}") -def kde_cumulative(values, color, ax, bw_adjust=0.3, log_x=False, **kwargs): +def plot_hist_kde(values, ax, color, plot_hist=True, plot_kde=True, cumulative=True, log_x=False, bins=100, bw_adjust=0.3, hist_element="step", kde_kwargs={}, hist_kwargs={}): """ - Plot a cumulative KDE curve on the given x-axis. + Plot a cumulative or non-cumulative histograms and/or KDE curves on the given x-axis. 
Parameters ---------- values : array-like - Data points on the x-axis used to calculate the KDE. - - color : str or matplotlib color - The color of the KDE curve. - + Data points on the x-axis used to calculate the histogram/KDE. ax : matplotlib.axes.Axes - The axis on which to plot the KDE. - - bw_adjust : float, optional, default=0.3 - Bandwidth adjustment factor : controls the smoothness of the KDE. - Smaller values make the KDE more sensitive to data, while larger values smooth it out. - - log_x : bool, optional - If True, sets the x-axis to logarithmic scale. + Axis to draw the plot on. + color : str + Color for both histogram and KDE. + plot_hist : bool + Whether to plot a histogram. + plot_kde : bool + Whether to plot a KDE curve. + cumulative : bool + Whether to plot the cumulative distribution. + log_x : bool + Whether to use a logarithmic x-axis. + bw_adjust : float + Bandwidth adjustment for KDE. + bins : int + Number of bins for the histogram. + hist_element : str + 'step' or 'bars' for histogram style. + kde_kwargs : dict + Additional kwargs for sns.kdeplot. + hist_kwargs : dict + Additional kwargs for sns.histplot. + """ - **kwargs : keyword arguments, optional - Additional arguments passed to `sns.kdeplot`, such as `label`, `linewidth`, etc. + if plot_hist: + sns.histplot( + values, + bins=bins, + cumulative=cumulative, + stat="density", + element=hist_element, + color=color, + ax=ax, + kde=False, + **hist_kwargs + ) - """ - sns.kdeplot( + if plot_kde: + sns.kdeplot( values, - cumulative=True, + cumulative=cumulative, bw_adjust=bw_adjust, clip=(np.min(values), np.max(values)), common_grid=True, color=color, ax=ax, - **kwargs) + **kde_kwargs + ) - # Set axis scale : linear or log - if log_x==True: + # Axis scaling + if log_x: ax.set_xscale('log') + ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(safe_log_formatter)) else: - ax.set_xscale('linear') - - - """ - Plot a cumulative KDE curve on the given x-axis with a log scale for the x-axis. 
- - Parameters - ---------- - values : array-like - Data points on the x-axis used to calculate the KDE. - - color : str or matplotlib color - The color of the KDE curve. - - ax : matplotlib.axes.Axes - The axis on which to plot the KDE. - - bw_adjust : float, optional, default=0.3 - Bandwidth adjustment factor : controls the smoothness of the KDE. - Smaller values make the KDE more sensitive to data, while larger values smooth it out. - - **kwargs : keyword arguments, optional - Additional arguments passed to `sns.kdeplot`, such as `label`, `linewidth`, etc. - """ - # Plot the cumulative KDE - sns.kdeplot( - values, - cumulative=True, - bw_adjust=bw_adjust, - clip=(np.min(values), np.max(values)), - common_grid=True, - color=color, - ax=ax, - **kwargs - ) - - # Set x-axis to log scale - ax.set_xscale('log') + ax.set_xscale('linear') - # Format the x-axis to show powers of 10 - ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(lambda x, _: r'$10^{{{}}}$'.format(int(np.log10(x))))) +def safe_log_formatter(x, _): + try: + if x > 0: + return r'$10^{{{}}}$'.format(int(np.log10(x))) + else: + return '' + except Exception: + return '' -def plot_kde_cumulative(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None, vmax=None, key_label="Parameter", ax=None, save_path=None, bw_adjust=0.3, log_x=False, tick_values=None, norm=None): +def plot_distributions(data_dict, xlabel, ylabel, colormap, vmin=None, vmax=None, key_label="Parameter", ax=None, save_path=None, bw_adjust=0.3, log_x=True, tick_values=None, norm=None, plot_hist=True, plot_kde=True, cumulative=True, bins=100): """ Plot cumulative KDE curves for one of the output parameters of the grid (like esc_rate_total, solidification_time) on the x-axis. The different curves correspond to a input parameter from the grid with a color mapped on the right side of the plot. 
@@ -396,17 +389,24 @@ def plot_kde_cumulative(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None # Use provided or default norm if norm is None: norm = mcolors.Normalize(vmin=vmin, vmax=vmax) - sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) + sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm) # Plot each group for key in keys: values = np.array(data_dict[key]) + + # Filter out invalid values for log scale + values = values[np.isfinite(values)] # Remove inf and NaN + if log_x: + values = values[values > 0] # Remove zero and negatives + if len(values) < 2: + print(f"Skipping key {key} due to insufficient valid data points.") continue - color = cmap(norm(key)) - kde_cumulative(values, color=color, ax=ax, bw_adjust=bw_adjust, log_x=log_x, label=str(key)) + color = colormap(norm(key)) + plot_hist_kde(values=values, ax=ax, color=color, plot_hist=plot_hist, plot_kde=plot_kde, cumulative=cumulative, log_x=log_x, bins=bins, bw_adjust=0.3, hist_element="step", kde_kwargs={}, hist_kwargs={}) ax.set_xlabel(xlabel, fontsize=12) ax.set_ylabel(ylabel, fontsize=12) @@ -433,7 +433,7 @@ def plot_kde_cumulative(data_dict, xlabel, ylabel, cmap=plt.cm.plasma, vmin=None plt.savefig(save_path, dpi=300) plt.close(fig) -def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params, log_x=False): +def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params, log_x=True, plot_hist=True, plot_kde=True, cumulative=True, bins=100): """ Generate and save normalized cumulative distribution plots for each output vs. grid parameter. 
@@ -512,11 +512,11 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa else mcolors.Normalize(vmin=vmin, vmax=vmax) ) - plot_kde_cumulative( + plot_distributions( data_dict=data_dict, xlabel=xlabel, ylabel=ylabel, - cmap=cmap, + colormap=cmap, vmin=vmin, vmax=vmax, key_label=key_label, @@ -524,129 +524,134 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa tick_values=tick_values, save_path=save_path, norm=norm, - ax=ax + ax=ax, + bw_adjust=0.3, + plot_hist=plot_hist, + plot_kde=plot_kde, + cumulative=cumulative, + bins=bins ) plt.close(fig) - print("All linear-scale single plots saved.") - -def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params, log_x=False): - """ - Generate and save normalized cumulative distribution plots for each output vs. grid parameters. - This creates a subplot where each column corresponds to one extracted output, and each row corresponds to one grid parameter. - - Parameters: - ---------- - extracted_outputs : list of str - List of extracted output quantities to plot. - - grouped_data : dict - Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) - - grid_params : dict - Parameter values used in the grid. - - plots_path : str - Directory where plots will be saved. - - param_label_map : dict - Dictionary containing the label of the grid parameter for the plot. - - colormaps_by_param : dict - Dictionary containing the colormap to use for each grid parameter for the plot. - - output_label_map : dict, optional - Dictionary containing the label of the extracted output quantity for the plot. - - log_scale_grid_params : list of str, optional - Parameters to use log scale for colormap normalization. 
(like escape.zephyrus.Pxuv) - """ - - if param_label_map is None: - raise ValueError("param_label_map must be provided.") - if colormaps_by_param is None: - raise ValueError("colormaps_by_param must be provided.") - if output_label_map is None: - raise ValueError("output_label_map must be provided.") - if log_scale_grid_params is None: - log_scale_grid_params = [] - - num_cols = len(extracted_outputs) - num_rows = len(grid_params) - - # Create subplots with the appropriate layout - fig, axes = plt.subplots(num_rows, num_cols, figsize=(num_cols * 16, num_rows * 12)) - plt.subplots_adjust(hspace=0.15, wspace=0.15) - - if num_rows == 1: - axes = np.expand_dims(axes, axis=0) # Make sure it's 2D if only one row. - - for i, output_name in enumerate(extracted_outputs): - for j, (param, cmap) in enumerate(colormaps_by_param.items()): - data_key = f"{output_name}_per_{param}" - - if data_key not in grouped_data: - print(f"WARNING: Skipping {data_key} — not found in grouped_data") - continue - - data_dict_raw = grouped_data[data_key] - data_dict = data_dict_raw.copy() - - if output_name == "Phi_global": - data_dict = {k: [v_i * 100 for v_i in v] for k, v in data_dict.items()} - - tick_values = grid_params.get(param) - vmin = min(tick_values) - vmax = max(tick_values) - - xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) - ylabel = "Normalized cumulative fraction of simulations" - key_label = param_label_map.get(param, param.replace("_", " ").title()) - - # Select the axis for this subplot - ax = axes[j, i] - - norm = ( - mcolors.LogNorm(vmin=vmin, vmax=vmax) - if param in log_scale_grid_params - else mcolors.Normalize(vmin=vmin, vmax=vmax) - ) - - plot_kde_cumulative( - data_dict=data_dict, - xlabel=xlabel, - ylabel=ylabel, - cmap=cmap, - vmin=vmin, - vmax=vmax, - key_label=key_label, - log_x=log_x, - tick_values=tick_values, - save_path=None, # Don't save the plot yet - norm=norm, - ax=ax - ) - - # Set titles for each subplot - if i == 0: - 
ax.set_ylabel(ylabel, fontsize=12) - if j == num_rows - 1: - ax.set_xlabel(xlabel, fontsize=12) - - # Customize plot with grid and ticks - ax.grid(alpha=0.2) - ax.set_ylim(0, 1.02) - - # Save the complete subplot figure - save_dir = Path(plots_path) / 'grid_plot' - plot_dir_exists(save_dir) - - save_name = "cumulative_grid_plot.png" - save_path = save_dir / save_name - plt.savefig(save_path, dpi=300) - plt.close(fig) - - print(f"All subplot plots saved to {save_path}") + print("All single plots saved.") + +# def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params, log_x=True, bins=100): +# """ +# Generate and save normalized cumulative distribution plots for each output vs. grid parameters. +# This creates a subplot where each column corresponds to one extracted output, and each row corresponds to one grid parameter. + +# Parameters: +# ---------- +# extracted_outputs : list of str +# List of extracted output quantities to plot. + +# grouped_data : dict +# Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) + +# grid_params : dict +# Parameter values used in the grid. + +# plots_path : str +# Directory where plots will be saved. + +# param_label_map : dict +# Dictionary containing the label of the grid parameter for the plot. + +# colormaps_by_param : dict +# Dictionary containing the colormap to use for each grid parameter for the plot. + +# output_label_map : dict, optional +# Dictionary containing the label of the extracted output quantity for the plot. + +# log_scale_grid_params : list of str, optional +# Parameters to use log scale for colormap normalization. 
(like escape.zephyrus.Pxuv) +# """ + +# if param_label_map is None: +# raise ValueError("param_label_map must be provided.") +# if colormaps_by_param is None: +# raise ValueError("colormaps_by_param must be provided.") +# if output_label_map is None: +# raise ValueError("output_label_map must be provided.") +# if log_scale_grid_params is None: +# log_scale_grid_params = [] + +# num_cols = len(extracted_outputs) +# num_rows = len(grid_params) + +# # Create subplots with the appropriate layout +# fig, axes = plt.subplots(num_rows, num_cols, figsize=(num_cols * 16, num_rows * 12)) +# plt.subplots_adjust(hspace=0.15, wspace=0.15) + +# if num_rows == 1: +# axes = np.expand_dims(axes, axis=0) # Make sure it's 2D if only one row. + +# for i, output_name in enumerate(extracted_outputs): +# for j, (param, cmap) in enumerate(colormaps_by_param.items()): +# data_key = f"{output_name}_per_{param}" + +# if data_key not in grouped_data: +# print(f"WARNING: Skipping {data_key} — not found in grouped_data") +# continue + +# data_dict_raw = grouped_data[data_key] +# data_dict = data_dict_raw.copy() + +# if output_name == "Phi_global": +# data_dict = {k: [v_i * 100 for v_i in v] for k, v in data_dict.items()} + +# tick_values = grid_params.get(param) +# vmin = min(tick_values) +# vmax = max(tick_values) + +# xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) +# ylabel = "Normalized cumulative fraction of simulations" +# key_label = param_label_map.get(param, param.replace("_", " ").title()) + +# # Select the axis for this subplot +# ax = axes[j, i] + +# norm = ( +# mcolors.LogNorm(vmin=vmin, vmax=vmax) +# if param in log_scale_grid_params +# else mcolors.Normalize(vmin=vmin, vmax=vmax) +# ) + +# plot_kde_cumulative( +# data_dict=data_dict, +# xlabel=xlabel, +# ylabel=ylabel, +# cmap=cmap, +# vmin=vmin, +# vmax=vmax, +# key_label=key_label, +# log_x=log_x, +# tick_values=tick_values, +# save_path=None, # Don't save the plot yet +# norm=norm, +# ax=ax +# 
) + +# # Set titles for each subplot +# if i == 0: +# ax.set_ylabel(ylabel, fontsize=12) +# if j == num_rows - 1: +# ax.set_xlabel(xlabel, fontsize=12) + +# # Customize plot with grid and ticks +# ax.grid(alpha=0.2) +# ax.set_ylim(0, 1.02) + +# # Save the complete subplot figure +# save_dir = Path(plots_path) / 'grid_plot' +# plot_dir_exists(save_dir) + +# save_name = "cumulative_grid_plot.png" +# save_path = save_dir / save_name +# plt.savefig(save_path, dpi=300) +# plt.close(fig) + +# print(f"All subplot plots saved to {save_path}") if __name__ == '__main__': @@ -668,7 +673,7 @@ def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, "orbit.semimajoraxis": "Semi-major axis [AU]", "escape.zephyrus.Pxuv": r"$P_{XUV}$ [bar]", "escape.zephyrus.efficiency": r"Escape efficiency factor, $\epsilon$", - "outgas.fO2_shift_IW":r"$log_{10}$($fO_2$) ($\Delta$ IW)", + "outgas.fO2_shift_IW":r"$log_{10}$($fO_2$) [$\Delta$ IW]", #"atmos_clim.module": "Atmospheric module", "delivery.elements.CH_ratio": "C/H ratio", "delivery.elements.H_oceans": "[H] [oceans]", @@ -684,7 +689,7 @@ def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, "escape.zephyrus.Pxuv": cm.cividis, "escape.zephyrus.efficiency": cm.spring, "outgas.fO2_shift_IW": cm.coolwarm, - "delivery.elements.CH_ratio":cm.PuOr, + "delivery.elements.CH_ratio":cm.copper, #"atmos_clim.module": cm.Dark2, "delivery.elements.H_oceans": cm.winter, #"star.mass": cm.RdYlBu @@ -700,17 +705,22 @@ def generate_grid_plot(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map=param_label_map, colormaps_by_param=colormaps_by_param, output_label_map=output_label_map, - log_scale_grid_params=log_scale_grid_params) - - generate_grid_plot( - extracted_outputs=extracted_outputs, - grouped_data=grouped_data, - grid_params=grid_params, - plots_path=plots_path, - param_label_map=param_label_map, - colormaps_by_param=colormaps_by_param, - output_label_map=output_label_map, - 
log_scale_grid_params=log_scale_grid_params) + log_scale_grid_params=log_scale_grid_params, + log_x=True, + plot_hist=True, + plot_kde=True, + cumulative=True, + bins=100) + + # generate_grid_plot( + # extracted_outputs=extracted_outputs, + # grouped_data=grouped_data, + # grid_params=grid_params, + # plots_path=plots_path, + # param_label_map=param_label_map, + # colormaps_by_param=colormaps_by_param, + # output_label_map=output_label_map, + # log_scale_grid_params=log_scale_grid_params) print('-----------------------------------------------------') print("All plots completed. Let's do some analyse now :) !") From f35b076e9c397a99f9766e2a15c22e2568d3ead1 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 6 May 2025 17:25:56 +0200 Subject: [PATCH 019/105] test vs code commit --- src/proteus/escape/wrapper.py | 7 ------- tools/post_processing_grid/plot_grid.py | 2 +- tools/post_processing_grid/post_processing_grid.py | 4 ++-- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/src/proteus/escape/wrapper.py b/src/proteus/escape/wrapper.py index 8186fbe4b..5308c0cb5 100644 --- a/src/proteus/escape/wrapper.py +++ b/src/proteus/escape/wrapper.py @@ -131,17 +131,10 @@ def calc_new_elements(hf_row:dict, dt:float, reservoir:str, min_thresh:float=1e1 res[e] = hf_row[e+key] M_vols = sum(list(res.values())) -<<<<<<< HEAD - # To avoid division by zero or very small values error for M_vols - if M_vols < 10.0: - log.warning("M_vols is too small (%.2e). Setting all target elements to zero." % M_vols) - return {e: 0.0 for e in res.keys()} -======= # check if we just desiccated the planet... 
if M_vols < min_thresh: log.debug(" Total mass of volatiles below threshold in escape calculation") return res ->>>>>>> main # calculate the current mass mixing ratio for each element # if escape is unfractionating, this should be conserved diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index 049f69a6c..ee173789c 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -656,7 +656,7 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa if __name__ == '__main__': # User needs to specify paths - grid_name = 'escape_grid_habrok_6_params_1Msun_agni' + grid_name = 'escape_grid_habrok_5_params_a_0.1_1Msun_agni' data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' plots_path = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_plots/{grid_name}/' diff --git a/tools/post_processing_grid/post_processing_grid.py b/tools/post_processing_grid/post_processing_grid.py index 9b4ae286b..4c5ee6f55 100644 --- a/tools/post_processing_grid/post_processing_grid.py +++ b/tools/post_processing_grid/post_processing_grid.py @@ -368,8 +368,8 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic if __name__ == '__main__': # User needs to specify paths - grid_name = 'escape_grid_habrok_6_params_1Msun_agni' - grid_path = f'//projects/p315557/{grid_name}/' + grid_name = 'escape_grid_habrok_5_params_a_0.1_1Msun_agni' + grid_path = f'/home2/p315557/PROTEUS/output/scratch/{grid_name}/' data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' # User choose the parameters to post-process the grid From ed4b02e38270522cdf614a357feca35672fb6a21 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 7 May 2025 12:09:11 +0200 Subject: [PATCH 020/105] minor changes --- tools/post_processing_grid/plot_grid.py | 25 +++++++++++++++++++------ 1 file changed, 19 
insertions(+), 6 deletions(-) diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index ee173789c..56a61e1da 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -286,6 +286,10 @@ def plot_hist_kde(values, ax, color, plot_hist=True, plot_kde=True, cumulative=T Additional kwargs for sns.histplot. """ + # Apply log transform if needed + if log_x: + values = np.log10(values) + if plot_hist: sns.histplot( values, @@ -311,10 +315,13 @@ def plot_hist_kde(values, ax, color, plot_hist=True, plot_kde=True, cumulative=T **kde_kwargs ) - # Axis scaling + # Set axis labels and formatting if log_x: - ax.set_xscale('log') - ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(safe_log_formatter)) + # Keep axis in linear scale (values already log-transformed), + # but show ticks as powers of 10 + ax.set_xlabel("log10(x)") + ax.set_xticks(np.log10(ticks := np.geomspace(np.nanmin(10**values), np.nanmax(10**values), num=5))) + ax.set_xticklabels([f"{int(tick):.0e}" for tick in ticks]) else: ax.set_xscale('linear') @@ -399,7 +406,8 @@ def plot_distributions(data_dict, xlabel, ylabel, colormap, vmin=None, vmax=None values = values[np.isfinite(values)] # Remove inf and NaN if log_x: values = values[values > 0] # Remove zero and negatives - + values = np.log10(values) + if len(values) < 2: print(f"Skipping key {key} due to insufficient valid data points.") continue @@ -407,8 +415,8 @@ def plot_distributions(data_dict, xlabel, ylabel, colormap, vmin=None, vmax=None color = colormap(norm(key)) plot_hist_kde(values=values, ax=ax, color=color, plot_hist=plot_hist, plot_kde=plot_kde, cumulative=cumulative, log_x=log_x, bins=bins, bw_adjust=0.3, hist_element="step", kde_kwargs={}, hist_kwargs={}) - - ax.set_xlabel(xlabel, fontsize=12) + + ax.set_xlabel(f"log10({xlabel})" if log_x else xlabel, fontsize=12) ax.set_ylabel(ylabel, fontsize=12) ax.set_ylim(0, 1.02) ax.grid(alpha=0.2) @@ -433,6 +441,8 @@ 
def plot_distributions(data_dict, xlabel, ylabel, colormap, vmin=None, vmax=None plt.savefig(save_path, dpi=300) plt.close(fig) + + def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params, log_x=True, plot_hist=True, plot_kde=True, cumulative=True, bins=100): """ Generate and save normalized cumulative distribution plots for each output vs. grid parameter. @@ -512,6 +522,9 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa else mcolors.Normalize(vmin=vmin, vmax=vmax) ) + df = pd.DataFrame(data_dict) + keys = list(df.columns) + plot_distributions( data_dict=data_dict, xlabel=xlabel, From c000b1a3b06e5b9328379c555786130b36002fc8 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Thu, 8 May 2025 17:26:03 +0200 Subject: [PATCH 021/105] small test to have consistent plot --- tools/post_processing_grid/plot_grid.py | 212 ++++++++++++++++++------ 1 file changed, 163 insertions(+), 49 deletions(-) diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index 56a61e1da..fbe85ccd3 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -678,52 +678,166 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it. 
grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters - # Plots - plot_grid_status(df, plots_path) # Plot the grid status in an histogram - - # Single plots - param_label_map = { - "orbit.semimajoraxis": "Semi-major axis [AU]", - "escape.zephyrus.Pxuv": r"$P_{XUV}$ [bar]", - "escape.zephyrus.efficiency": r"Escape efficiency factor, $\epsilon$", - "outgas.fO2_shift_IW":r"$log_{10}$($fO_2$) [$\Delta$ IW]", - #"atmos_clim.module": "Atmospheric module", - "delivery.elements.CH_ratio": "C/H ratio", - "delivery.elements.H_oceans": "[H] [oceans]", - "star.mass": r"Stellar mass [M$_\odot$]"} - output_label_map = { - "solidification_time": "Solidification time [yr]", - "esc_rate_total": "Total escape rate [kg/s]", - "Phi_global": "Melt fraction [%]", - "P_surf": "Surface pressure [bar]", - "atm_kg_per_mol": "Atmospheric mass [kg/mol]"} - colormaps_by_param = { - "orbit.semimajoraxis": cm.plasma, - "escape.zephyrus.Pxuv": cm.cividis, - "escape.zephyrus.efficiency": cm.spring, - "outgas.fO2_shift_IW": cm.coolwarm, - "delivery.elements.CH_ratio":cm.copper, - #"atmos_clim.module": cm.Dark2, - "delivery.elements.H_oceans": cm.winter, - #"star.mass": cm.RdYlBu - } - - log_scale_grid_params = ["escape.zephyrus.Pxuv"] - - generate_single_plots( - extracted_outputs=extracted_outputs, - grouped_data=grouped_data, - grid_params=grid_params, - plots_path=plots_path, - param_label_map=param_label_map, - colormaps_by_param=colormaps_by_param, - output_label_map=output_label_map, - log_scale_grid_params=log_scale_grid_params, - log_x=True, - plot_hist=True, - plot_kde=True, - cumulative=True, - bins=100) + ## Plots + #plot_grid_status(df, plots_path) # Plot the grid status in an histogram + + ############## TEST PLOT ############## + # # Extract the grouped dict + # grouped_values = grouped_data['P_surf_per_escape.zephyrus.Pxuv'] + + # # Set up colormap and normalization + # colormap = cm.cividis + # param_vals = 
sorted(grouped_values.keys()) + # norm = plt.Normalize(vmin=min(param_vals), vmax=max(param_vals)) + + # # Create a single plot with all curves + # fig, ax = plt.subplots(figsize=(7, 5)) + + # for pxuv_val in param_vals: + # values = grouped_values[pxuv_val].dropna().values # Clean NaNs + # if len(values) == 0: + # continue # Skip empty + + # plot_hist_kde( + # values=values, + # ax=ax, + # color=colormap(norm(pxuv_val)), + # plot_hist=True, + # plot_kde=True, + # cumulative=True, + # log_x=False, # Log-scale for P_surf + # bins=10 + # ) + + # # Add colorbar for Pxuv mapping + # sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm) + # sm.set_array([]) + # cbar = plt.colorbar(sm, ax=ax) + # cbar.set_label(r"$P_{XUV}$ [bar]") + + # ax.set_ylabel("Cumulative density") + # ax.set_xlabel("Surface pressure [bar]") + # ax.set_title("Cumulative P_surf distributions for different $P_{XUV}$ values") + + # plt.tight_layout() + # plt.savefig(plots_path + "test_plot_P_surf_per_Pxuv.png", dpi=300) + # plt.close() + + plt.figure(figsize=(10, 6)) + for pxuv in grid_params['escape.zephyrus.Pxuv'] : + sns.histplot(data=np.array(grouped_data['P_surf_per_escape.zephyrus.Pxuv'][pxuv]), + bins=5, + kde=True, + kde_kws={'bw_adjust': 0.5}, + stat="density", + element="step", + cumulative=False, + log_scale=False, + fill=False, + linewidth=1.5, + color=cm.cividis(pxuv) + ) + plt.xlabel("Surface pressure [bar]") + plt.ylabel("Empirical cumulative density") + plt.savefig(plots_path + "test_plot_P_surf_per_Pxuv.png", dpi=300) + + # grouped_data = { + # 'P_surf_per_escape.zephyrus.Pxuv': { + # 1e-05: np.random.lognormal(mean=0, sigma=1, size=500), + # 0.01: np.random.lognormal(mean=1, sigma=1, size=500), + # 10.0: np.random.lognormal(mean=2, sigma=1, size=500) + # } + # } + # pxuv_vals = [1e-05, 0.01, 10.0] + + # # --- figure + axis --- + # fig, ax = plt.subplots(figsize=(10, 6)) + + # # --- set up normalization + ScalarMappable for the colorbar --- + # norm = 
mcolors.LogNorm(vmin=min(pxuv_vals), vmax=max(pxuv_vals)) + # # if you want linear scaling, use Normalize instead: + # # norm = mcolors.Normalize(vmin=min(pxuv_vals), vmax=max(pxuv_vals)) + + # sm = cm.ScalarMappable(norm=norm, cmap='cividis') + # sm.set_array([]) # dummy array for the mappable + + # # --- loop over pxuv values, plotting each line --- + # for pxuv in pxuv_vals: + # color = cm.cividis(norm(pxuv)) + # sns.histplot( + # data=grouped_data['P_surf_per_escape.zephyrus.Pxuv'][pxuv], + # bins=30, + # kde=True, + # kde_kws={'bw_adjust': 0.1}, + # stat="density", + # element="step", + # cumulative=True, + # fill=False, + # linewidth=1.5, + # color=color, + # ax=ax + # ) + + # # --- labels --- + # ax.set_xlabel("Surface pressure [bar]") + # ax.set_ylabel("Cumulative density") + + # # --- move y-axis ticks & label to the right --- + # # ax.yaxis.tick_right() + # # ax.yaxis.set_label_position("right") + # # ax.tick_params(axis='y', labelright=True, labelleft=False) + + # # --- add the colorbar --- + # cbar = fig.colorbar(sm, ax=ax, pad=0.02) + # cbar.set_label("Pxuv value") + + # # --- save or show --- + # plt.tight_layout() + # plt.savefig(plots_path + "test_plot_P_surf_per_Pxuv.png", dpi=300) + + # # Single plots + # param_label_map = { + # "orbit.semimajoraxis": "Semi-major axis [AU]", + # "escape.zephyrus.Pxuv": r"$P_{XUV}$ [bar]", + # "escape.zephyrus.efficiency": r"Escape efficiency factor, $\epsilon$", + # "outgas.fO2_shift_IW":r"$log_{10}$($fO_2$) [$\Delta$ IW]", + # #"atmos_clim.module": "Atmospheric module", + # "delivery.elements.CH_ratio": "C/H ratio", + # "delivery.elements.H_oceans": "[H] [oceans]", + # "star.mass": r"Stellar mass [M$_\odot$]"} + # output_label_map = { + # "solidification_time": "Solidification time [yr]", + # "esc_rate_total": "Total escape rate [kg/s]", + # "Phi_global": "Melt fraction [%]", + # "P_surf": "Surface pressure [bar]", + # "atm_kg_per_mol": "Atmospheric mass [kg/mol]"} + # colormaps_by_param = { + # 
"orbit.semimajoraxis": cm.plasma, + # "escape.zephyrus.Pxuv": cm.cividis, + # "escape.zephyrus.efficiency": cm.spring, + # "outgas.fO2_shift_IW": cm.coolwarm, + # "delivery.elements.CH_ratio":cm.copper, + # #"atmos_clim.module": cm.Dark2, + # "delivery.elements.H_oceans": cm.winter, + # #"star.mass": cm.RdYlBu + # } + + # log_scale_grid_params = ["escape.zephyrus.Pxuv"] + + # generate_single_plots( + # extracted_outputs=extracted_outputs, + # grouped_data=grouped_data, + # grid_params=grid_params, + # plots_path=plots_path, + # param_label_map=param_label_map, + # colormaps_by_param=colormaps_by_param, + # output_label_map=output_label_map, + # log_scale_grid_params=log_scale_grid_params, + # log_x=True, + # plot_hist=True, + # plot_kde=True, + # cumulative=True, + # bins=100) # generate_grid_plot( # extracted_outputs=extracted_outputs, @@ -735,6 +849,6 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa # output_label_map=output_label_map, # log_scale_grid_params=log_scale_grid_params) - print('-----------------------------------------------------') - print("All plots completed. Let's do some analyse now :) !") - print('-----------------------------------------------------') + # print('-----------------------------------------------------') + # print("All plots completed. 
Let's do some analyse now :) !") + # print('-----------------------------------------------------') From 2abe16984a95d08a6f95b7642ea43a3a18d9a991 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Sun, 11 May 2025 19:40:11 +0200 Subject: [PATCH 022/105] try to fix error log : now using ecdfplot for a single plot and its working --- tools/post_processing_grid/plot_grid.py | 72 ++++++++++++++----- .../post_processing_grid.py | 2 +- 2 files changed, 54 insertions(+), 20 deletions(-) diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index fbe85ccd3..b7c43382e 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -5,6 +5,7 @@ import pandas as pd import numpy as np import seaborn as sns +import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.colors as mcolors import matplotlib.cm as cm @@ -669,7 +670,7 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa if __name__ == '__main__': # User needs to specify paths - grid_name = 'escape_grid_habrok_5_params_a_0.1_1Msun_agni' + grid_name = 'escape_grid_habrok_7_params_1Msun' data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' plots_path = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_plots/{grid_name}/' @@ -678,8 +679,8 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it. 
grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters - ## Plots - #plot_grid_status(df, plots_path) # Plot the grid status in an histogram + # # Plots + # plot_grid_status(df, plots_path) # Plot the grid status in an histogram ############## TEST PLOT ############## # # Extract the grouped dict @@ -723,23 +724,56 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa # plt.savefig(plots_path + "test_plot_P_surf_per_Pxuv.png", dpi=300) # plt.close() - plt.figure(figsize=(10, 6)) - for pxuv in grid_params['escape.zephyrus.Pxuv'] : - sns.histplot(data=np.array(grouped_data['P_surf_per_escape.zephyrus.Pxuv'][pxuv]), - bins=5, - kde=True, - kde_kws={'bw_adjust': 0.5}, - stat="density", - element="step", - cumulative=False, - log_scale=False, - fill=False, - linewidth=1.5, - color=cm.cividis(pxuv) +###### PRINT TO TEST/VERIFY DATA ###### + # print('Phi_global :') + # print(df['Phi_global'].describe()) + # print('Solidification time :') + # print(df['solidification_time'].describe()) + # print('Escape rate :') + # print(df['esc_rate_total'].describe()) + # print('P_surf :') + # print(df['P_surf'].describe()) + # print(df) + # print(df.columns) # Print the columns of the DataFrame + #print(grouped_data.keys()) # Print the keys of the grouped data + # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][1.0])) + # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][5.0])) + # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][10.0])) +################################### + + + elements = grid_params['delivery.elements.H_oceans'] + # Colorbar setup + norm = mpl.colors.Normalize(vmin=min(elements), vmax=max(elements)) + cmap = cm.winter + + # Figure setup + fig, ax = plt.subplots(figsize=(10, 6)) + for param in grid_params['delivery.elements.H_oceans'] : + 
sns.ecdfplot(data=np.array(grouped_data['solidification_time_per_delivery.elements.H_oceans'][param]), + log_scale=True, + stat="proportion", + color=cmap(norm(param)), + linewidth=2, + ax=ax ) - plt.xlabel("Surface pressure [bar]") - plt.ylabel("Empirical cumulative density") - plt.savefig(plots_path + "test_plot_P_surf_per_Pxuv.png", dpi=300) + + # Name of the axis + ax.set_xlabel("Solidification time [yr]", fontsize=14) + ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) + # Grid + ax.grid(alpha=0.1) + # Colorbar setup + sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) + sm.set_array([]) + cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) + cbar.set_label("[H] [oceans]", fontsize=14) + cbar.set_ticks(elements) + # Save the figure + plt.tight_layout() + plt.savefig(plots_path + "test/" + "test_plot_ecdf_solidfication_time_per_delivery.elements.H_oceans.png", dpi=300) + + print(grid_params.keys()) # grouped_data = { # 'P_surf_per_escape.zephyrus.Pxuv': { diff --git a/tools/post_processing_grid/post_processing_grid.py b/tools/post_processing_grid/post_processing_grid.py index 4c5ee6f55..3da274164 100644 --- a/tools/post_processing_grid/post_processing_grid.py +++ b/tools/post_processing_grid/post_processing_grid.py @@ -368,7 +368,7 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic if __name__ == '__main__': # User needs to specify paths - grid_name = 'escape_grid_habrok_5_params_a_0.1_1Msun_agni' + grid_name = 'escape_grid_habrok_7_params_1Msun' grid_path = f'/home2/p315557/PROTEUS/output/scratch/{grid_name}/' data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' From d116058e7b1b2e5a2ec0ae9f44f9e0a9a7a1a51f Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Sun, 11 May 2025 21:03:35 +0200 Subject: [PATCH 023/105] clean a bit plot_grid.py --- tools/post_processing_grid/plot_grid.py | 105 ++---------------------- 1 file changed, 6 insertions(+), 99 deletions(-) 
diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index b7c43382e..890ab8035 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -683,48 +683,8 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa # plot_grid_status(df, plots_path) # Plot the grid status in an histogram ############## TEST PLOT ############## - # # Extract the grouped dict - # grouped_values = grouped_data['P_surf_per_escape.zephyrus.Pxuv'] - - # # Set up colormap and normalization - # colormap = cm.cividis - # param_vals = sorted(grouped_values.keys()) - # norm = plt.Normalize(vmin=min(param_vals), vmax=max(param_vals)) - - # # Create a single plot with all curves - # fig, ax = plt.subplots(figsize=(7, 5)) - - # for pxuv_val in param_vals: - # values = grouped_values[pxuv_val].dropna().values # Clean NaNs - # if len(values) == 0: - # continue # Skip empty - - # plot_hist_kde( - # values=values, - # ax=ax, - # color=colormap(norm(pxuv_val)), - # plot_hist=True, - # plot_kde=True, - # cumulative=True, - # log_x=False, # Log-scale for P_surf - # bins=10 - # ) - - # # Add colorbar for Pxuv mapping - # sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm) - # sm.set_array([]) - # cbar = plt.colorbar(sm, ax=ax) - # cbar.set_label(r"$P_{XUV}$ [bar]") - - # ax.set_ylabel("Cumulative density") - # ax.set_xlabel("Surface pressure [bar]") - # ax.set_title("Cumulative P_surf distributions for different $P_{XUV}$ values") - - # plt.tight_layout() - # plt.savefig(plots_path + "test_plot_P_surf_per_Pxuv.png", dpi=300) - # plt.close() - -###### PRINT TO TEST/VERIFY DATA ###### + + ###### PRINT TO TEST/VERIFY DATA ###### # print('Phi_global :') # print(df['Phi_global'].describe()) # print('Solidification time :') @@ -739,7 +699,7 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa # 
print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][1.0])) # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][5.0])) # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][10.0])) -################################### + ################################### elements = grid_params['delivery.elements.H_oceans'] @@ -749,8 +709,8 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa # Figure setup fig, ax = plt.subplots(figsize=(10, 6)) - for param in grid_params['delivery.elements.H_oceans'] : - sns.ecdfplot(data=np.array(grouped_data['solidification_time_per_delivery.elements.H_oceans'][param]), + for value_init_param in grid_params['delivery.elements.H_oceans'] : + sns.ecdfplot(data=np.array(grouped_data['solidification_time_per_delivery.elements.H_oceans'][value_init_param]), log_scale=True, stat="proportion", color=cmap(norm(param)), @@ -775,60 +735,7 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa print(grid_params.keys()) - # grouped_data = { - # 'P_surf_per_escape.zephyrus.Pxuv': { - # 1e-05: np.random.lognormal(mean=0, sigma=1, size=500), - # 0.01: np.random.lognormal(mean=1, sigma=1, size=500), - # 10.0: np.random.lognormal(mean=2, sigma=1, size=500) - # } - # } - # pxuv_vals = [1e-05, 0.01, 10.0] - - # # --- figure + axis --- - # fig, ax = plt.subplots(figsize=(10, 6)) - - # # --- set up normalization + ScalarMappable for the colorbar --- - # norm = mcolors.LogNorm(vmin=min(pxuv_vals), vmax=max(pxuv_vals)) - # # if you want linear scaling, use Normalize instead: - # # norm = mcolors.Normalize(vmin=min(pxuv_vals), vmax=max(pxuv_vals)) - - # sm = cm.ScalarMappable(norm=norm, cmap='cividis') - # sm.set_array([]) # dummy array for the mappable - - # # --- loop over pxuv values, plotting each line --- - # for pxuv in pxuv_vals: - # color = cm.cividis(norm(pxuv)) - # sns.histplot( - # 
data=grouped_data['P_surf_per_escape.zephyrus.Pxuv'][pxuv], - # bins=30, - # kde=True, - # kde_kws={'bw_adjust': 0.1}, - # stat="density", - # element="step", - # cumulative=True, - # fill=False, - # linewidth=1.5, - # color=color, - # ax=ax - # ) - - # # --- labels --- - # ax.set_xlabel("Surface pressure [bar]") - # ax.set_ylabel("Cumulative density") - - # # --- move y-axis ticks & label to the right --- - # # ax.yaxis.tick_right() - # # ax.yaxis.set_label_position("right") - # # ax.tick_params(axis='y', labelright=True, labelleft=False) - - # # --- add the colorbar --- - # cbar = fig.colorbar(sm, ax=ax, pad=0.02) - # cbar.set_label("Pxuv value") - - # # --- save or show --- - # plt.tight_layout() - # plt.savefig(plots_path + "test_plot_P_surf_per_Pxuv.png", dpi=300) - +################################### # # Single plots # param_label_map = { # "orbit.semimajoraxis": "Semi-major axis [AU]", From 35779eacfc27d9761c71b1e2835c0349fdd7965f Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Sun, 11 May 2025 22:41:03 +0200 Subject: [PATCH 024/105] plot for solidification time ok save before looping on all output --- tools/post_processing_grid/plot_grid.py | 189 +++++++++++++++++------- 1 file changed, 134 insertions(+), 55 deletions(-) diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index 890ab8035..74531f92a 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -10,7 +10,8 @@ import matplotlib.colors as mcolors import matplotlib.cm as cm import matplotlib.ticker as ticker -from matplotlib.ticker import LogFormatterMathtext +from matplotlib.ticker import FixedLocator, LogFormatterMathtext +from matplotlib.colors import BoundaryNorm def load_extracted_data(data_path : str | Path, grid_name :str): @@ -682,60 +683,6 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa # # Plots # plot_grid_status(df, plots_path) # Plot the grid status in an 
histogram - ############## TEST PLOT ############## - - ###### PRINT TO TEST/VERIFY DATA ###### - # print('Phi_global :') - # print(df['Phi_global'].describe()) - # print('Solidification time :') - # print(df['solidification_time'].describe()) - # print('Escape rate :') - # print(df['esc_rate_total'].describe()) - # print('P_surf :') - # print(df['P_surf'].describe()) - # print(df) - # print(df.columns) # Print the columns of the DataFrame - #print(grouped_data.keys()) # Print the keys of the grouped data - # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][1.0])) - # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][5.0])) - # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][10.0])) - ################################### - - - elements = grid_params['delivery.elements.H_oceans'] - # Colorbar setup - norm = mpl.colors.Normalize(vmin=min(elements), vmax=max(elements)) - cmap = cm.winter - - # Figure setup - fig, ax = plt.subplots(figsize=(10, 6)) - for value_init_param in grid_params['delivery.elements.H_oceans'] : - sns.ecdfplot(data=np.array(grouped_data['solidification_time_per_delivery.elements.H_oceans'][value_init_param]), - log_scale=True, - stat="proportion", - color=cmap(norm(param)), - linewidth=2, - ax=ax - ) - - # Name of the axis - ax.set_xlabel("Solidification time [yr]", fontsize=14) - ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) - # Grid - ax.grid(alpha=0.1) - # Colorbar setup - sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) - sm.set_array([]) - cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) - cbar.set_label("[H] [oceans]", fontsize=14) - cbar.set_ticks(elements) - # Save the figure - plt.tight_layout() - plt.savefig(plots_path + "test/" + "test_plot_ecdf_solidfication_time_per_delivery.elements.H_oceans.png", dpi=300) - - print(grid_params.keys()) - -################################### # # Single plots # param_label_map = { # 
"orbit.semimajoraxis": "Semi-major axis [AU]", @@ -793,3 +740,135 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa # print('-----------------------------------------------------') # print("All plots completed. Let's do some analyse now :) !") # print('-----------------------------------------------------') + + ############## TEST PLOT ############## + + ###### PRINT TO TEST/VERIFY DATA ###### + # print('Phi_global :') + # print(df['Phi_global'].describe()) + # print('Solidification time :') + # print(df['solidification_time'].describe()) + # print('Escape rate :') + # print(df['esc_rate_total'].describe()) + # print('P_surf :') + # print(df['P_surf'].describe()) + # print(df) + # print(df.columns) # Print the columns of the DataFrame + #print(grouped_data.keys()) # Print the keys of the grouped data + # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][1.0])) + # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][5.0])) + # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][10.0])) + ################################### + + + # elements = grid_params['delivery.elements.H_oceans'] + # # Colorbar setup + # norm = mpl.colors.Normalize(vmin=min(elements), vmax=max(elements)) + # cmap = cm.winter + + # # Figure setup + # fig, ax = plt.subplots(figsize=(10, 6)) + # for value_init_param in grid_params['delivery.elements.H_oceans'] : + # sns.ecdfplot(data=np.array(grouped_data['solidification_time_per_delivery.elements.H_oceans'][value_init_param]), + # log_scale=True, + # stat="proportion", + # color=cmap(norm(param)), + # linewidth=2, + # ax=ax + # ) + + # # Name of the axis + # ax.set_xlabel("Solidification time [yr]", fontsize=14) + # ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) + # # Grid + # ax.grid(alpha=0.1) + # # Colorbar setup + # sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) + # sm.set_array([]) + # cbar = 
fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) + # cbar.set_label("[H] [oceans]", fontsize=14) + # cbar.set_ticks(elements) + # # Save the figure + # plt.tight_layout() + # plt.savefig(plots_path + "test/" + "test_plot_ecdf_solidfication_time_per_delivery.elements.H_oceans.png", dpi=300) + +################################# + + param_settings = { + "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, + "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Dark2, "log_scale": False}, + "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}} + + + for param_name, settings in param_settings.items(): + tested_param = grid_params[param_name] + if len(tested_param) <= 1: + continue + + # Extract the label and colormap from the settings + param_label = settings["label"] + cmap = settings["colormap"] + do_log_color = settings.get("log_scale", False) + + # Check if the parameter is numeric + is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) + if is_numeric: + if do_log_color: + norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) + else: + norm = mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) + color_func = lambda v: cmap(norm(v)) + colorbar_needed = True + else: + unique_vals = list(sorted(set(tested_param))) + cmap = mpl.cm.get_cmap(cmap, len(unique_vals)) + color_map = {val: cmap(i) for i, val in enumerate(unique_vals)} + color_func = lambda val: color_map[val] + 
colorbar_needed = False + # Figure + fig, ax = plt.subplots(figsize=(10, 6)) + for val in tested_param: + data_key = f'solidification_time_per_{param_name}' + if val not in grouped_data[data_key]: + continue + sns.ecdfplot( + data=np.array(grouped_data[data_key][val]), + log_scale=True, + stat="proportion", + color=color_func(val), + linewidth=3, + ax=ax) + ax.set_xlabel("Solidification time [yr]", fontsize=14) + ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) + ax.grid(alpha=0.1) + + # Create the colorbar or legend depending on the tested parameter + if colorbar_needed: + sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) + cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) + cbar.set_label(settings["label"], fontsize=14) + if do_log_color: + tick_values = sorted(set(tested_param)) + cbar.set_ticks(tick_values) + #cbar.ax.yaxis.set_major_locator(FixedLocator(tick_values)) + #cbar.ax.yaxis.set_major_formatter(LogFormatterMathtext(base=10)) + #cbar.update_ticks() + else: + cbar.set_ticks(sorted(set(tested_param))) + else: + handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=3, label=str(val)) for val in unique_vals] + ax.legend(handles=handles, loc='lower right') + + # Save the plot + filename = f"ecdf_solidification_time_per_{param_name.replace('.', '_')}_normalized.png" + output_path = os.path.join(plots_path, "ecdf_by_param") + os.makedirs(output_path, exist_ok=True) + plt.tight_layout() + plt.savefig(os.path.join(output_path, filename), dpi=300) + plt.close() + +print(extracted_outputs) \ No newline at end of file From 551d0cf5e286c3b5657acc7acdba73cbeaa1d410 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Sun, 11 May 2025 23:21:39 +0200 Subject: [PATCH 025/105] Single plots for all input param and output OK --- tools/post_processing_grid/plot_grid.py | 678 +++--------------------- 1 file changed, 82 insertions(+), 596 deletions(-) diff --git a/tools/post_processing_grid/plot_grid.py 
b/tools/post_processing_grid/plot_grid.py index 74531f92a..109d6d063 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -256,418 +256,6 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): print(f"Grid status histogram successfully saved to {output_path}") -def plot_hist_kde(values, ax, color, plot_hist=True, plot_kde=True, cumulative=True, log_x=False, bins=100, bw_adjust=0.3, hist_element="step", kde_kwargs={}, hist_kwargs={}): - """ - Plot a cumulative or non-cumulative histograms and/or KDE curves on the given x-axis. - - Parameters - ---------- - values : array-like - Data points on the x-axis used to calculate the histogram/KDE. - ax : matplotlib.axes.Axes - Axis to draw the plot on. - color : str - Color for both histogram and KDE. - plot_hist : bool - Whether to plot a histogram. - plot_kde : bool - Whether to plot a KDE curve. - cumulative : bool - Whether to plot the cumulative distribution. - log_x : bool - Whether to use a logarithmic x-axis. - bw_adjust : float - Bandwidth adjustment for KDE. - bins : int - Number of bins for the histogram. - hist_element : str - 'step' or 'bars' for histogram style. - kde_kwargs : dict - Additional kwargs for sns.kdeplot. - hist_kwargs : dict - Additional kwargs for sns.histplot. 
- """ - - # Apply log transform if needed - if log_x: - values = np.log10(values) - - if plot_hist: - sns.histplot( - values, - bins=bins, - cumulative=cumulative, - stat="density", - element=hist_element, - color=color, - ax=ax, - kde=False, - **hist_kwargs - ) - - if plot_kde: - sns.kdeplot( - values, - cumulative=cumulative, - bw_adjust=bw_adjust, - clip=(np.min(values), np.max(values)), - common_grid=True, - color=color, - ax=ax, - **kde_kwargs - ) - - # Set axis labels and formatting - if log_x: - # Keep axis in linear scale (values already log-transformed), - # but show ticks as powers of 10 - ax.set_xlabel("log10(x)") - ax.set_xticks(np.log10(ticks := np.geomspace(np.nanmin(10**values), np.nanmax(10**values), num=5))) - ax.set_xticklabels([f"{int(tick):.0e}" for tick in ticks]) - else: - ax.set_xscale('linear') - -def safe_log_formatter(x, _): - try: - if x > 0: - return r'$10^{{{}}}$'.format(int(np.log10(x))) - else: - return '' - except Exception: - return '' - -def plot_distributions(data_dict, xlabel, ylabel, colormap, vmin=None, vmax=None, key_label="Parameter", ax=None, save_path=None, bw_adjust=0.3, log_x=True, tick_values=None, norm=None, plot_hist=True, plot_kde=True, cumulative=True, bins=100): - """ - Plot cumulative KDE curves for one of the output parameters of the grid (like esc_rate_total, solidification_time) on the x-axis. - The different curves correspond to a input parameter from the grid with a color mapped on the right side of the plot. - - Parameters - ---------- - data_dict : dict - Dictionary of datasets, where each key represents a unique variable (e.g., semi-major axis), - and each value is a list or array of values (e.g., solidification times). - - xlabel : str - Label for the x-axis. - - ylabel : str - Label for the y-axis. - - cmap : matplotlib.colors.Colormap - The colormap used to assign colors to each dataset based on their corresponding key value. 
- - vmin : float - The minimum value of the key variable to normalize the colormap. - - vmax : float - The maximum value of the key variable to normalize the colormap. - - ax : matplotlib.axes.Axes, optional - The axis to plot on. If None, a new figure and axis will be created. - - save_path : str, optional - Path to save the generated plot. If None, the plot will not be saved. - - bw_adjust : float, optional, default=0.3 - Bandwidth adjustment factor : controls the smoothness of the KDE. - Smaller values make the KDE more sensitive to data, while larger values smooth it out. - - log_x : bool, optional - If True, sets the x-axis to logarithmic scale. - - - tick_values : list of float, optional - Values to use as ticks on the colorbar. Useful for discrete parameter steps. - - - Returns - ------- - '""" - - if ax is None: - fig, ax = plt.subplots(figsize=(10, 6)) - else: - fig = ax.figure - - keys = sorted(data_dict.keys()) - if vmin is None: - vmin = min(keys) - if vmax is None: - vmax = max(keys) - - # Use provided or default norm - if norm is None: - norm = mcolors.Normalize(vmin=vmin, vmax=vmax) - sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm) - - # Plot each group - for key in keys: - values = np.array(data_dict[key]) - - # Filter out invalid values for log scale - values = values[np.isfinite(values)] # Remove inf and NaN - if log_x: - values = values[values > 0] # Remove zero and negatives - values = np.log10(values) - - if len(values) < 2: - print(f"Skipping key {key} due to insufficient valid data points.") - continue - - color = colormap(norm(key)) - - plot_hist_kde(values=values, ax=ax, color=color, plot_hist=plot_hist, plot_kde=plot_kde, cumulative=cumulative, log_x=log_x, bins=bins, bw_adjust=0.3, hist_element="step", kde_kwargs={}, hist_kwargs={}) - - ax.set_xlabel(f"log10({xlabel})" if log_x else xlabel, fontsize=12) - ax.set_ylabel(ylabel, fontsize=12) - ax.set_ylim(0, 1.02) - ax.grid(alpha=0.2) - - # Colorbar setup - 
sm.set_array(np.linspace(vmin, vmax, 100)) - cbar = fig.colorbar(sm, ax=ax) - cbar.set_label(key_label, fontsize=12) - - # Use grid param values as colorbar ticks - if tick_values is not None: - cbar.set_ticks(tick_values) - if isinstance(norm, mcolors.LogNorm): - # Format log ticks with LaTeX-style math text (e.g., 10⁻³) - cbar.set_ticklabels([f"$10^{{{int(np.log10(t))}}}$" for t in tick_values]) - cbar.ax.yaxis.set_major_formatter(LogFormatterMathtext()) - else: - # Linear scale ticks - cbar.set_ticklabels([str(t) for t in tick_values]) - - if save_path: - plt.savefig(save_path, dpi=300) - plt.close(fig) - - - -def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params, log_x=True, plot_hist=True, plot_kde=True, cumulative=True, bins=100): - """ - Generate and save normalized cumulative distribution plots for each output vs. grid parameter. - - Parameters: - ---------- - extracted_outputs : list of str - List of extracted output quantities to plot. - - grouped_data : dict - Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) - - grid_params : dict - Parameter values used in the grid. - - plots_path : str - Directory where plots will be saved. - - param_label_map : dict - Dictionary containing the label of the grid parameter for the plot. - - colormaps_by_param : dict - Dictionary containing the colormap to use for each grid parameter for the plot. - - output_label_map : dict, optional - Dictionary containing the label of the extracted output quantity for the plot. - - log_scale_grid_params : list of str, optional - Parameters to use log scale for colormap normalization. 
(like escape.zephyrus.Pxuv) - """ - - if param_label_map is None: - raise ValueError("param_label_map must be provided.") - - if colormaps_by_param is None: - raise ValueError("colormaps_by_param must be provided.") - - if output_label_map is None: - raise ValueError("output_label_map must be provided.") - - if log_scale_grid_params is None: - log_scale_grid_params = [] - - for output_name in extracted_outputs: - for param, cmap in colormaps_by_param.items(): - data_key = f"{output_name}_per_{param}" - - if data_key not in grouped_data: - print(f"WARNING: Skipping {data_key} — not found in grouped_data") - continue - - data_dict_raw = grouped_data[data_key] - data_dict = data_dict_raw.copy() - - if output_name == "Phi_global": - data_dict = {k: [v_i * 100 for v_i in v] for k, v in data_dict.items()} - - tick_values = grid_params.get(param) - vmin = min(tick_values) - vmax = max(tick_values) - - xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) - ylabel = "Normalized cumulative fraction of simulations" - key_label = param_label_map.get(param, param.replace("_", " ").title()) - - save_dir = Path(plots_path) / 'single_plot' - plot_dir_exists(save_dir) - - save_name = f"cumulative_{output_name}_vs_{param.replace('.', '_')}.png" - save_path = save_dir / save_name - - fig, ax = plt.subplots(figsize=(10, 6)) - - norm = ( - mcolors.LogNorm(vmin=vmin, vmax=vmax) - if param in log_scale_grid_params - else mcolors.Normalize(vmin=vmin, vmax=vmax) - ) - - df = pd.DataFrame(data_dict) - keys = list(df.columns) - - plot_distributions( - data_dict=data_dict, - xlabel=xlabel, - ylabel=ylabel, - colormap=cmap, - vmin=vmin, - vmax=vmax, - key_label=key_label, - log_x=log_x, - tick_values=tick_values, - save_path=save_path, - norm=norm, - ax=ax, - bw_adjust=0.3, - plot_hist=plot_hist, - plot_kde=plot_kde, - cumulative=cumulative, - bins=bins - ) - plt.close(fig) - - print("All single plots saved.") - -# def generate_grid_plot(extracted_outputs, 
grouped_data, grid_params, plots_path, param_label_map, colormaps_by_param, output_label_map, log_scale_grid_params, log_x=True, bins=100): -# """ -# Generate and save normalized cumulative distribution plots for each output vs. grid parameters. -# This creates a subplot where each column corresponds to one extracted output, and each row corresponds to one grid parameter. - -# Parameters: -# ---------- -# extracted_outputs : list of str -# List of extracted output quantities to plot. - -# grouped_data : dict -# Data for each output/parameter pair. (like solidification_time_per_semimajoraxis) - -# grid_params : dict -# Parameter values used in the grid. - -# plots_path : str -# Directory where plots will be saved. - -# param_label_map : dict -# Dictionary containing the label of the grid parameter for the plot. - -# colormaps_by_param : dict -# Dictionary containing the colormap to use for each grid parameter for the plot. - -# output_label_map : dict, optional -# Dictionary containing the label of the extracted output quantity for the plot. - -# log_scale_grid_params : list of str, optional -# Parameters to use log scale for colormap normalization. (like escape.zephyrus.Pxuv) -# """ - -# if param_label_map is None: -# raise ValueError("param_label_map must be provided.") -# if colormaps_by_param is None: -# raise ValueError("colormaps_by_param must be provided.") -# if output_label_map is None: -# raise ValueError("output_label_map must be provided.") -# if log_scale_grid_params is None: -# log_scale_grid_params = [] - -# num_cols = len(extracted_outputs) -# num_rows = len(grid_params) - -# # Create subplots with the appropriate layout -# fig, axes = plt.subplots(num_rows, num_cols, figsize=(num_cols * 16, num_rows * 12)) -# plt.subplots_adjust(hspace=0.15, wspace=0.15) - -# if num_rows == 1: -# axes = np.expand_dims(axes, axis=0) # Make sure it's 2D if only one row. 
- -# for i, output_name in enumerate(extracted_outputs): -# for j, (param, cmap) in enumerate(colormaps_by_param.items()): -# data_key = f"{output_name}_per_{param}" - -# if data_key not in grouped_data: -# print(f"WARNING: Skipping {data_key} — not found in grouped_data") -# continue - -# data_dict_raw = grouped_data[data_key] -# data_dict = data_dict_raw.copy() - -# if output_name == "Phi_global": -# data_dict = {k: [v_i * 100 for v_i in v] for k, v in data_dict.items()} - -# tick_values = grid_params.get(param) -# vmin = min(tick_values) -# vmax = max(tick_values) - -# xlabel = output_label_map.get(output_name, output_name.replace("_", " ").title()) -# ylabel = "Normalized cumulative fraction of simulations" -# key_label = param_label_map.get(param, param.replace("_", " ").title()) - -# # Select the axis for this subplot -# ax = axes[j, i] - -# norm = ( -# mcolors.LogNorm(vmin=vmin, vmax=vmax) -# if param in log_scale_grid_params -# else mcolors.Normalize(vmin=vmin, vmax=vmax) -# ) - -# plot_kde_cumulative( -# data_dict=data_dict, -# xlabel=xlabel, -# ylabel=ylabel, -# cmap=cmap, -# vmin=vmin, -# vmax=vmax, -# key_label=key_label, -# log_x=log_x, -# tick_values=tick_values, -# save_path=None, # Don't save the plot yet -# norm=norm, -# ax=ax -# ) - -# # Set titles for each subplot -# if i == 0: -# ax.set_ylabel(ylabel, fontsize=12) -# if j == num_rows - 1: -# ax.set_xlabel(xlabel, fontsize=12) - -# # Customize plot with grid and ticks -# ax.grid(alpha=0.2) -# ax.set_ylim(0, 1.02) - -# # Save the complete subplot figure -# save_dir = Path(plots_path) / 'grid_plot' -# plot_dir_exists(save_dir) - -# save_name = "cumulative_grid_plot.png" -# save_path = save_dir / save_name -# plt.savefig(save_path, dpi=300) -# plt.close(fig) - -# print(f"All subplot plots saved to {save_path}") - if __name__ == '__main__': # User needs to specify paths @@ -680,195 +268,93 @@ def generate_single_plots(extracted_outputs, grouped_data, grid_params, plots_pa plot_dir_exists(plots_path) 
# Check if the plot directory exists. If not, create it. grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters - # # Plots - # plot_grid_status(df, plots_path) # Plot the grid status in an histogram - - # # Single plots - # param_label_map = { - # "orbit.semimajoraxis": "Semi-major axis [AU]", - # "escape.zephyrus.Pxuv": r"$P_{XUV}$ [bar]", - # "escape.zephyrus.efficiency": r"Escape efficiency factor, $\epsilon$", - # "outgas.fO2_shift_IW":r"$log_{10}$($fO_2$) [$\Delta$ IW]", - # #"atmos_clim.module": "Atmospheric module", - # "delivery.elements.CH_ratio": "C/H ratio", - # "delivery.elements.H_oceans": "[H] [oceans]", - # "star.mass": r"Stellar mass [M$_\odot$]"} - # output_label_map = { - # "solidification_time": "Solidification time [yr]", - # "esc_rate_total": "Total escape rate [kg/s]", - # "Phi_global": "Melt fraction [%]", - # "P_surf": "Surface pressure [bar]", - # "atm_kg_per_mol": "Atmospheric mass [kg/mol]"} - # colormaps_by_param = { - # "orbit.semimajoraxis": cm.plasma, - # "escape.zephyrus.Pxuv": cm.cividis, - # "escape.zephyrus.efficiency": cm.spring, - # "outgas.fO2_shift_IW": cm.coolwarm, - # "delivery.elements.CH_ratio":cm.copper, - # #"atmos_clim.module": cm.Dark2, - # "delivery.elements.H_oceans": cm.winter, - # #"star.mass": cm.RdYlBu - # } - - # log_scale_grid_params = ["escape.zephyrus.Pxuv"] - - # generate_single_plots( - # extracted_outputs=extracted_outputs, - # grouped_data=grouped_data, - # grid_params=grid_params, - # plots_path=plots_path, - # param_label_map=param_label_map, - # colormaps_by_param=colormaps_by_param, - # output_label_map=output_label_map, - # log_scale_grid_params=log_scale_grid_params, - # log_x=True, - # plot_hist=True, - # plot_kde=True, - # cumulative=True, - # bins=100) - - # generate_grid_plot( - # extracted_outputs=extracted_outputs, - # grouped_data=grouped_data, - # grid_params=grid_params, - # plots_path=plots_path, - # 
param_label_map=param_label_map, - # colormaps_by_param=colormaps_by_param, - # output_label_map=output_label_map, - # log_scale_grid_params=log_scale_grid_params) - - # print('-----------------------------------------------------') - # print("All plots completed. Let's do some analyse now :) !") - # print('-----------------------------------------------------') - - ############## TEST PLOT ############## - - ###### PRINT TO TEST/VERIFY DATA ###### - # print('Phi_global :') - # print(df['Phi_global'].describe()) - # print('Solidification time :') - # print(df['solidification_time'].describe()) - # print('Escape rate :') - # print(df['esc_rate_total'].describe()) - # print('P_surf :') - # print(df['P_surf'].describe()) - # print(df) - # print(df.columns) # Print the columns of the DataFrame - #print(grouped_data.keys()) # Print the keys of the grouped data - # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][1.0])) - # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][5.0])) - # print(len(grouped_data['solidification_time_per_delivery.elements.H_oceans'][10.0])) - ################################### - - - # elements = grid_params['delivery.elements.H_oceans'] - # # Colorbar setup - # norm = mpl.colors.Normalize(vmin=min(elements), vmax=max(elements)) - # cmap = cm.winter - - # # Figure setup - # fig, ax = plt.subplots(figsize=(10, 6)) - # for value_init_param in grid_params['delivery.elements.H_oceans'] : - # sns.ecdfplot(data=np.array(grouped_data['solidification_time_per_delivery.elements.H_oceans'][value_init_param]), - # log_scale=True, - # stat="proportion", - # color=cmap(norm(param)), - # linewidth=2, - # ax=ax - # ) - - # # Name of the axis - # ax.set_xlabel("Solidification time [yr]", fontsize=14) - # ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) - # # Grid - # ax.grid(alpha=0.1) - # # Colorbar setup - # sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) - # sm.set_array([]) 
- # cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) - # cbar.set_label("[H] [oceans]", fontsize=14) - # cbar.set_ticks(elements) - # # Save the figure - # plt.tight_layout() - # plt.savefig(plots_path + "test/" + "test_plot_ecdf_solidfication_time_per_delivery.elements.H_oceans.png", dpi=300) - -################################# + # Plots + plot_grid_status(df, plots_path) # Plot the grid status in an histogram + # Single plot of all input parameters and extracted outputs +################################################### + # Define input parameter settings param_settings = { - "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, - "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Dark2, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}} - - - for param_name, settings in param_settings.items(): - tested_param = grid_params[param_name] - if len(tested_param) <= 1: - continue - - # Extract the label and colormap from the settings - param_label = settings["label"] - cmap = settings["colormap"] - do_log_color = settings.get("log_scale", False) - - # Check if the parameter is numeric - is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) - if is_numeric: - if do_log_color: - norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) - else: - norm = mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) - color_func = lambda v: cmap(norm(v)) - 
colorbar_needed = True - else: - unique_vals = list(sorted(set(tested_param))) - cmap = mpl.cm.get_cmap(cmap, len(unique_vals)) - color_map = {val: cmap(i) for i, val in enumerate(unique_vals)} - color_func = lambda val: color_map[val] - colorbar_needed = False - # Figure - fig, ax = plt.subplots(figsize=(10, 6)) - for val in tested_param: - data_key = f'solidification_time_per_{param_name}' - if val not in grouped_data[data_key]: + "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, + "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Dark2, "log_scale": False}, + "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}} + + # Define extracted output settings + output_settings = { + 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, + 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, + 'atm_kg_per_mol': {"label": "Atmospheric mass [kg/mol]", "log_scale": True, "scale": 1.0}, + 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}} + + + # Loop over extracted outputs and input parameters + for output_name, out_settings in output_settings.items(): + for param_name, settings in param_settings.items(): + tested_param = grid_params[param_name] # Get the values of the input parameter + if len(tested_param) <= 1: continue - sns.ecdfplot( - 
data=np.array(grouped_data[data_key][val]), - log_scale=True, + # Extract plot settings for this input parameter + param_label = settings["label"] + cmap = settings["colormap"] + color_log = settings.get("log_scale", False) + # Extract plot settings for this output + x_label = out_settings["label"] + x_log = out_settings.get("log_scale", False) + scale = out_settings.get("scale", 1.0) + + # Determine colormap and color function if the parameter is numeric or string + is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) + if is_numeric: + norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if color_log else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) + color_func = lambda v: cmap(norm(v)) + colorbar_needed = True + else: + unique_vals = sorted(set(tested_param)) + cats_cmap = mpl.cm.get_cmap(cmap, len(unique_vals)) + color_map = {val: cats_cmap(i) for i, val in enumerate(unique_vals)} + color_func = lambda val: color_map[val] + colorbar_needed = False + + # Create figure + fig, ax = plt.subplots(figsize=(10, 6)) + data_key = f'{output_name}_per_{param_name}' + for val in tested_param: + if val not in grouped_data[data_key]: + continue + raw = np.array(grouped_data[data_key][val]) * scale + sns.ecdfplot( + data=raw, + log_scale=x_log, stat="proportion", color=color_func(val), linewidth=3, ax=ax) - ax.set_xlabel("Solidification time [yr]", fontsize=14) - ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) - ax.grid(alpha=0.1) - - # Create the colorbar or legend depending on the tested parameter - if colorbar_needed: - sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) - cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) - cbar.set_label(settings["label"], fontsize=14) - if do_log_color: - tick_values = sorted(set(tested_param)) - cbar.set_ticks(tick_values) - #cbar.ax.yaxis.set_major_locator(FixedLocator(tick_values)) - #cbar.ax.yaxis.set_major_formatter(LogFormatterMathtext(base=10)) 
- #cbar.update_ticks() + # Set axis labels + ax.set_xlabel(x_label, fontsize=14) + ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) + ax.grid(alpha=0.1) + + # Add colorbar or legend + if colorbar_needed: + sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) + cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) + cbar.set_label(param_label, fontsize=14) + ticks = sorted(set(tested_param)) + cbar.set_ticks(ticks) else: - cbar.set_ticks(sorted(set(tested_param))) - else: - handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=3, label=str(val)) for val in unique_vals] - ax.legend(handles=handles, loc='lower right') - - # Save the plot - filename = f"ecdf_solidification_time_per_{param_name.replace('.', '_')}_normalized.png" - output_path = os.path.join(plots_path, "ecdf_by_param") - os.makedirs(output_path, exist_ok=True) - plt.tight_layout() - plt.savefig(os.path.join(output_path, filename), dpi=300) - plt.close() - -print(extracted_outputs) \ No newline at end of file + handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=3, label=str(val)) for val in unique_vals] + ax.legend(handles=handles, loc='lower right') + + # Save the figure + output_dir = os.path.join(plots_path, "ecdf_by_param_and_output") + os.makedirs(output_dir, exist_ok=True) + fname = f"ecdf_{output_name}_per_{param_name.replace('.', '_')}.png" + plt.tight_layout() + plt.savefig(os.path.join(output_dir, fname), dpi=300) + plt.close() +################################################### \ No newline at end of file From 7c93ab5b1a0472063042aa7adbb78a4d8d014558 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 12 May 2025 01:16:43 +0200 Subject: [PATCH 026/105] plot grid ok !!! 
--- tools/post_processing_grid/plot_grid.py | 257 ++++++++++++++++-------- 1 file changed, 178 insertions(+), 79 deletions(-) diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index 109d6d063..12081b402 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -1,17 +1,13 @@ from pathlib import Path -from io import StringIO - import os import pandas as pd import numpy as np +from io import StringIO import seaborn as sns -import matplotlib as mpl import matplotlib.pyplot as plt +import matplotlib as mpl import matplotlib.colors as mcolors import matplotlib.cm as cm -import matplotlib.ticker as ticker -from matplotlib.ticker import FixedLocator, LogFormatterMathtext -from matplotlib.colors import BoundaryNorm def load_extracted_data(data_path : str | Path, grid_name :str): @@ -254,7 +250,7 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): plt.savefig(output_path, dpi=300) plt.close() - print(f"Grid status histogram successfully saved to {output_path}") + print(f"Summary plot of grid status is available") if __name__ == '__main__': @@ -272,89 +268,192 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): plot_grid_status(df, plots_path) # Plot the grid status in an histogram # Single plot of all input parameters and extracted outputs -################################################### +# ################################################### +# # Define input parameter settings +# param_settings = { +# "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, +# "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, +# "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, +# "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": 
False}, +# "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Dark2, "log_scale": False}, +# "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, +# "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}} + +# # Define extracted output settings +# output_settings = { +# 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, +# 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, +# 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, +# 'atm_kg_per_mol': {"label": "Atmospheric mass [kg/mol]", "log_scale": True, "scale": 1.0}, +# 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}} + + +# # Loop over extracted outputs and input parameters +# for output_name, out_settings in output_settings.items(): +# for param_name, settings in param_settings.items(): +# tested_param = grid_params[param_name] # Get the values of the input parameter +# if len(tested_param) <= 1: +# continue +# # Extract plot settings for this input parameter +# param_label = settings["label"] +# cmap = settings["colormap"] +# color_log = settings.get("log_scale", False) +# # Extract plot settings for this output +# x_label = out_settings["label"] +# x_log = out_settings.get("log_scale", False) +# scale = out_settings.get("scale", 1.0) + +# # Determine colormap and color function if the parameter is numeric or string +# is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) +# if is_numeric: +# norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if color_log else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) +# color_func = lambda v: cmap(norm(v)) +# colorbar_needed = True +# else: +# unique_vals = sorted(set(tested_param)) +# cats_cmap = mpl.cm.get_cmap(cmap, len(unique_vals)) +# color_map = {val: 
cats_cmap(i) for i, val in enumerate(unique_vals)} +# color_func = lambda val: color_map[val] +# colorbar_needed = False + +# # Create figure +# fig, ax = plt.subplots(figsize=(10, 6)) +# data_key = f'{output_name}_per_{param_name}' +# for val in tested_param: +# if val not in grouped_data[data_key]: +# continue +# raw = np.array(grouped_data[data_key][val]) * scale +# sns.ecdfplot( +# data=raw, +# log_scale=x_log, +# stat="proportion", +# color=color_func(val), +# linewidth=3, +# ax=ax) +# # Set axis labels +# ax.set_xlabel(x_label, fontsize=14) +# ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) +# ax.grid(alpha=0.1) + +# # Add colorbar or legend +# if colorbar_needed: +# sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) +# cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) +# cbar.set_label(param_label, fontsize=14) +# ticks = sorted(set(tested_param)) +# cbar.set_ticks(ticks) +# else: +# handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=3, label=str(val)) for val in unique_vals] +# ax.legend(handles=handles, loc='lower right') + +# # Save the figure +# output_dir = os.path.join(plots_path, "ecdf_by_param_and_output") +# os.makedirs(output_dir, exist_ok=True) +# fname = f"ecdf_{output_name}_per_{param_name.replace('.', '_')}.png" +# plt.tight_layout() +# plt.savefig(os.path.join(output_dir, fname), dpi=300) +# plt.close() +# print(f"All single plots are available in the ecdf_by_param_and_output/ folder") +# ################################################### + + # Grid plot + # Define input parameter settings param_settings = { - "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, - "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": 
cm.coolwarm, "log_scale": False}, "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Dark2, "log_scale": False}, + "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, + "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}} + "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} # Define extracted output settings output_settings = { - 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, - 'atm_kg_per_mol': {"label": "Atmospheric mass [kg/mol]", "log_scale": True, "scale": 1.0}, - 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}} - - - # Loop over extracted outputs and input parameters - for output_name, out_settings in output_settings.items(): - for param_name, settings in param_settings.items(): - tested_param = grid_params[param_name] # Get the values of the input parameter - if len(tested_param) <= 1: - continue - # Extract plot settings for this input parameter - param_label = settings["label"] - cmap = settings["colormap"] - color_log = settings.get("log_scale", False) - # Extract plot settings for this output - x_label = out_settings["label"] - x_log = out_settings.get("log_scale", 
False) - scale = out_settings.get("scale", 1.0) - - # Determine colormap and color function if the parameter is numeric or string - is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) - if is_numeric: - norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if color_log else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) - color_func = lambda v: cmap(norm(v)) - colorbar_needed = True - else: - unique_vals = sorted(set(tested_param)) - cats_cmap = mpl.cm.get_cmap(cmap, len(unique_vals)) - color_map = {val: cats_cmap(i) for i, val in enumerate(unique_vals)} - color_func = lambda val: color_map[val] - colorbar_needed = False - - # Create figure - fig, ax = plt.subplots(figsize=(10, 6)) - data_key = f'{output_name}_per_{param_name}' + 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + 'atm_kg_per_mol': {"label": "Atm mass [kg/mol]", "log_scale": True, "scale": 1.0}} + + + # Prepare parameter and output lists + param_names = list(param_settings.keys()) + out_names = list(output_settings.keys()) + + # Create subplot grid: rows = parameters, columns = outputs + n_rows = len(param_names) + n_cols = len(out_names) + fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 2.5 * n_rows), squeeze=False) + + # Loop through parameters (rows) and outputs (columns) + for i, param_name in enumerate(param_names): + tested_param = grid_params[param_name] + settings = param_settings[param_name] + # Determine coloring + is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) + if is_numeric: + norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if settings.get("log_scale", False) else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) + color_func = lambda v: settings["colormap"](norm(v)) + colorbar_needed = True + else: + unique_vals = sorted(set(tested_param)) + cmap = mpl.cm.get_cmap(settings["colormap"], len(unique_vals)) + color_map 
= {val: cmap(j) for j, val in enumerate(unique_vals)} + color_func = lambda v: color_map[v] + colorbar_needed = False + + for j, output_name in enumerate(out_names): + ax = axes[i][j] + out_settings = output_settings[output_name] + # Plot each ECDF for val in tested_param: + data_key = f"{output_name}_per_{param_name}" if val not in grouped_data[data_key]: continue - raw = np.array(grouped_data[data_key][val]) * scale + raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0) sns.ecdfplot( - data=raw, - log_scale=x_log, - stat="proportion", - color=color_func(val), - linewidth=3, - ax=ax) - # Set axis labels - ax.set_xlabel(x_label, fontsize=14) - ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) - ax.grid(alpha=0.1) - - # Add colorbar or legend - if colorbar_needed: - sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) - cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) - cbar.set_label(param_label, fontsize=14) - ticks = sorted(set(tested_param)) - cbar.set_ticks(ticks) + data=raw, + log_scale=out_settings.get("log_scale", False), + #stat="proportion", + color=color_func(val), + linewidth=4, + ax=ax + ) + # Labels and grid + if i == n_rows - 1: + ax.set_xlabel(out_settings["label"], fontsize=22) + ax.xaxis.set_label_coords(0.5, -0.3) + ax.tick_params(axis='x', labelsize=22) + else : + ax.tick_params(axis='x', labelbottom=False) + if j == 0: + ax.set_ylabel("") + ticks = [0.0, 0.5, 1.0] + ax.set_yticks(ticks) + ax.tick_params(axis='y', labelsize=22) # show tick labels with size else: - handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=3, label=str(val)) for val in unique_vals] - ax.legend(handles=handles, loc='lower right') - - # Save the figure - output_dir = os.path.join(plots_path, "ecdf_by_param_and_output") - os.makedirs(output_dir, exist_ok=True) - fname = f"ecdf_{output_name}_per_{param_name.replace('.', '_')}.png" - plt.tight_layout() - plt.savefig(os.path.join(output_dir, fname), dpi=300) - 
plt.close() -################################################### \ No newline at end of file + ax.set_ylabel("") + ax.set_yticks(ticks) + ax.tick_params(axis='y', labelleft=False) + # Grid + ax.grid(alpha=0.4) + # Colorbar or legend + if colorbar_needed: + sm = mpl.cm.ScalarMappable(cmap=settings["colormap"], norm=norm) + cbar = fig.colorbar(sm, ax=ax, pad=0.08, aspect=10) + cbar.set_label(settings["label"], fontsize=24) + cbar.ax.yaxis.set_label_coords(5.5, 0.5) + ticks = sorted(set(tested_param)) + cbar.set_ticks(ticks) + cbar.ax.tick_params(labelsize=22) + else: + handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=4, label=str(val)) for val in unique_vals] + ax.legend(handles=handles, fontsize=24,bbox_to_anchor=(1.01, 1), loc='upper left') + + # Add a single shared y-axis label + fig.text(0.04, 0.5, 'Normalized cumulative fraction of simulations', va='center', rotation='vertical', fontsize=40) + # Adjust layout and save + plt.tight_layout(rect=[0.08, 0.02, 1, 0.97]) + fig.savefig(os.path.join(plots_path, "ecdf_param_output_grid.png"), dpi=300) + plt.close(fig) From bbd119fc9d66869fe5fbca32f82b5ea103d63076 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 23 May 2025 17:06:55 +0200 Subject: [PATCH 027/105] latest change before update main --- tools/post_processing_grid/plot_grid.py | 371 +++++++++--------- .../post_processing_grid.py | 4 +- 2 files changed, 187 insertions(+), 188 deletions(-) diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py index 12081b402..ed842ae30 100644 --- a/tools/post_processing_grid/plot_grid.py +++ b/tools/post_processing_grid/plot_grid.py @@ -267,193 +267,192 @@ def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): # Plots plot_grid_status(df, plots_path) # Plot the grid status in an histogram - # Single plot of all input parameters and extracted outputs -# ################################################### -# # Define input parameter settings -# 
param_settings = { -# "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, -# "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, -# "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, -# "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, -# "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Dark2, "log_scale": False}, -# "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, -# "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}} - -# # Define extracted output settings -# output_settings = { -# 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, -# 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, -# 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, -# 'atm_kg_per_mol': {"label": "Atmospheric mass [kg/mol]", "log_scale": True, "scale": 1.0}, -# 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}} - - -# # Loop over extracted outputs and input parameters -# for output_name, out_settings in output_settings.items(): -# for param_name, settings in param_settings.items(): -# tested_param = grid_params[param_name] # Get the values of the input parameter -# if len(tested_param) <= 1: -# continue -# # Extract plot settings for this input parameter -# param_label = settings["label"] -# cmap = settings["colormap"] -# color_log = settings.get("log_scale", False) -# # Extract plot settings for this output -# x_label = out_settings["label"] -# x_log = out_settings.get("log_scale", False) -# scale = out_settings.get("scale", 1.0) - -# # Determine colormap and color function if the parameter is numeric or string 
-# is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) -# if is_numeric: -# norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if color_log else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) -# color_func = lambda v: cmap(norm(v)) -# colorbar_needed = True -# else: -# unique_vals = sorted(set(tested_param)) -# cats_cmap = mpl.cm.get_cmap(cmap, len(unique_vals)) -# color_map = {val: cats_cmap(i) for i, val in enumerate(unique_vals)} -# color_func = lambda val: color_map[val] -# colorbar_needed = False - -# # Create figure -# fig, ax = plt.subplots(figsize=(10, 6)) -# data_key = f'{output_name}_per_{param_name}' -# for val in tested_param: -# if val not in grouped_data[data_key]: -# continue -# raw = np.array(grouped_data[data_key][val]) * scale -# sns.ecdfplot( -# data=raw, -# log_scale=x_log, -# stat="proportion", -# color=color_func(val), -# linewidth=3, -# ax=ax) -# # Set axis labels -# ax.set_xlabel(x_label, fontsize=14) -# ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) -# ax.grid(alpha=0.1) - -# # Add colorbar or legend -# if colorbar_needed: -# sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) -# cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) -# cbar.set_label(param_label, fontsize=14) -# ticks = sorted(set(tested_param)) -# cbar.set_ticks(ticks) -# else: -# handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=3, label=str(val)) for val in unique_vals] -# ax.legend(handles=handles, loc='lower right') - -# # Save the figure -# output_dir = os.path.join(plots_path, "ecdf_by_param_and_output") -# os.makedirs(output_dir, exist_ok=True) -# fname = f"ecdf_{output_name}_per_{param_name.replace('.', '_')}.png" -# plt.tight_layout() -# plt.savefig(os.path.join(output_dir, fname), dpi=300) -# plt.close() -# print(f"All single plots are available in the ecdf_by_param_and_output/ folder") -# ################################################### + # # Single plot of all 
input parameters and extracted outputs + # # Define input parameter settings + # param_settings = { + # "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, + # "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + # "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, + # "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, + # "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Dark2, "log_scale": False}, + # "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + # "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}} + + # # Define extracted output settings + # output_settings = { + # #'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + # #'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, + # #'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, + # #'atm_kg_per_mol': {"label": "Mean molecular weight (MMW) [g/mol]", "log_scale": False, "scale": 1000.0}, + # 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}} + + + # # Loop over extracted outputs and input parameters + # for output_name, out_settings in output_settings.items(): + # for param_name, settings in param_settings.items(): + # tested_param = grid_params[param_name] # Get the values of the input parameter + # if len(tested_param) <= 1: + # continue + # # Extract plot settings for this input parameter + # param_label = settings["label"] + # cmap = settings["colormap"] + # color_log = settings.get("log_scale", False) + # # Extract plot settings for this output + # x_label = out_settings["label"] + # x_log = out_settings.get("log_scale", 
False) + # scale = out_settings.get("scale", 1.0) + + # # Determine colormap and color function if the parameter is numeric or string + # is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) + # if is_numeric: + # norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if color_log else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) + # color_func = lambda v: cmap(norm(v)) + # colorbar_needed = True + # else: + # unique_vals = sorted(set(tested_param)) + # cats_cmap = mpl.cm.get_cmap(cmap, len(unique_vals)) + # color_map = {val: cats_cmap(i) for i, val in enumerate(unique_vals)} + # color_func = lambda val: color_map[val] + # colorbar_needed = False + + # # Create figure + # fig, ax = plt.subplots(figsize=(10, 6)) + # data_key = f'{output_name}_per_{param_name}' + # for val in tested_param: + # if val not in grouped_data[data_key]: + # continue + # raw = np.array(grouped_data[data_key][val]) * scale + # sns.ecdfplot( + # data=raw, + # log_scale=x_log, + # stat="proportion", + # color=color_func(val), + # linewidth=3, + # ax=ax) + # # Set axis labels + # ax.set_xlabel(x_label, fontsize=14) + # ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) + # ax.grid(alpha=0.1) + + # # Add colorbar or legend + # if colorbar_needed: + # sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) + # cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) + # cbar.set_label(param_label, fontsize=14) + # ticks = sorted(set(tested_param)) + # cbar.set_ticks(ticks) + # else: + # handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=3, label=str(val)) for val in unique_vals] + # ax.legend(handles=handles, loc='lower right') + + # # Save the figure + # output_dir = os.path.join(plots_path, "test_solid_ecdf_by_param_and_output") + # os.makedirs(output_dir, exist_ok=True) + # fname = f"ecdf_{output_name}_per_{param_name.replace('.', '_')}.png" + # plt.tight_layout() + # plt.savefig(os.path.join(output_dir, fname), 
dpi=300) + # plt.close() + # print(f"All single plots are available in the ecdf_by_param_and_output/ folder") # Grid plot - # Define input parameter settings - param_settings = { - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Dark2, "log_scale": False}, - "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} - - # Define extracted output settings - output_settings = { - 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, - 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, - 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - 'atm_kg_per_mol': {"label": "Atm mass [kg/mol]", "log_scale": True, "scale": 1.0}} - - - # Prepare parameter and output lists - param_names = list(param_settings.keys()) - out_names = list(output_settings.keys()) - - # Create subplot grid: rows = parameters, columns = outputs - n_rows = len(param_names) - n_cols = len(out_names) - fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 2.5 * n_rows), squeeze=False) + # # Define input parameter settings + # param_settings = { + # "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Dark2, "log_scale": False}, + # "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, + # "escape.zephyrus.efficiency": 
{"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, + # "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + # "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, + # "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + # "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} + + # # Define extracted output settings + # output_settings = { + # 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, + # 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, + # 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, + # 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + # 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}} + + # # Prepare parameter and output lists + # param_names = list(param_settings.keys()) + # out_names = list(output_settings.keys()) + + # # Create subplot grid: rows = parameters, columns = outputs + # n_rows = len(param_names) + # n_cols = len(out_names) + # fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 2.5 * n_rows), squeeze=False) - # Loop through parameters (rows) and outputs (columns) - for i, param_name in enumerate(param_names): - tested_param = grid_params[param_name] - settings = param_settings[param_name] - # Determine coloring - is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) - if is_numeric: - norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if settings.get("log_scale", False) else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) - color_func = lambda v: settings["colormap"](norm(v)) - colorbar_needed = True - else: - unique_vals = sorted(set(tested_param)) - cmap = mpl.cm.get_cmap(settings["colormap"], 
len(unique_vals)) - color_map = {val: cmap(j) for j, val in enumerate(unique_vals)} - color_func = lambda v: color_map[v] - colorbar_needed = False - - for j, output_name in enumerate(out_names): - ax = axes[i][j] - out_settings = output_settings[output_name] - # Plot each ECDF - for val in tested_param: - data_key = f"{output_name}_per_{param_name}" - if val not in grouped_data[data_key]: - continue - raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0) - sns.ecdfplot( - data=raw, - log_scale=out_settings.get("log_scale", False), - #stat="proportion", - color=color_func(val), - linewidth=4, - ax=ax - ) - # Labels and grid - if i == n_rows - 1: - ax.set_xlabel(out_settings["label"], fontsize=22) - ax.xaxis.set_label_coords(0.5, -0.3) - ax.tick_params(axis='x', labelsize=22) - else : - ax.tick_params(axis='x', labelbottom=False) - if j == 0: - ax.set_ylabel("") - ticks = [0.0, 0.5, 1.0] - ax.set_yticks(ticks) - ax.tick_params(axis='y', labelsize=22) # show tick labels with size - else: - ax.set_ylabel("") - ax.set_yticks(ticks) - ax.tick_params(axis='y', labelleft=False) - # Grid - ax.grid(alpha=0.4) - # Colorbar or legend - if colorbar_needed: - sm = mpl.cm.ScalarMappable(cmap=settings["colormap"], norm=norm) - cbar = fig.colorbar(sm, ax=ax, pad=0.08, aspect=10) - cbar.set_label(settings["label"], fontsize=24) - cbar.ax.yaxis.set_label_coords(5.5, 0.5) - ticks = sorted(set(tested_param)) - cbar.set_ticks(ticks) - cbar.ax.tick_params(labelsize=22) - else: - handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=4, label=str(val)) for val in unique_vals] - ax.legend(handles=handles, fontsize=24,bbox_to_anchor=(1.01, 1), loc='upper left') - - # Add a single shared y-axis label - fig.text(0.04, 0.5, 'Normalized cumulative fraction of simulations', va='center', rotation='vertical', fontsize=40) - # Adjust layout and save - plt.tight_layout(rect=[0.08, 0.02, 1, 0.97]) - fig.savefig(os.path.join(plots_path, "ecdf_param_output_grid.png"), 
dpi=300) - plt.close(fig) + # # Loop through parameters (rows) and outputs (columns) + # for i, param_name in enumerate(param_names): + # tested_param = grid_params[param_name] + # settings = param_settings[param_name] + # # Determine coloring + # is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) + # if is_numeric: + # norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if settings.get("log_scale", False) else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) + # color_func = lambda v: settings["colormap"](norm(v)) + # colorbar_needed = True + # else: + # unique_vals = sorted(set(tested_param)) + # cmap = mpl.cm.get_cmap(settings["colormap"], len(unique_vals)) + # color_map = {val: cmap(j) for j, val in enumerate(unique_vals)} + # color_func = lambda v: color_map[v] + # colorbar_needed = False + + # for j, output_name in enumerate(out_names): + # ax = axes[i][j] + # out_settings = output_settings[output_name] + # # Plot each ECDF + # for val in tested_param: + # data_key = f"{output_name}_per_{param_name}" + # if val not in grouped_data[data_key]: + # continue + # raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0) + # sns.ecdfplot( + # data=raw, + # log_scale=out_settings.get("log_scale", False), + # #stat="proportion", + # color=color_func(val), + # linewidth=4, + # ax=ax + # ) + # # Labels and grid + # if i == n_rows - 1: + # ax.set_xlabel(out_settings["label"], fontsize=22) + # ax.xaxis.set_label_coords(0.5, -0.3) + # ax.tick_params(axis='x', labelsize=22) + # else : + # ax.tick_params(axis='x', labelbottom=False) + # if j == 0: + # ax.set_ylabel("") + # ticks = [0.0, 0.5, 1.0] + # ax.set_yticks(ticks) + # ax.tick_params(axis='y', labelsize=22) # show tick labels with size + # else: + # ax.set_ylabel("") + # ax.set_yticks(ticks) + # ax.tick_params(axis='y', labelleft=False) + # # Grid + # ax.grid(alpha=0.4) + # # Colorbar or legend + # if colorbar_needed: + # sm = 
mpl.cm.ScalarMappable(cmap=settings["colormap"], norm=norm) + # cbar = fig.colorbar(sm, ax=ax, pad=0.08, aspect=10) + # cbar.set_label(settings["label"], fontsize=24) + # cbar.ax.yaxis.set_label_coords(5.5, 0.5) + # ticks = sorted(set(tested_param)) + # cbar.set_ticks(ticks) + # cbar.ax.tick_params(labelsize=22) + # else: + # handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=4, label=str(val)) for val in unique_vals] + # ax.legend(handles=handles, fontsize=24,bbox_to_anchor=(1.01, 1), loc='upper left') + + # # Add a single shared y-axis label + # fig.text(0.04, 0.5, 'Normalized cumulative fraction of simulations', va='center', rotation='vertical', fontsize=40) + # # Adjust layout and save + # plt.tight_layout(rect=[0.08, 0.02, 1, 0.97]) + # fig.savefig(os.path.join(plots_path, "ecdf_param_output_grid_test_solid.png"), dpi=300) + # plt.close(fig) + + # print(f"Grid plot is available in the ecdf_param_output_grid.png file") diff --git a/tools/post_processing_grid/post_processing_grid.py b/tools/post_processing_grid/post_processing_grid.py index 3da274164..e1f7ae7be 100644 --- a/tools/post_processing_grid/post_processing_grid.py +++ b/tools/post_processing_grid/post_processing_grid.py @@ -232,7 +232,7 @@ def extract_solidification_time(cases_data: list, phi_crit: float): df = case['output_values'] # Check if the required columns exist in the dataframe if df is None: - solidification_times.append(np.nan) + solidification_times.append(0.0) continue if 'Phi_global' in df.columns and 'Time' in df.columns: @@ -242,7 +242,7 @@ def extract_solidification_time(cases_data: list, phi_crit: float): solid_time = df.loc[first_index, 'Time'] # Get the index of the time at which the condition is first satisfied solidification_times.append(solid_time) else: - solidification_times.append(np.nan) # Append NaN if condition is not satisfied + solidification_times.append(0.0) # Append NaN if condition is not satisfied else: if not columns_printed: print("Warning: 'Phi_global' 
and/or 'Time' columns not found in some cases.") From e8e2a2be9a03b7f8194dd5a5398a2578f1480f25 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 2 Jun 2025 16:16:27 +0200 Subject: [PATCH 028/105] re-organize the code for easier use --- .../grid/functions_grid_analyze_and_plot.py | 345 ++++++++++++++++-- src/proteus/grid/grid_analysis.py | 52 +++ 2 files changed, 360 insertions(+), 37 deletions(-) rename tools/post_processing_grid/post_processing_grid.py => src/proteus/grid/functions_grid_analyze_and_plot.py (60%) create mode 100644 src/proteus/grid/grid_analysis.py diff --git a/tools/post_processing_grid/post_processing_grid.py b/src/proteus/grid/functions_grid_analyze_and_plot.py similarity index 60% rename from tools/post_processing_grid/post_processing_grid.py rename to src/proteus/grid/functions_grid_analyze_and_plot.py index e1f7ae7be..38735d99d 100644 --- a/tools/post_processing_grid/post_processing_grid.py +++ b/src/proteus/grid/functions_grid_analyze_and_plot.py @@ -1,13 +1,18 @@ -from pathlib import Path -import pandas as pd import toml -import os import re import ast -import numpy as np import csv from typing import Tuple, Dict, List, Any +from pathlib import Path +import os +import pandas as pd +import numpy as np +from io import StringIO +import seaborn as sns +import matplotlib.pyplot as plt + +# Functions for extracting grid data def load_grid_cases(grid_dir: Path): """ @@ -204,7 +209,7 @@ def extract_grid_output(cases_data: list, parameter_name: str): return parameter_values -def extract_solidification_time(cases_data: list, phi_crit: float): +def extract_solidification_time(cases_data: list, phi_crit: float = 0.005): """ Extract the solidification time at the time step where the condition 'Phi_global' < phi_crit is first satisfied for each planet. 
@@ -254,7 +259,7 @@ def extract_solidification_time(cases_data: list, phi_crit: float): status_10_cases = [case for case in cases_data if (case.get('status') or '').strip() == 'Completed (solidified)'] completed_count = len(status_10_cases) # Count only valid solidification times (non-NaN) - valid_solidification_times = [time for time in solidification_times if not np.isnan(time)] + valid_solidification_times = [time for time in solidification_times if not np.isnan(time) and time > 0.0] valid_solidified_count = len(valid_solidification_times) print('-----------------------------------------------------------') @@ -279,8 +284,52 @@ def extract_solidification_time(cases_data: list, phi_crit: float): return solidification_times +def extract_CHNOS_totals(cases_data: List[dict]) -> List[pd.Series]: + """ + For each case in `cases_data`, compute the row-wise total of H, C, N, O, S. + + Parameters + ---------- + cases_data : List[dict] + Each dict must have an 'output_values' key holding a pandas DataFrame. + + Returns + ------- + totals_list : List[pd.Series] + One Series per case, indexed as in the original DataFrame, containing + the sum H+C+N+O+S at each row. Cases missing any required column are skipped. + """ + # Columns to sum + _cols = ['H_kg_total', 'C_kg_total', 'N_kg_total', 'O_kg_total', 'S_kg_total'] + + totals_list: List[pd.Series] = [] + warned = False + + for case in cases_data: + df = case.get('output_values') + name = case.get('init_parameters', {}).get('name', '') + if df is None: + continue + + missing = [c for c in _cols if c not in df.columns] + if missing: + if not warned: + print(f"Warning: missing columns {missing!r} in case '{name}'. 
" + f"Available: {df.columns.tolist()!r}") + warned = True + continue + + # Compute row-wise sum of the five elements + total = df[_cols].sum(axis=1) + total.name = f"{name}_CHNOS_total" + totals_list.append(total) + + print(f"Extracted CHNOS totals for {len(totals_list)} cases.") + print('-----------------------------------------------------------') + return totals_list + def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dict, case_params: Dict[int, Dict[str, Any]], - extracted_value: dict, output_to_extract: list, phi_crit: float, output_dir: Path): + extracted_value: dict, output_to_extract: list, output_dir: Path, phi_crit: float = 0.005): """ Save all simulation information (status, grid parameters, output values) into a CSV file for later analysis (using plot_grid.py to make plots for instance). @@ -305,13 +354,13 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic output_to_extract : list List of output values extracted from each simulation in the grid. - phi_crit : float - The critical melt fraction value used to determine if a planet is considered solidified. - A typical value is 0.005. - output_dir : Path The directory where the generated CSV file will be saved. If the directory does not exist, it will be created. + + phi_crit : float + The critical melt fraction value used to determine if a planet is considered solidified. + A typical value is 0.005. 
""" # Check if the output directory exist, if not create it output_dir = Path(output_dir) @@ -365,29 +414,251 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic print(f"Extracted data has been successfully saved to {csv_file}.") print('-----------------------------------------------------------') -if __name__ == '__main__': - - # User needs to specify paths - grid_name = 'escape_grid_habrok_7_params_1Msun' - grid_path = f'/home2/p315557/PROTEUS/output/scratch/{grid_name}/' - data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' - - # User choose the parameters to post-process the grid - output_to_extract = ['esc_rate_total','Phi_global', 'P_surf','atm_kg_per_mol'] # Output columns to extract from 'runtime_helpfile.csv' of each case - phi_crit = 0.005 # Critical melt fraction for the solidification condition - - # Post-processing the grid - extracted_value = {} # Initialize the dictionary to store extracted values - cases_data = load_grid_cases(grid_path) # Load all simulation cases - grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters - for param in output_to_extract: - extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values - solidification_times = extract_solidification_time(cases_data, phi_crit) # Extract the solidification time - extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values - save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, - extracted_value, solidification_times, phi_crit, data_dir) # Save all the extracted data to a CSV file - - # Done with the post-processing step :) - print("Post-processing completed. 
Let's do some plots !") - print('(Please check for any WARNING messages above before going further.)') - print('-----------------------------------------------------------') + +# Functions for plotting grid data results + +def load_extracted_data(data_path : str | Path, grid_name :str): + + """ + Load extracted data from the CSV file generated with post_processing.py, returning a DataFrame for plotting. + + Parameters + ---------- + data_path : str or Path + Path to the directory containing the CSV file with the extracted data. + grid_name : str + Name of the grid + + Returns + ------- + df : pd.DataFrame + DataFrame with the extracted simulation data. + + grid_params : dict + Dictionary of grid parameter names and their value lists. + + extracted_outputs : list of str + List of extracted output variable names. + """ + + csv_file = os.path.join(data_path, f"{grid_name}_extracted_data.csv") + if not os.path.exists(csv_file): + raise FileNotFoundError(f"CSV file not found at: {csv_file}") + + with open(csv_file, 'r') as f: + lines = f.readlines() + + data_start_idx = None + grid_params = {} + extracted_outputs = [] + grid_dimension = None + + # Get grid dimension + for i, line in enumerate(lines): + if line.strip().startswith("Dimension of the grid"): + grid_dimension = int(line.split(":")[-1].strip()) + break + + if grid_dimension is None: + raise ValueError("Could not find 'Dimension of the grid' in the CSV file.") + + # Extract grid parameters + for i, line in enumerate(lines): + if "Grid Parameters" in line: + grid_param_start_idx = i + 2 # Skip divider after header + break + else: + raise ValueError("Could not find 'Grid Parameters' section in the CSV file.") + + for line in lines[grid_param_start_idx:]: + line = line.strip().strip('"') # Remove quotes and whitespace + + # Stop at the next divider or output section + if line.startswith("----------------------------------------------------------") or "Extracted output values" in line: + break + + if ':' in line: 
+ param_name, param_values = line.split(":", 1) + param_name = param_name.strip() + param_values = param_values.strip().strip("[]").split(",") + param_values = [val.strip() for val in param_values] + param_values = [float(val) if is_float(val) else val for val in param_values] + grid_params[param_name] = param_values + + # Check dimensions + if len(grid_params) != grid_dimension: + raise ValueError(f"Mismatch: Expected {grid_dimension} grid parameters, found {len(grid_params)}.") + + # Extract output names + for line in lines: + line = line.strip().strip('"') # Remove quotes and whitespace + + if line.startswith("Extracted output values"): + if ":" in line: + _, outputs_part = line.split(":", 1) + outputs_part = outputs_part.strip().strip("[]") + extracted_outputs = [s.strip() for s in outputs_part.split(",")] + break + + # Find start of actual data + for i, line in enumerate(lines): + if line.strip().startswith("Case number"): + data_start_idx = i + break + + if data_start_idx is None: + raise ValueError("Could not find CSV header line starting with 'Case number'.") + + # Extract the actual data section and read it + data_section = ''.join(lines[data_start_idx:]) + df = pd.read_csv(StringIO(data_section)) + + return df, grid_params, extracted_outputs + +def is_float(value): + """Helper function to check if a string can be converted to a float.""" + try: + float(value) # Try converting string to float + return True + except ValueError: + return False # Fail not a valid float + +def plot_dir_exists(plot_dir: Path): + + """ + Check if the plot directory exists. If not, create it. + + Parameters + ---------- + plot_dir : Path + Path object pointing to the desired plot directory. 
+ """ + plot_dir = Path(plot_dir) + if not plot_dir.exists(): + plot_dir.mkdir(parents=True, exist_ok=True) + print(f"Created plot directory: {plot_dir}") + +def group_output_by_parameter(df,grid_parameters,outputs): + """ + Groups output values (like solidification times) by a specific grid parameter. + + Parameters + ---------- + df : pd.DataFrame + DataFrame containing simulation results including value of the grid parameter and the corresponding extracted output. + + grid_parameters : str + Column name of the grid parameter to group by (like 'escape.zephyrus.Pxuv'). + + outputs : str + Column name of the output to extract (like 'solidification_time'). + + Returns + ------- + dict + Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. + """ + grouped = {} + + for param in grid_parameters: + for output in outputs: + key_name = f"{output}_per_{param}" + value_dict = {} + for param_value in df[param].dropna().unique(): + subset = df[df[param] == param_value] + output_values = subset[output].replace([np.inf, -np.inf], np.nan) + output_values = output_values.dropna() + output_values = output_values[output_values > 0] # Remove zeros and negatives + + value_dict[param_value] = output_values + + grouped[key_name] = value_dict + + return grouped + +def plot_grid_status(cases_data, plot_dir: Path, grid_name: str, status_colors: dict = None): + """ + Plot the status of simulations from the PROTEUS grid with improved x-axis readability. + + Parameters + ---------- + cases_data : list or DataFrame + Contains the status of all simulations from the grid. + + plot_dir : Path + Path to the plots directory. + + grid_name : str + Name of the grid, used for the plot title. + + status_colors : dict, optional + A dictionary mapping statuses to specific colors. If None, a default palette is used. 
+ """ + + # Extract and clean statuses + statuses = cases_data['Status'].fillna('unknown').astype(str) + status_counts = statuses.value_counts().sort_values(ascending=False) + + # Set colors for the bars + if status_colors: + formatted_status_keys = [s.replace(" ", "\n") for s in status_counts.index] + palette = {formatted: status_colors.get(original, 'gray') + for formatted, original in zip(formatted_status_keys, status_counts.index)} + else: + palette = sns.color_palette("Accent", len(status_counts)) + formatted_status_keys = [s.replace("d (", "d \n (") for s in status_counts.index] + palette = dict(zip(formatted_status_keys, palette)) + + # Prepare dataframe for plotting + plot_df = pd.DataFrame({ + 'Status': formatted_status_keys, + 'Count': status_counts.values + }) + + plt.figure(figsize=(10, 7)) + ax = sns.barplot( + data=plot_df, + x='Status', + y='Count', + hue='Status', + palette=palette, + dodge=False, + edgecolor='black' + ) + + # Remove legend if it was created + if ax.legend_: + ax.legend_.remove() + + # Add value labels above bars + total_simulations = len(cases_data) + for i, count in enumerate(status_counts.values): + percentage = (count / total_simulations) * 100 + ax.text( + i, count + 1, + f"{count} ({percentage:.1f}%)", + ha='center', va='bottom', fontsize=10 + ) + + # Boxed total in upper right + plt.gca().text( + 0.97, 0.94, + f"Total number of simulations : {total_simulations}", + transform=plt.gca().transAxes, + ha='right', va='top', + fontsize=14, + #bbox=dict(boxstyle="round,pad=0.5", facecolor="white", edgecolor="black") + ) + + plt.grid(alpha=0.2, axis='y') + plt.title(f"Grid statuses summary : {grid_name}", fontsize=16) + plt.xlabel("Simulation statuses", fontsize=16) + plt.ylabel("Number of simulations", fontsize=16) + plt.yticks(fontsize=12) + plt.xticks(fontsize=12) + plt.tight_layout() + output_path = plot_dir + 'grid_statuses_summary.png' + plt.savefig(output_path, dpi=300) + plt.close() + + print(f"Summary plot of grid statuses 
is available") diff --git a/src/proteus/grid/grid_analysis.py b/src/proteus/grid/grid_analysis.py new file mode 100644 index 000000000..1497990fa --- /dev/null +++ b/src/proteus/grid/grid_analysis.py @@ -0,0 +1,52 @@ +import os +from functions_grid_analyze_and_plot import load_grid_cases, get_grid_parameters, extract_grid_output, extract_solidification_time, save_grid_data_to_csv, load_extracted_data, plot_dir_exists, group_output_by_parameter, plot_grid_status + +if __name__ == '__main__': + + # User needs to specify paths + path_to_grid = '/home2/p315557/PROTEUS/output/scratch/' + grid_name = 'escape_grid_habrok_7_params_1Msun' + + grid_path = f'{path_to_grid}{grid_name}/' # Path to the grid directory + data_dir = f'{grid_path}/extracted_data/' # Path to the directory where the data will be saved + os.makedirs(data_dir, exist_ok=True) # Create the directory if it doesn't exist + plots_path = f'{grid_path}plots_grid/' # Path to the directory where the plots will be saved + plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it. + + # User choose the parameters to post-process the grid + output_to_extract = ['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol'] # Output columns to extract from 'runtime_helpfile.csv' of each case. 
For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys)
+
+    ### Step 1: Post-processing the grid
+
+    print('-----------------------------------------------------------')
+    print(f'Step 1 : Post-processing the grid {grid_name} ...')
+    print('-----------------------------------------------------------')
+
+    extracted_value = {} # Initialize the dictionary to store extracted values
+    cases_data = load_grid_cases(grid_path) # Load all simulation cases
+    grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters
+    for param in output_to_extract:
+        extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values
+    solidification_times = extract_solidification_time(cases_data) # Extract the solidification time
+    extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values
+    save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param,
+                          extracted_value, list(extracted_value), data_dir) # Save all the extracted data to a CSV file
+
+    ### Step 2: Load data and plot
+
+    print('-----------------------------------------------------------')
+    print(f'Step 2 : Loading data and plotting for grid {grid_name} ...')
+    print('-----------------------------------------------------------')
+
+    df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data
+    grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters
+
+    # Plots
+    plot_grid_status(df, plots_path, grid_name) # Plot the grid statuses in a histogram
+
+    print('-----------------------------------------------------------')
+    print(f'Plots saved in {plots_path}')
+    print(f'Post-processing of grid {grid_name} completed successfully!')
+    print('-----------------------------------------------------------')
+    print('If you want to change the parameters to post-process the grid, please edit the 
code in PROTEUS/tools/post_processing_grid/grid_analysis.py') + print('-----------------------------------------------------------') From bf4c5afe39cc9a17d60381552a0cdfd5eaff25d6 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 2 Jun 2025 16:20:29 +0200 Subject: [PATCH 029/105] remove all files related to this branch from tools/ --- tools/post_processing_grid/plot_grid.py | 458 ------------------------ 1 file changed, 458 deletions(-) delete mode 100644 tools/post_processing_grid/plot_grid.py diff --git a/tools/post_processing_grid/plot_grid.py b/tools/post_processing_grid/plot_grid.py deleted file mode 100644 index ed842ae30..000000000 --- a/tools/post_processing_grid/plot_grid.py +++ /dev/null @@ -1,458 +0,0 @@ -from pathlib import Path -import os -import pandas as pd -import numpy as np -from io import StringIO -import seaborn as sns -import matplotlib.pyplot as plt -import matplotlib as mpl -import matplotlib.colors as mcolors -import matplotlib.cm as cm - -def load_extracted_data(data_path : str | Path, grid_name :str): - - """ - Load extracted data from the CSV file generated with post_processing.py, returning a DataFrame for plotting. - - Parameters - ---------- - data_path : str or Path - Path to the directory containing the CSV file with the extracted data. - grid_name : str - Name of the grid - - Returns - ------- - df : pd.DataFrame - DataFrame with the extracted simulation data. - - grid_params : dict - Dictionary of grid parameter names and their value lists. - - extracted_outputs : list of str - List of extracted output variable names. 
- """ - - csv_file = os.path.join(data_path, f"{grid_name}_extracted_data.csv") - if not os.path.exists(csv_file): - raise FileNotFoundError(f"CSV file not found at: {csv_file}") - - with open(csv_file, 'r') as f: - lines = f.readlines() - - data_start_idx = None - grid_params = {} - extracted_outputs = [] - grid_dimension = None - - # Get grid dimension - for i, line in enumerate(lines): - if line.strip().startswith("Dimension of the grid"): - grid_dimension = int(line.split(":")[-1].strip()) - break - - if grid_dimension is None: - raise ValueError("Could not find 'Dimension of the grid' in the CSV file.") - - # Extract grid parameters - for i, line in enumerate(lines): - if "Grid Parameters" in line: - grid_param_start_idx = i + 2 # Skip divider after header - break - else: - raise ValueError("Could not find 'Grid Parameters' section in the CSV file.") - - for line in lines[grid_param_start_idx:]: - line = line.strip().strip('"') # Remove quotes and whitespace - - # Stop at the next divider or output section - if line.startswith("----------------------------------------------------------") or "Extracted output values" in line: - break - - if ':' in line: - param_name, param_values = line.split(":", 1) - param_name = param_name.strip() - param_values = param_values.strip().strip("[]").split(",") - param_values = [val.strip() for val in param_values] - param_values = [float(val) if is_float(val) else val for val in param_values] - grid_params[param_name] = param_values - - # Check dimensions - if len(grid_params) != grid_dimension: - raise ValueError(f"Mismatch: Expected {grid_dimension} grid parameters, found {len(grid_params)}.") - - # Extract output names - for line in lines: - line = line.strip().strip('"') # Remove quotes and whitespace - - if line.startswith("Extracted output values"): - if ":" in line: - _, outputs_part = line.split(":", 1) - outputs_part = outputs_part.strip().strip("[]") - extracted_outputs = [s.strip() for s in outputs_part.split(",")] - 
break - - # Find start of actual data - for i, line in enumerate(lines): - if line.strip().startswith("Case number"): - data_start_idx = i - break - - if data_start_idx is None: - raise ValueError("Could not find CSV header line starting with 'Case number'.") - - # Extract the actual data section and read it - data_section = ''.join(lines[data_start_idx:]) - df = pd.read_csv(StringIO(data_section)) - - return df, grid_params, extracted_outputs - -def is_float(value): - """Helper function to check if a string can be converted to a float.""" - try: - float(value) # Try converting string to float - return True - except ValueError: - return False # Fail not a valid float - -def plot_dir_exists(plot_dir: Path): - - """ - Check if the plot directory exists. If not, create it. - - Parameters - ---------- - plot_dir : Path - Path object pointing to the desired plot directory. - """ - plot_dir = Path(plot_dir) - if not plot_dir.exists(): - plot_dir.mkdir(parents=True, exist_ok=True) - print(f"Created plot directory: {plot_dir}") - -def group_output_by_parameter(df,grid_parameters,outputs): - """ - Groups output values (like solidification times) by a specific grid parameter. - - Parameters - ---------- - df : pd.DataFrame - DataFrame containing simulation results including value of the grid parameter and the corresponding extracted output. - - grid_parameters : str - Column name of the grid parameter to group by (like 'escape.zephyrus.Pxuv'). - - outputs : str - Column name of the output to extract (like 'solidification_time'). - - Returns - ------- - dict - Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. 
- """ - grouped = {} - - for param in grid_parameters: - for output in outputs: - key_name = f"{output}_per_{param}" - value_dict = {} - for param_value in df[param].dropna().unique(): - subset = df[df[param] == param_value] - output_values = subset[output].replace([np.inf, -np.inf], np.nan) - output_values = output_values.dropna() - output_values = output_values[output_values > 0] # Remove zeros and negatives - - value_dict[param_value] = output_values - - grouped[key_name] = value_dict - - return grouped - -def plot_grid_status(cases_data, plot_dir: Path, status_colors: dict = None): - """ - Plot the status of simulations from the PROTEUS grid with improved x-axis readability. - - Parameters - ---------- - cases_data : list or DataFrame - Contains the status of all simulations from the grid. - - plot_dir : Path - Path to the plots directory. - - status_colors : dict, optional - A dictionary mapping statuses to specific colors. If None, a default palette is used. - """ - - # Extract and clean statuses - statuses = df['Status'].fillna('unknown').astype(str) - status_counts = statuses.value_counts().sort_values(ascending=False) - - # Set colors for the bars - if status_colors: - formatted_status_keys = [s.replace(" ", "\n") for s in status_counts.index] - palette = {formatted: status_colors.get(original, 'gray') - for formatted, original in zip(formatted_status_keys, status_counts.index)} - else: - palette = sns.color_palette("Accent", len(status_counts)) - formatted_status_keys = [s.replace("d (", "d \n (") for s in status_counts.index] - palette = dict(zip(formatted_status_keys, palette)) - - # Prepare dataframe for plotting - plot_df = pd.DataFrame({ - 'Status': formatted_status_keys, - 'Count': status_counts.values - }) - - plt.figure(figsize=(10, 7)) - ax = sns.barplot( - data=plot_df, - x='Status', - y='Count', - hue='Status', - palette=palette, - dodge=False, - edgecolor='black' - ) - - # Remove legend if it was created - if ax.legend_: - ax.legend_.remove() 
- - # Add value labels above bars - total_simulations = len(cases_data) - for i, count in enumerate(status_counts.values): - percentage = (count / total_simulations) * 100 - ax.text( - i, count + 1, - f"{count} ({percentage:.1f}%)", - ha='center', va='bottom', fontsize=10 - ) - - # Boxed total in upper right - plt.gca().text( - 0.97, 0.94, - f"Total number of simulations : {total_simulations}", - transform=plt.gca().transAxes, - ha='right', va='top', - fontsize=14, - #bbox=dict(boxstyle="round,pad=0.5", facecolor="white", edgecolor="black") - ) - - plt.grid(alpha=0.2, axis='y') - plt.title(f"Grid status summary : {grid_name}", fontsize=16) - plt.xlabel("Simulation status", fontsize=16) - plt.ylabel("Number of simulations", fontsize=16) - plt.yticks(fontsize=12) - plt.xticks(fontsize=12) - plt.tight_layout() - output_path = plot_dir + 'grid_status_summary.png' - plt.savefig(output_path, dpi=300) - plt.close() - - print(f"Summary plot of grid status is available") - -if __name__ == '__main__': - - # User needs to specify paths - grid_name = 'escape_grid_habrok_7_params_1Msun' - data_dir = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_processed_data/{grid_name}/' - plots_path = f'/home2/p315557/PROTEUS/tools/post_processing_grid/nogit_plots/{grid_name}/' - - # Load and organize data before plotting - df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data - plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it. 
- grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters - - # Plots - plot_grid_status(df, plots_path) # Plot the grid status in an histogram - - # # Single plot of all input parameters and extracted outputs - # # Define input parameter settings - # param_settings = { - # "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, - # "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - # "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, - # "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, - # "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Dark2, "log_scale": False}, - # "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - # "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}} - - # # Define extracted output settings - # output_settings = { - # #'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - # #'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - # #'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, - # #'atm_kg_per_mol': {"label": "Mean molecular weight (MMW) [g/mol]", "log_scale": False, "scale": 1000.0}, - # 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}} - - - # # Loop over extracted outputs and input parameters - # for output_name, out_settings in output_settings.items(): - # for param_name, settings in param_settings.items(): - # tested_param = grid_params[param_name] # Get the values of the input parameter - # if len(tested_param) <= 1: - # continue - # # Extract plot settings for this input parameter - # 
param_label = settings["label"] - # cmap = settings["colormap"] - # color_log = settings.get("log_scale", False) - # # Extract plot settings for this output - # x_label = out_settings["label"] - # x_log = out_settings.get("log_scale", False) - # scale = out_settings.get("scale", 1.0) - - # # Determine colormap and color function if the parameter is numeric or string - # is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) - # if is_numeric: - # norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if color_log else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) - # color_func = lambda v: cmap(norm(v)) - # colorbar_needed = True - # else: - # unique_vals = sorted(set(tested_param)) - # cats_cmap = mpl.cm.get_cmap(cmap, len(unique_vals)) - # color_map = {val: cats_cmap(i) for i, val in enumerate(unique_vals)} - # color_func = lambda val: color_map[val] - # colorbar_needed = False - - # # Create figure - # fig, ax = plt.subplots(figsize=(10, 6)) - # data_key = f'{output_name}_per_{param_name}' - # for val in tested_param: - # if val not in grouped_data[data_key]: - # continue - # raw = np.array(grouped_data[data_key][val]) * scale - # sns.ecdfplot( - # data=raw, - # log_scale=x_log, - # stat="proportion", - # color=color_func(val), - # linewidth=3, - # ax=ax) - # # Set axis labels - # ax.set_xlabel(x_label, fontsize=14) - # ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) - # ax.grid(alpha=0.1) - - # # Add colorbar or legend - # if colorbar_needed: - # sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) - # cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) - # cbar.set_label(param_label, fontsize=14) - # ticks = sorted(set(tested_param)) - # cbar.set_ticks(ticks) - # else: - # handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=3, label=str(val)) for val in unique_vals] - # ax.legend(handles=handles, loc='lower right') - - # # Save the figure - # output_dir = 
os.path.join(plots_path, "test_solid_ecdf_by_param_and_output") - # os.makedirs(output_dir, exist_ok=True) - # fname = f"ecdf_{output_name}_per_{param_name.replace('.', '_')}.png" - # plt.tight_layout() - # plt.savefig(os.path.join(output_dir, fname), dpi=300) - # plt.close() - # print(f"All single plots are available in the ecdf_by_param_and_output/ folder") - - # Grid plot - - # # Define input parameter settings - # param_settings = { - # "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Dark2, "log_scale": False}, - # "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, - # "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, - # "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - # "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, - # "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - # "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} - - # # Define extracted output settings - # output_settings = { - # 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, - # 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - # 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, - # 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - # 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}} - - # # Prepare parameter and output lists - # param_names = list(param_settings.keys()) - # out_names = list(output_settings.keys()) - - # # Create subplot grid: rows = parameters, columns = outputs - # n_rows = len(param_names) - # n_cols = len(out_names) - # fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 2.5 * n_rows), 
squeeze=False) - - # # Loop through parameters (rows) and outputs (columns) - # for i, param_name in enumerate(param_names): - # tested_param = grid_params[param_name] - # settings = param_settings[param_name] - # # Determine coloring - # is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) - # if is_numeric: - # norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if settings.get("log_scale", False) else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) - # color_func = lambda v: settings["colormap"](norm(v)) - # colorbar_needed = True - # else: - # unique_vals = sorted(set(tested_param)) - # cmap = mpl.cm.get_cmap(settings["colormap"], len(unique_vals)) - # color_map = {val: cmap(j) for j, val in enumerate(unique_vals)} - # color_func = lambda v: color_map[v] - # colorbar_needed = False - - # for j, output_name in enumerate(out_names): - # ax = axes[i][j] - # out_settings = output_settings[output_name] - # # Plot each ECDF - # for val in tested_param: - # data_key = f"{output_name}_per_{param_name}" - # if val not in grouped_data[data_key]: - # continue - # raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0) - # sns.ecdfplot( - # data=raw, - # log_scale=out_settings.get("log_scale", False), - # #stat="proportion", - # color=color_func(val), - # linewidth=4, - # ax=ax - # ) - # # Labels and grid - # if i == n_rows - 1: - # ax.set_xlabel(out_settings["label"], fontsize=22) - # ax.xaxis.set_label_coords(0.5, -0.3) - # ax.tick_params(axis='x', labelsize=22) - # else : - # ax.tick_params(axis='x', labelbottom=False) - # if j == 0: - # ax.set_ylabel("") - # ticks = [0.0, 0.5, 1.0] - # ax.set_yticks(ticks) - # ax.tick_params(axis='y', labelsize=22) # show tick labels with size - # else: - # ax.set_ylabel("") - # ax.set_yticks(ticks) - # ax.tick_params(axis='y', labelleft=False) - # # Grid - # ax.grid(alpha=0.4) - # # Colorbar or legend - # if colorbar_needed: - # sm = 
mpl.cm.ScalarMappable(cmap=settings["colormap"], norm=norm) - # cbar = fig.colorbar(sm, ax=ax, pad=0.08, aspect=10) - # cbar.set_label(settings["label"], fontsize=24) - # cbar.ax.yaxis.set_label_coords(5.5, 0.5) - # ticks = sorted(set(tested_param)) - # cbar.set_ticks(ticks) - # cbar.ax.tick_params(labelsize=22) - # else: - # handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=4, label=str(val)) for val in unique_vals] - # ax.legend(handles=handles, fontsize=24,bbox_to_anchor=(1.01, 1), loc='upper left') - - # # Add a single shared y-axis label - # fig.text(0.04, 0.5, 'Normalized cumulative fraction of simulations', va='center', rotation='vertical', fontsize=40) - # # Adjust layout and save - # plt.tight_layout(rect=[0.08, 0.02, 1, 0.97]) - # fig.savefig(os.path.join(plots_path, "ecdf_param_output_grid_test_solid.png"), dpi=300) - # plt.close(fig) - - # print(f"Grid plot is available in the ecdf_param_output_grid.png file") From 20774675df49466f83d142201f7af73c7a374487 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 2 Jun 2025 17:34:36 +0200 Subject: [PATCH 030/105] re-organize output + try to fix grid plot --- src/proteus/grid/grid_analysis.py | 61 ++++- ...nalyze_and_plot.py => postprocess_grid.py} | 244 +++++++++++++++++- 2 files changed, 297 insertions(+), 8 deletions(-) rename src/proteus/grid/{functions_grid_analyze_and_plot.py => postprocess_grid.py} (70%) diff --git a/src/proteus/grid/grid_analysis.py b/src/proteus/grid/grid_analysis.py index 1497990fa..2c88f12fd 100644 --- a/src/proteus/grid/grid_analysis.py +++ b/src/proteus/grid/grid_analysis.py @@ -1,5 +1,16 @@ +# This script is used to post-process a grid of PROTEUS simulations. +# It extracts the output values from the simulation cases, saves them to a CSV file, +# and generates plots for the grid statuses based on the extracted data. It also generates +# ECDF single plots and a big grid plot for the input parameters vs extracted outputs. 
+
+# The user needs to specify the path to the grid directory and the grid name. They also need
+# to specify the output columns to extract from the 'runtime_helpfile.csv' of each case. And
+# update the related plotting variables accordingly.
+
 import os
-from functions_grid_analyze_and_plot import load_grid_cases, get_grid_parameters, extract_grid_output, extract_solidification_time, save_grid_data_to_csv, load_extracted_data, plot_dir_exists, group_output_by_parameter, plot_grid_status
+from postprocess_grid import load_grid_cases, get_grid_parameters, extract_grid_output, extract_solidification_time, save_grid_data_to_csv, load_extracted_data, plot_dir_exists, group_output_by_parameter, plot_grid_status, ecdf_single_plots, ecdf_grid_plot
+import matplotlib.cm as cm
+
 
 
 if __name__ == '__main__':
@@ -8,9 +19,10 @@
     grid_name = 'escape_grid_habrok_7_params_1Msun'
 
     grid_path = f'{path_to_grid}{grid_name}/' # Path to the grid directory
-    data_dir = f'{grid_path}/extracted_data/' # Path to the directory where the data will be saved
+    postprocess_path = f'{grid_path}post_processing_grid/' # Path to the postprocess directory
+    data_dir = f'{postprocess_path}/extracted_data/' # Path to the directory where the data will be saved
     os.makedirs(data_dir, exist_ok=True) # Create the directory if it doesn't exist
-    plots_path = f'{grid_path}plots_grid/' # Path to the directory where the plots will be saved
+    plots_path = f'{postprocess_path}plots_grid/' # Path to the directory where the plots will be saved
     plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it.
 
# User choose the parameters to post-process the grid @@ -41,12 +53,51 @@ df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters - # Plots + # Histogram of grid statuses plot_grid_status(df, plots_path, grid_name) # Plot the grid statuses in an histogram + # Single ECDF Plots + param_settings_single = { + "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, + "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}} + output_settings_single = { + 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, + 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, + 'atm_kg_per_mol': {"label": "Mean molecular weight (MMW) [g/mol]", "log_scale": False, "scale": 1000.0}, + 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}, + 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, + 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}} + ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, 
output_settings=output_settings_single, plots_path=plots_path) + + # ECDF Grid Plot + param_settings_grid = { + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, + "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, + "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} + output_settings_grid = { + 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, + 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, + 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, + 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, + 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, + 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24} + } + ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid, plots_path=plots_path) + print('-----------------------------------------------------------') print(f'Plots saved in {plots_path}') print(f'Post-processing of grid {grid_name} completed successfully!') print('-----------------------------------------------------------') - print('If you want to change the parameters to post-process the grid, please edit the code in PROTEUS/tools/post_processing_grid/grid_analysis.py') + 
print('If you want to change the parameters to post-process the grid, please edit the code in PROTEUS/src/proteus/grid/grid_analysis.py') print('-----------------------------------------------------------') diff --git a/src/proteus/grid/functions_grid_analyze_and_plot.py b/src/proteus/grid/postprocess_grid.py similarity index 70% rename from src/proteus/grid/functions_grid_analyze_and_plot.py rename to src/proteus/grid/postprocess_grid.py index 38735d99d..afe4c1dda 100644 --- a/src/proteus/grid/functions_grid_analyze_and_plot.py +++ b/src/proteus/grid/postprocess_grid.py @@ -10,9 +10,10 @@ from io import StringIO import seaborn as sns import matplotlib.pyplot as plt +import matplotlib as mpl +import matplotlib.cm as cm - -# Functions for extracting grid data +##### Functions for extracting grid data ##### def load_grid_cases(grid_dir: Path): """ @@ -415,7 +416,7 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic print('-----------------------------------------------------------') -# Functions for plotting grid data results +##### Functions for plotting grid data results ##### def load_extracted_data(data_path : str | Path, grid_name :str): @@ -662,3 +663,240 @@ def plot_grid_status(cases_data, plot_dir: Path, grid_name: str, status_colors: plt.close() print(f"Summary plot of grid statuses is available") + +def ecdf_single_plots(grid_params: dict, grouped_data: dict, param_settings: dict, output_settings: dict, plots_path: str): + """ + Generates and saves one ECDF plot per combination of output and input parameter. + + Parameters + ---------- + + grid_params : dict + A mapping from parameter names (e.g. "orbit.semimajoraxis") to arrays/lists of tested values. + + grouped_data : dict + Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. 
+ + param_settings : dict + For each input-parameter key, a dict containing: + - "label": label of the colormap for the corresponding input parameter + - "colormap": a matplotlib colormap (e.g. mpl.cm.plasma) + - "log_scale": bool, whether to color-normalize on a log scale + + output_settings : dict + For each output key, a dict containing: + - "label": label of the x-axis for the corresponding output quantity + - "log_scale": bool, whether to plot the x-axis on log scale + - "scale": float, a factor to multiply raw values by before plotting + + plots_path : str + Path to the grid where to create "single_plots_ecdf" and save all .png plots + """ + # Create output directory if not already there + output_dir = os.path.join(plots_path, "single_plots_ecdf") + os.makedirs(output_dir, exist_ok=True) + + for output_name, out_settings in output_settings.items(): + for param_name, settings in param_settings.items(): + tested_param = grid_params.get(param_name, []) + if len(tested_param) <= 1: + # Skip if only a single value was tested + continue + + # Plot settings for this input parameter + param_label = settings["label"] + cmap = settings["colormap"] + color_log = settings.get("log_scale", False) + + # Plot settings for this output + x_label = out_settings["label"] + x_log = out_settings.get("log_scale", False) + scale = out_settings.get("scale", 1.0) + + # Determine if the parameter array is numeric + is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) + + if is_numeric: + # Continuous colormap: Normalize either linearly or in log-space + if color_log: + norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) + else: + norm = mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) + color_func = lambda v: cmap(norm(v)) + colorbar_needed = True + else: + # Categorical colormap: map each unique value to one color + unique_vals = sorted(set(tested_param)) + cats_cmap = 
mpl.colormaps.get_cmap(cmap.name).resampled(len(unique_vals)) + color_map = {val: cats_cmap(i) for i, val in enumerate(unique_vals)} + color_func = lambda val: color_map[val] + colorbar_needed = False + + # Create a new figure & axes + fig, ax = plt.subplots(figsize=(10, 6)) + data_key = f"{output_name}_per_{param_name}" + + for val in tested_param: + # Skip if no data for this value + if val not in grouped_data.get(data_key, {}): + continue + raw = np.array(grouped_data[data_key][val]) * scale + sns.ecdfplot( + data=raw, + log_scale=x_log, + stat="proportion", + color=color_func(val), + linewidth=3, + ax=ax + ) + + # Axis formatting + ax.set_xlabel(x_label, fontsize=14) + ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) + ax.grid(alpha=0.1) + + # Colorbar or legend + if colorbar_needed: + sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) + cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) + cbar.set_label(param_label, fontsize=14) + # Set ticks at each tested parameter value + ticks = sorted(set(tested_param)) + cbar.set_ticks(ticks) + else: + # Build a legend for categorical values + unique_vals = sorted(set(tested_param)) + handles = [ + mpl.lines.Line2D([0], [0], color=color_map[val], lw=3, label=str(val)) + for val in unique_vals + ] + ax.legend(handles=handles, loc="lower right", title=param_label) + + # Save and close + fname = f"ecdf_{output_name}_per_{param_name.replace('.', '_')}.png" + plt.tight_layout() + plt.savefig(os.path.join(output_dir, fname), dpi=300) + plt.close() + + print(f"All single ECDF plots are available at {output_dir}") + +def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, output_settings: dict, plots_path: str): + """ + Creates a grid of ECDF plots where each row corresponds to one input parameter + and each column corresponds to one output. Saves the resulting figure as a PNG. + + Parameters + ---------- + + grid_params : dict + A mapping from parameter names (e.g. 
"orbit.semimajoraxis") to arrays/lists of tested values. + + grouped_data : dict + Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. + + param_settings : dict + For each input-parameter key, a dict containing: + - "label": label of the colormap for the corresponding input parameter + - "colormap": a matplotlib colormap (e.g. mpl.cm.plasma) + - "log_scale": bool, whether to color-normalize on a log scale + + output_settings : dict + For each output key, a dict containing: + - "label": label of the x-axis for the corresponding output quantity + - "log_scale": bool, whether to plot the x-axis on log scale + - "scale": float, a factor to multiply raw values by before plotting + + plots_path : str + Path to the grid where to create "single_plots_ecdf" and save all .png plots + """ + # Ensure output directory exists + os.makedirs(plots_path, exist_ok=True) + + # List of parameter names (rows) and output names (columns) + param_names = list(param_settings.keys()) + out_names = list(output_settings.keys()) + + # Create subplot grid: rows = parameters, columns = outputs + n_rows = len(param_names) + n_cols = len(out_names) + fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 2.5 * n_rows), squeeze=False) + + # Loop through parameters (rows) and outputs (columns) + for i, param_name in enumerate(param_names): + tested_param = grid_params.get(param_name, []) + settings = param_settings[param_name] + # Determine coloring + is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) + if is_numeric: + norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if settings.get("log_scale", False) else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) + color_func = lambda v: settings["colormap"](norm(v)) + colorbar_needed = True + else: + unique_vals = sorted(set(tested_param)) + cmap = 
mpl.colormaps.get_cmap(settings["colormap"]).resampled(len(unique_vals)) + color_map = {val: cmap(j) for j, val in enumerate(unique_vals)} + color_func = lambda v: color_map[v] + colorbar_needed = False + + for j, output_name in enumerate(out_names): + ax = axes[i][j] + out_settings = output_settings[output_name] + + # Plot one ECDF per tested parameter value + for val in tested_param: + data_key = f"{output_name}_per_{param_name}" + if val not in grouped_data.get(data_key, {}): + continue + raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0) + sns.ecdfplot( + data=raw, + log_scale=out_settings.get("log_scale", False), + color=color_func(val), + linewidth=4, + ax=ax + ) + + # Configure x-axis labels, ticks, grids + if i == n_rows - 1: + ax.set_xlabel(out_settings["label"], fontsize=22) + ax.xaxis.set_label_coords(0.5, -0.3) + ax.tick_params(axis='x', labelsize=22) + else: + ax.tick_params(axis='x', labelbottom=False) + + # Configure y-axis (shared label added later) + if j == 0: + ticks = [0.0, 0.5, 1.0] + ax.set_yticks(ticks) + ax.tick_params(axis='y', labelsize=22) + else: + ax.set_ylabel("") + ax.set_yticks(ticks) + ax.tick_params(axis='y', labelleft=False) + + ax.grid(alpha=0.4) + + # After plotting all outputs for this parameter (row), add colorbar or legend + if colorbar_needed: + sm = mpl.cm.ScalarMappable(cmap=settings["colormap"], norm=norm) + cbar = fig.colorbar(sm, ax=axes[i, :], pad=0.08, aspect=10) + cbar.set_label(settings["label"], fontsize=24) + cbar.ax.yaxis.set_label_coords(5.5, 0.5) + ticks = sorted(set(tested_param)) + cbar.set_ticks(ticks) + cbar.ax.tick_params(labelsize=22) + else: + handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=4, label=str(val)) for val in unique_vals] + ax.legend(handles=handles, fontsize=24,bbox_to_anchor=(1.01, 1), loc='upper left') + + # Add a single, shared y-axis label + fig.text(0.04, 0.5, 'Normalized cumulative fraction of simulations', va='center', rotation='vertical', 
fontsize=40) + + # Tweak layout and save + plt.tight_layout(rect=[0.08, 0.02, 1, 0.97]) + filename = "ecdf_grid_plot.png" + out_path = os.path.join(plots_path, filename) + fig.savefig(out_path, dpi=300) + plt.close(fig) + + print(f"Grid ECDF plot saved at {out_path}") \ No newline at end of file From dea529861375d5b6af1a7887050aacbe256dcca6 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 2 Jun 2025 18:04:21 +0200 Subject: [PATCH 031/105] grid plot ok :) --- src/proteus/grid/grid_analysis.py | 32 ++++++++++++++-------------- src/proteus/grid/postprocess_grid.py | 6 +++++- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/src/proteus/grid/grid_analysis.py b/src/proteus/grid/grid_analysis.py index 2c88f12fd..95b180245 100644 --- a/src/proteus/grid/grid_analysis.py +++ b/src/proteus/grid/grid_analysis.py @@ -25,24 +25,24 @@ plots_path = f'{postprocess_path}plots_grid/' # Path to the directory where the plots will be saved plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it. - # User choose the parameters to post-process the grid - output_to_extract = ['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol'] # Output columns to extract from 'runtime_helpfile.csv' of each case. For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) + # # User choose the parameters to post-process the grid + # output_to_extract = ['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol'] # Output columns to extract from 'runtime_helpfile.csv' of each case. 
For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) - ### Step 1: Post-processing the grid + # ### Step 1: Post-processing the grid - print('-----------------------------------------------------------') - print(f'Step 1 : Post-processing the grid {grid_name} ...') - print('-----------------------------------------------------------') + # print('-----------------------------------------------------------') + # print(f'Step 1 : Post-processing the grid {grid_name} ...') + # print('-----------------------------------------------------------') - extracted_value = {} # Initialize the dictionary to store extracted values - cases_data = load_grid_cases(grid_path) # Load all simulation cases - grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters - for param in output_to_extract: - extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values - solidification_times = extract_solidification_time(cases_data) # Extract the solidification time - extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values - save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, - extracted_value, solidification_times, data_dir) # Save all the extracted data to a CSV file + # extracted_value = {} # Initialize the dictionary to store extracted values + # cases_data = load_grid_cases(grid_path) # Load all simulation cases + # grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters + # for param in output_to_extract: + # extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values + # solidification_times = extract_solidification_time(cases_data) # Extract the solidification time + # extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values + # save_grid_data_to_csv(grid_name, cases_data, grid_parameters, 
case_init_param, + # extracted_value, solidification_times, data_dir) # Save all the extracted data to a CSV file ### Step 2: Load data and plot @@ -73,7 +73,7 @@ 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}, 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}} - ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) + #ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) # ECDF Grid Plot param_settings_grid = { diff --git a/src/proteus/grid/postprocess_grid.py b/src/proteus/grid/postprocess_grid.py index afe4c1dda..627eaafaf 100644 --- a/src/proteus/grid/postprocess_grid.py +++ b/src/proteus/grid/postprocess_grid.py @@ -866,6 +866,7 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, # Configure y-axis (shared label added later) if j == 0: + ax.set_ylabel("") ticks = [0.0, 0.5, 1.0] ax.set_yticks(ticks) ax.tick_params(axis='y', labelsize=22) @@ -879,8 +880,11 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, # After plotting all outputs for this parameter (row), add colorbar or legend if colorbar_needed: sm = mpl.cm.ScalarMappable(cmap=settings["colormap"], norm=norm) - cbar = fig.colorbar(sm, ax=axes[i, :], pad=0.08, aspect=10) + # attach the colorbar to the right‐most subplot in row i: + rightmost_ax = axes[i, -1] + cbar = fig.colorbar(sm,ax=rightmost_ax,pad=0.03,aspect=10) cbar.set_label(settings["label"], fontsize=24) + # (you can remove or tweak the label‐coords line if it ends up too far to the right) cbar.ax.yaxis.set_label_coords(5.5, 0.5) ticks = sorted(set(tested_param)) cbar.set_ticks(ticks) From 
38f65f4bd6c9ba9bc7e8c7e2ecc916a82f5806fd Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 3 Jun 2025 14:01:26 +0200 Subject: [PATCH 032/105] extract more output from grid and test plots for different outputs --- src/proteus/grid/grid_analysis.py | 93 ++++++++++++++++++++-------- src/proteus/grid/postprocess_grid.py | 48 +------------- 2 files changed, 70 insertions(+), 71 deletions(-) diff --git a/src/proteus/grid/grid_analysis.py b/src/proteus/grid/grid_analysis.py index 95b180245..e8e7b64ce 100644 --- a/src/proteus/grid/grid_analysis.py +++ b/src/proteus/grid/grid_analysis.py @@ -8,47 +8,78 @@ # update the related plotting variables accordingly. import os -from postprocess_grid import load_grid_cases, get_grid_parameters, extract_grid_output, extract_solidification_time, save_grid_data_to_csv, load_extracted_data, plot_dir_exists, group_output_by_parameter, plot_grid_status, ecdf_single_plots, ecdf_grid_plot +from postprocess_grid import load_grid_cases, get_grid_parameters, extract_grid_output, extract_solidification_time, \ + save_grid_data_to_csv, load_extracted_data, plot_dir_exists, group_output_by_parameter, \ + plot_grid_status, ecdf_single_plots, ecdf_grid_plot import matplotlib.cm as cm if __name__ == '__main__': + # --------------------------------------------- + # Initialization + # --------------------------------------------- + # User needs to specify paths path_to_grid = '/home2/p315557/PROTEUS/output/scratch/' grid_name = 'escape_grid_habrok_7_params_1Msun' - grid_path = f'{path_to_grid}{grid_name}/' # Path to the grid directory + update_csv = True # Set to True if you want to update the CSV file / False if you want to skip the CSV update step + + grid_path = f'{path_to_grid}{grid_name}/' # Path to the grid directory postprocess_path = f'{grid_path}post_processing_grid/' # Path to the postprocess directory - data_dir = f'{postprocess_path}/extracted_data/' # Path to the directory where the data will be saved - os.makedirs(data_dir, 
exist_ok=True) # Create the directory if it doesn't exist - plots_path = f'{postprocess_path}plots_grid/' # Path to the directory where the plots will be saved - plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it. + data_dir = f'{postprocess_path}extracted_data/' # Path to the directory where the data will be saved + os.makedirs(data_dir, exist_ok=True) # Create the directory if it doesn't exist + plots_path = f'{postprocess_path}plots_grid/' # Path to the directory where the plots will be saved + plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it. + csv_file = os.path.join(data_dir, f'{grid_name}_extracted_data.csv') # Path to the CSV file where the extracted data will be saved + + # User choose the output to extract from 'runtime_helpfile.csv' of each case (always the [-1] element of the column). For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) + output_to_extract = ['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol', + 'H_kg_atm','O_kg_atm','C_kg_atm','N_kg_atm','S_kg_atm', 'Si_kg_atm', 'Mg_kg_atm', 'Fe_kg_atm', 'Na_kg_atm', + 'H2O_kg_atm','CO2_kg_atm', 'O2_kg_atm', 'H2_kg_atm', 'CH4_kg_atm', 'CO_kg_atm', 'N2_kg_atm', 'NH3_kg_atm', + 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm'] + + # --------------------------------------------- + # STEP 1: Post-processing the grid → producing CSV + # --------------------------------------------- + # Only run Step 1 if update_csv=True OR if the CSV does not exist yet + + if update_csv or not os.path.isfile(csv_file): + + print('-----------------------------------------------------------') + print(f'Step 1 : Post-processing the grid {grid_name} ...') + print('-----------------------------------------------------------') - # # User choose the parameters to post-process the grid - # output_to_extract = 
['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol'] # Output columns to extract from 'runtime_helpfile.csv' of each case. For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) + extracted_value = {} # Initialize the dictionary to store extracted values - # ### Step 1: Post-processing the grid + cases_data = load_grid_cases(grid_path) # Load all simulation cases + grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters - # print('-----------------------------------------------------------') - # print(f'Step 1 : Post-processing the grid {grid_name} ...') - # print('-----------------------------------------------------------') + for param in output_to_extract: + extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values - # extracted_value = {} # Initialize the dictionary to store extracted values - # cases_data = load_grid_cases(grid_path) # Load all simulation cases - # grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters - # for param in output_to_extract: - # extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values - # solidification_times = extract_solidification_time(cases_data) # Extract the solidification time - # extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values - # save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, - # extracted_value, solidification_times, data_dir) # Save all the extracted data to a CSV file + solidification_times = extract_solidification_time(cases_data) # Extract the solidification time + extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values - ### Step 2: Load data and plot + save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, + extracted_value, solidification_times, data_dir) # 
Save all the extracted data to a CSV file + print(f'--> CSV file written to: {csv_file}') + + else: + print('-----------------------------------------------------------') + print(f'Step 1 : Skipped (CSV already exists at {csv_file})') + print('-----------------------------------------------------------') + + + # --------------------------------------------- + # STEP 2: Load data from CSV and make plots + # --------------------------------------------- print('-----------------------------------------------------------') print(f'Step 2 : Loading data and plotting for grid {grid_name} ...') print('-----------------------------------------------------------') + df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters @@ -57,14 +88,16 @@ plot_grid_status(df, plots_path, grid_name) # Plot the grid statuses in an histogram # Single ECDF Plots + # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. 
param_settings_single = { "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}} + "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}, + } output_settings_single = { 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, @@ -72,10 +105,13 @@ 'atm_kg_per_mol': {"label": "Mean molecular weight (MMW) [g/mol]", "log_scale": False, "scale": 1000.0}, 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}, 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, - 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}} - #ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) + 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, + 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} +} + ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, 
param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) # ECDF Grid Plot + # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. param_settings_grid = { "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, @@ -92,6 +128,11 @@ 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24} + 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'O_kg_atm': {"label": r"[O$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'C_kg_atm': {"label": r"[C$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'N_kg_atm': {"label": r"[N$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'S_kg_atm': {"label": r"[S$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} } ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid, plots_path=plots_path) diff --git a/src/proteus/grid/postprocess_grid.py b/src/proteus/grid/postprocess_grid.py index 627eaafaf..ade1f28c2 100644 --- a/src/proteus/grid/postprocess_grid.py +++ b/src/proteus/grid/postprocess_grid.py @@ -12,6 +12,7 @@ import matplotlib.pyplot as plt import matplotlib as mpl import matplotlib.cm as cm +import matplotlib.colors as mcolors ##### Functions for extracting grid data ##### @@ -285,50 +286,6 @@ def extract_solidification_time(cases_data: list, phi_crit: float = 0.005): return solidification_times -def extract_CHNOS_totals(cases_data: List[dict]) -> List[pd.Series]: - """ - For each case in `cases_data`, compute the row-wise total of H, C, N, O, S. 
- - Parameters - ---------- - cases_data : List[dict] - Each dict must have an 'output_values' key holding a pandas DataFrame. - - Returns - ------- - totals_list : List[pd.Series] - One Series per case, indexed as in the original DataFrame, containing - the sum H+C+N+O+S at each row. Cases missing any required column are skipped. - """ - # Columns to sum - _cols = ['H_kg_total', 'C_kg_total', 'N_kg_total', 'O_kg_total', 'S_kg_total'] - - totals_list: List[pd.Series] = [] - warned = False - - for case in cases_data: - df = case.get('output_values') - name = case.get('init_parameters', {}).get('name', '') - if df is None: - continue - - missing = [c for c in _cols if c not in df.columns] - if missing: - if not warned: - print(f"Warning: missing columns {missing!r} in case '{name}'. " - f"Available: {df.columns.tolist()!r}") - warned = True - continue - - # Compute row-wise sum of the five elements - total = df[_cols].sum(axis=1) - total.name = f"{name}_CHNOS_total" - totals_list.append(total) - - print(f"Extracted CHNOS totals for {len(totals_list)} cases.") - print('-----------------------------------------------------------') - return totals_list - def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dict, case_params: Dict[int, Dict[str, Any]], extracted_value: dict, output_to_extract: list, output_dir: Path, phi_crit: float = 0.005): """ @@ -903,4 +860,5 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, fig.savefig(out_path, dpi=300) plt.close(fig) - print(f"Grid ECDF plot saved at {out_path}") \ No newline at end of file + print(f"Grid ECDF plot saved at {out_path}") + From 598e6da90623e4617d5da097d29cf2e06c7d1dae Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 3 Jun 2025 16:06:14 +0200 Subject: [PATCH 033/105] creating the proteus grid-analyze command and it worked yeah --- src/proteus/cli.py | 26 +++- src/proteus/grid/grid_analysis.py | 144 ------------------ ...rocess_grid.py => 
post_processing_grid.py} | 144 +++++++++++++++++- src/proteus/grid/run_grid_analysis.py | 15 ++ 4 files changed, 183 insertions(+), 146 deletions(-) delete mode 100644 src/proteus/grid/grid_analysis.py rename src/proteus/grid/{postprocess_grid.py => post_processing_grid.py} (77%) create mode 100644 src/proteus/grid/run_grid_analysis.py diff --git a/src/proteus/cli.py b/src/proteus/cli.py index fbb8449aa..7af874bde 100644 --- a/src/proteus/cli.py +++ b/src/proteus/cli.py @@ -226,6 +226,30 @@ def observe(config_path: Path): cli.add_command(offchem) cli.add_command(observe) +# ---------------- +# 'grid_analyze' postprocessing commands +# ---------------- + +@click.command() +@click.argument("grid_path", type=str, default=None, required=True) +@click.argument("grid_name", type=str, default=None, required=True) +@click.option("--update-csv", is_flag=True, help="Update the CSV file containing extracted data.") # If the user wants to update the CSV file, he needs to specify it in the commad line with this flag. Otherwise, set to Flase by default. + +def grid_analyze(grid_path: str, grid_name: str, update_csv: bool): + """Run grid analysis on PROTEUS grid output files + + GRID_PATH : Path to the output directory containing the PROTEUS grid files. (Do not include the grid name here) + + GRID_NAME : Name of the grid to analyze. + + Example of usage : + + proteus grid_analyze /path/to/grid/ grid_name --update-csv + """ + from proteus.grid.post_processing_grid import run_grid_analyze + run_grid_analyze(path_to_grid=grid_path, grid_name=grid_name, update_csv=update_csv) + +cli.add_command(grid_analyze) if __name__ == '__main__': - cli() + cli() \ No newline at end of file diff --git a/src/proteus/grid/grid_analysis.py b/src/proteus/grid/grid_analysis.py deleted file mode 100644 index e8e7b64ce..000000000 --- a/src/proteus/grid/grid_analysis.py +++ /dev/null @@ -1,144 +0,0 @@ -# This script is used to post-process a grid of PROTEUS simulations. 
-# It extracts the output values from the simulation cases, saves them to a CSV file, -# and generates plots for the grid statuses based on the extracted data. It also generates -# ECDF single plots and a big grid plot for the input parameters vs extracted outputs. - -# The users need to specify the path to the grid directory and the grid name. He also needs -# to specify the output columns to extract from the 'runtime_helpfile.csv' of each case. And -# update the related plotting variables accordingly. - -import os -from postprocess_grid import load_grid_cases, get_grid_parameters, extract_grid_output, extract_solidification_time, \ - save_grid_data_to_csv, load_extracted_data, plot_dir_exists, group_output_by_parameter, \ - plot_grid_status, ecdf_single_plots, ecdf_grid_plot -import matplotlib.cm as cm - - -if __name__ == '__main__': - - # --------------------------------------------- - # Initialization - # --------------------------------------------- - - # User needs to specify paths - path_to_grid = '/home2/p315557/PROTEUS/output/scratch/' - grid_name = 'escape_grid_habrok_7_params_1Msun' - - update_csv = True # Set to True if you want to update the CSV file / False if you want to skip the CSV update step - - grid_path = f'{path_to_grid}{grid_name}/' # Path to the grid directory - postprocess_path = f'{grid_path}post_processing_grid/' # Path to the postprocess directory - data_dir = f'{postprocess_path}extracted_data/' # Path to the directory where the data will be saved - os.makedirs(data_dir, exist_ok=True) # Create the directory if it doesn't exist - plots_path = f'{postprocess_path}plots_grid/' # Path to the directory where the plots will be saved - plot_dir_exists(plots_path) # Check if the plot directory exists. If not, create it. 
- csv_file = os.path.join(data_dir, f'{grid_name}_extracted_data.csv') # Path to the CSV file where the extracted data will be saved - - # User choose the output to extract from 'runtime_helpfile.csv' of each case (always the [-1] element of the column). For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) - output_to_extract = ['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol', - 'H_kg_atm','O_kg_atm','C_kg_atm','N_kg_atm','S_kg_atm', 'Si_kg_atm', 'Mg_kg_atm', 'Fe_kg_atm', 'Na_kg_atm', - 'H2O_kg_atm','CO2_kg_atm', 'O2_kg_atm', 'H2_kg_atm', 'CH4_kg_atm', 'CO_kg_atm', 'N2_kg_atm', 'NH3_kg_atm', - 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm'] - - # --------------------------------------------- - # STEP 1: Post-processing the grid → producing CSV - # --------------------------------------------- - # Only run Step 1 if update_csv=True OR if the CSV does not exist yet - - if update_csv or not os.path.isfile(csv_file): - - print('-----------------------------------------------------------') - print(f'Step 1 : Post-processing the grid {grid_name} ...') - print('-----------------------------------------------------------') - - extracted_value = {} # Initialize the dictionary to store extracted values - - cases_data = load_grid_cases(grid_path) # Load all simulation cases - grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters - - for param in output_to_extract: - extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values - - solidification_times = extract_solidification_time(cases_data) # Extract the solidification time - extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values - - save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, - extracted_value, solidification_times, data_dir) # Save all the extracted data to a CSV 
file - print(f'--> CSV file written to: {csv_file}') - - else: - print('-----------------------------------------------------------') - print(f'Step 1 : Skipped (CSV already exists at {csv_file})') - print('-----------------------------------------------------------') - - - # --------------------------------------------- - # STEP 2: Load data from CSV and make plots - # --------------------------------------------- - - print('-----------------------------------------------------------') - print(f'Step 2 : Loading data and plotting for grid {grid_name} ...') - print('-----------------------------------------------------------') - - - df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data - grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters - - # Histogram of grid statuses - plot_grid_status(df, plots_path, grid_name) # Plot the grid statuses in an histogram - - # Single ECDF Plots - # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. 
- param_settings_single = { - "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, - "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}, - } - output_settings_single = { - 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, - 'atm_kg_per_mol': {"label": "Mean molecular weight (MMW) [g/mol]", "log_scale": False, "scale": 1000.0}, - 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}, - 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, - 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, - 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} -} - ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) - - # ECDF Grid Plot - # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. 
- param_settings_grid = { - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, - "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} - output_settings_grid = { - 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, - 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, - 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, - 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, - 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24} - 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - #'O_kg_atm': {"label": r"[O$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - #'C_kg_atm': {"label": r"[C$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - #'N_kg_atm': {"label": r"[N$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - #'S_kg_atm': {"label": r"[S$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} - } - ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid, plots_path=plots_path) - - print('-----------------------------------------------------------') - 
print(f'Plots saved in {plots_path}') - print(f'Post-processing of grid {grid_name} completed successfully!') - print('-----------------------------------------------------------') - print('If you want to change the parameters to post-process the grid, please edit the code in PROTEUS/src/proteus/grid/grid_analysis.py') - print('-----------------------------------------------------------') diff --git a/src/proteus/grid/postprocess_grid.py b/src/proteus/grid/post_processing_grid.py similarity index 77% rename from src/proteus/grid/postprocess_grid.py rename to src/proteus/grid/post_processing_grid.py index ade1f28c2..21d6d0133 100644 --- a/src/proteus/grid/postprocess_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -372,7 +372,6 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic print(f"Extracted data has been successfully saved to {csv_file}.") print('-----------------------------------------------------------') - ##### Functions for plotting grid data results ##### def load_extracted_data(data_path : str | Path, grid_name :str): @@ -862,3 +861,146 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, print(f"Grid ECDF plot saved at {out_path}") +##### Function for extracting and plotting grid data ##### + +def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True): + """ + Run the post-processing of a PROTEUS grid, extracting simulation data in a CSV file and generating plots. + + Parameters + ---------- + path_to_grid : str + Path to the directory containing the grid folder. + + grid_name : str + Name of the grid folder to process. + + update_csv : bool, optional + If True, the CSV file will be updated or created. If False, it will skip the CSV extraction step if the file already exists. 
+ """ + + # ------------------------------------------------------------ + # 1) Build all the folder/filename strings + # ------------------------------------------------------------ + grid_path = os.path.join(path_to_grid, grid_name) + os.sep + postprocess_path = os.path.join(grid_path, "post_processing_grid") + os.sep + data_dir = os.path.join(postprocess_path, "extracted_data") + os.sep + os.makedirs(data_dir, exist_ok=True) + + + plots_path = os.path.join(postprocess_path, "plots_grid") + os.sep + plot_dir_exists(plots_path) + + csv_file = os.path.join(data_dir, f"{grid_name}_extracted_data.csv") + + # ------------------------------------------------------------ + # 2) Define which outputs to pull from each case's runtime_helpfile.csv + # ------------------------------------------------------------ + # User choose the output to extract from 'runtime_helpfile.csv' of each case (always the [-1] element of the column). + # For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) + + output_to_extract = ['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol', + 'H_kg_atm','O_kg_atm','C_kg_atm','N_kg_atm','S_kg_atm', 'Si_kg_atm', 'Mg_kg_atm', 'Fe_kg_atm', 'Na_kg_atm', + 'H2O_kg_atm','CO2_kg_atm', 'O2_kg_atm', 'H2_kg_atm', 'CH4_kg_atm', 'CO_kg_atm', 'N2_kg_atm', 'NH3_kg_atm', + 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm'] + + # ------------------------------------------------------------ + # STEP 1: CSV extraction (only if update_csv=True or CSV missing) + # ------------------------------------------------------------ + + if update_csv or not os.path.isfile(csv_file): + + print('-----------------------------------------------------------') + print(f'Step 1 : Post-processing the grid {grid_name} ...') + print('-----------------------------------------------------------') + + extracted_value = {} # Initialize the dictionary to store extracted values + + cases_data = 
load_grid_cases(grid_path) # Load all simulation cases + grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters + + for param in output_to_extract: + extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values + + solidification_times = extract_solidification_time(cases_data) # Extract the solidification time + extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values + + save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, + extracted_value, solidification_times, data_dir) # Save all the extracted data to a CSV file + print(f'--> CSV file written to: {csv_file}') + + else: + print('-----------------------------------------------------------') + print(f'Step 1 : Skipped (CSV already exists at {csv_file})') + print('-----------------------------------------------------------') + + # --------------------------------------------- + # STEP 2: Load data from CSV and make plots + # --------------------------------------------- + + print('-----------------------------------------------------------') + print(f'Step 2 : Loading data and plotting for grid {grid_name} ...') + print('-----------------------------------------------------------') + + + df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data + grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters + + # Histogram of grid statuses + plot_grid_status(df, plots_path, grid_name) # Plot the grid statuses in an histogram + + # Single ECDF Plots + # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. 
+ param_settings_single = { + "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, + "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}, + } + output_settings_single = { + 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, + 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, + 'atm_kg_per_mol': {"label": "Mean molecular weight (MMW) [g/mol]", "log_scale": False, "scale": 1000.0}, + 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}, + 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, + 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, + 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} + } + ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) + + # ECDF Grid Plot + # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. 
+ param_settings_grid = { + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, + "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, + "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} + output_settings_grid = { + 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, + 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, + 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, + 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, + 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, + 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, + 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'O_kg_atm': {"label": r"[O$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'C_kg_atm': {"label": r"[C$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'N_kg_atm': {"label": r"[N$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'S_kg_atm': {"label": r"[S$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} + } + ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid, plots_path=plots_path) + + print('-----------------------------------------------------------') + 
print(f'Plots saved in {plots_path}') + print(f'Post-processing of grid {grid_name} completed successfully!') + print('-----------------------------------------------------------') + print('If you want to change the parameters to post-process the grid, please edit the code in PROTEUS/src/proteus/grid/post_processing_grid.py') + print('-----------------------------------------------------------') \ No newline at end of file diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py new file mode 100644 index 000000000..64b9be992 --- /dev/null +++ b/src/proteus/grid/run_grid_analysis.py @@ -0,0 +1,15 @@ +# This script is used to post-process a grid of PROTEUS simulations. +# It extracts the output values from the simulation cases, saves them to a CSV file, +# and generates plots for the grid statuses based on the extracted data. It also generates +# ECDF single plots and a big grid plot for the input parameters vs extracted outputs. + +# The users need to specify the path to the grid directory and the grid name. (see the example below) +# He also needs to specify the output columns to extract from the 'runtime_helpfile.csv' of each case and +# update the related plotting variables accordingly. This can be done in the `run_grid_postprocessing` function. 
(see src/proteus/grid/postprocess_grid.py) + +from post_processing_grid import run_grid_analyze + +if __name__ == "__main__": + run_grid_analyze(path_to_grid="/home2/p315557/PROTEUS/output/scratch/", + grid_name="escape_grid_habrok_7_params_1Msun", + update_csv=True) From d6a07ab3cd46567b12b071615a712c9c955fb020 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 3 Jun 2025 17:56:44 +0200 Subject: [PATCH 034/105] update doc with the proteus grid-analyze command --- docs/usage.md | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/docs/usage.md b/docs/usage.md index 0403f9c9b..e6a91e048 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -149,6 +149,60 @@ proteus observe -c [cfgfile] PROTEUS will perform this step automatically if enabled in the configuration file. +## Postprocessing of PROTEUS simulation grids + +Results from a PROTEUS grid can be post-processed using the `proteus grid_analyze` command. + +This will generate a CSV file with extrated data (`your_grid_name_extracted_data.csv`) from the grid results and ECDF plots +(see [seaborn.ecdfplot doc](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html)). +Here is the structure of the generated `post_processing_grid` folder inside the grid directory : + +```console +your_grid_name/ + ├─case_00000 <---- case of your grid (for the structure refer to the tree from the [## Output and results] section) + ├─case_00001 + ├─... 
+ ├─cfgs <---- folder with all the `input.toml` files for all cases + ├─logs <---- folder with all the `proteus_case_number.log` files for all cases + ├─manager.log <---- the log file of the grid + ├─slurm_dispatch.sh <---- if use_slurm=True in `grid_proteus.py`, this is the slurm file to submit with `sbatch` command + ├─post_processing_grid <---- this folder contains all the output from this script + │ └─extracted_data <---- folder with the generated CSV file + │ └─your_grid_name_extracted_data.csv <---- CSV file containing the tested input parameters and extracted output from the grid + │ └─plots_grid <---- folder with the generated plots + │ ├─ecdf_grid_plot.png <---- Grid plot to visualize all tested input parameters vs extracted outputs using ECDF distribution + │ ├─grid_statuses_summary.png <---- Summary plot of statuses for all cases of the grid + │ └─single_plots_ecdf <---- folder with all the single ECDF plots corresponding to all the panels from the grid plot + │ ├─ecdf_[extracted_output]_per_[input_param].png <---- Single plot using ECDF distribution to visualize one tested input parameter vs one extracted output for all cases + │ └─... +``` + + + To post-processed the grid and generate ECDF plots for further analysis, use the proteus command line interface: + +```console +proteus grid-analyze /path/to/grid/ [grid_name] +``` + +The user can also specify to update the CSV file with new output to extract for instance by adding the `--update-csv` flag, using : + +```console +proteus grid-analyze /path/to/grid/ [grid_name] --update-csv +``` + +To get more information about this command, run : + +```console +proteus grid-analyze --help +``` + +*Note to the user : update `output_to_extract` for your grid* + +1. The user can choose the output to extract for each simulations at the last time-step (from the `runtime_helpfile.csv` file of each cases) like 'esc_rate_total','Phi_global','P_surf','T_surf','M_planet'... 
+To do so, the user should go to `PROTEUS/src/proteus/grid/post_processing_grid.py` and modify the variable `output_to_extract` within the `run_grid_analyze` function. + +2. In the Step 2 of the same function, the user should also modify accordingly the `param_settings_single` and `output_settings_single` object for generating single plots (same for the grid plot). For this, the user should add the input parameters and output extracted from your grid if this is not already present in the scripe and comment the one useless for your grid. + ## Archiving output files Running PROTEUS can generate a large number of files, which is problematic when also running From 4cd918f40e41b7bda978bd470188d96634b62d61 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Thu, 5 Jun 2025 17:34:55 +0200 Subject: [PATCH 035/105] add the csv_error_cases but still need to work on this --- src/proteus/grid/post_processing_grid.py | 108 +++++++++++++++++++++-- src/proteus/grid/run_grid_analysis.py | 2 +- 2 files changed, 102 insertions(+), 8 deletions(-) diff --git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py index 21d6d0133..e108ab1f9 100644 --- a/src/proteus/grid/post_processing_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -13,6 +13,7 @@ import matplotlib as mpl import matplotlib.cm as cm import matplotlib.colors as mcolors +import math ##### Functions for extracting grid data ##### @@ -104,8 +105,6 @@ def load_grid_cases(grid_dir: Path): return combined_data - return combined_data - def get_grid_parameters(grid_dir: str): """ Extract grid parameter names and values from the 'manager.log' file. 
@@ -193,9 +192,11 @@ def extract_grid_output(cases_data: list, parameter_name: str): parameter_values = [] columns_printed = False # Flag to print columns only once - for case in cases_data: + for case_index, case in enumerate(cases_data): df = case['output_values'] if df is None: + print(f"Warning: No output values found for case number '{case_index}'") + parameter_values.append(np.nan) # Append NaN if no output values continue # Skip cases with no output if parameter_name in df.columns: parameter_value = df[parameter_name].iloc[-1] @@ -327,6 +328,15 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic # CSV file path csv_file = output_dir / f"{grid_name}_extracted_data.csv" + # (3) Pad each extracted‐output list to length = number of cases + num_cases = len(cases_data) + for param, val_list in extracted_value.items(): + print(f"→ param {param!r} has {len(val_list)} entries (should be {len(cases_data)})") + missing = num_cases - len(val_list) + if missing > 0: + # Append 'NA' for each missing case index + val_list.extend(['NA'] * missing) + with open(csv_file, 'w', newline='') as csvfile: writer = csv.writer(csvfile) @@ -362,16 +372,91 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic case_param_values = case_params.get(case_index, {}) for param in grid_parameters.keys(): row.append(case_param_values.get(param, 'NA')) - # Add extracted output values for each case + # Add extracted‐output values (now every list is length=num_cases) for param in extracted_value.keys(): - value_list = extracted_value.get(param, []) - row.append(value_list[case_index] if case_index < len(value_list) else 'NA') + value_list = extracted_value[param] + row.append(value_list[case_index]) + # Write the row to the CSV file writer.writerow(row) print(f"Extracted data has been successfully saved to {csv_file}.") print('-----------------------------------------------------------') +def save_error_running_cases(grid_name: str, 
cases_data: List[Dict[str, Any]], grid_parameters: Dict[str, List[Any]], case_params: Dict[int, Dict[str, Any]], + extracted_value: Dict[str, List[Any]], output_to_extract: List[str], output_dir: Path,) -> None: + """ + Scan through `cases_data` and pick out any case whose status is exactly 'error' or 'running', + then write those rows (with identical columns) to a separate CSV named + '{grid_name}_error_running_cases.csv'.ß + """ + # 1) Find indices with status == 'error' or 'running' + error_running_indices = set() + for idx, case_data in enumerate(cases_data): + status = case_data.get("status", "").lower() + if status in ("error", "running"): + error_running_indices.add(idx) + + # 2) Find indices with any missing ("NA" or None) in extracted outputs + na_indices = set() + for param, vals in extracted_value.items(): + for idx, val in enumerate(vals): + # 2) Convert everything to a trimmed uppercase string + s = str(val).strip().upper() + if s == "" or s == "NA" or s == "None": + na_indices.add(idx) + + # 3) Merge both sets of indices + bad_indices = sorted(error_running_indices.union(na_indices)) + if not bad_indices: + print("→ No 'error'/'running' or missing-outputs cases found; skipping error/running CSV.") + return + + # 4) Path for the new CSV + err_csv = output_dir + f"{grid_name}_error_running_cases.csv" + + with open(err_csv, mode="w", newline="") as csvfile: + writer = csv.writer(csvfile) + + # --- Header block (same style as master CSV) --- + writer.writerow(["##########################################" + "#############################################"]) + writer.writerow([f"Grid name: {grid_name}"]) + writer.writerow([f"Total number of selected cases: {len(bad_indices)}"]) + writer.writerow([f"Including statuses: Error or Running {len(error_running_indices)}"]) + writer.writerow([f"Including missing-outputs (NA or None) {len(na_indices)}"]) + writer.writerow(["----------------------------------------------------------"]) + # Column names row + 
writer.writerow( + ["Case number", "Status"] + + list(grid_parameters.keys()) + + list(extracted_value.keys()) + ) + writer.writerow(["##########################################" + "#############################################"]) + writer.writerow([]) + + # --- Data rows: only indices in bad_indices --- + for case_index in bad_indices: + status = cases_data[case_index].get("status", "Unknown") or "Unknown" + row = [case_index, f"'{status}'"] + + # Add grid-parameter values for this case + for param in grid_parameters.keys(): + row.append(case_params.get(case_index, {}).get(param, "NA")) + + # Add extracted-output values (which may be "NA" or None) + for param in extracted_value.keys(): + vals = extracted_value[param] + if case_index < len(vals): + row.append(vals[case_index]) + else: + row.append("NA") + + writer.writerow(row) + + print(f"→ Error/Running (and missing-output) CSV saved to: {err_csv}") + ##### Functions for plotting grid data results ##### def load_extracted_data(data_path : str | Path, grid_name :str): @@ -902,7 +987,7 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) output_to_extract = ['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol', 'H_kg_atm','O_kg_atm','C_kg_atm','N_kg_atm','S_kg_atm', 'Si_kg_atm', 'Mg_kg_atm', 'Fe_kg_atm', 'Na_kg_atm', 'H2O_kg_atm','CO2_kg_atm', 'O2_kg_atm', 'H2_kg_atm', 'CH4_kg_atm', 'CO_kg_atm', 'N2_kg_atm', 'NH3_kg_atm', - 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm'] + 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm', 'runtime'] # ------------------------------------------------------------ # STEP 1: CSV extraction (only if update_csv=True or CSV missing) @@ -929,11 +1014,20 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) extracted_value, solidification_times, data_dir) # Save all the extracted data to a CSV file print(f'--> 
CSV file written to: {csv_file}') + save_error_running_cases( + grid_name=grid_name, + cases_data=cases_data, + grid_parameters=grid_parameters, + case_params=case_init_param, + extracted_value=extracted_value, + output_to_extract=output_to_extract, + output_dir=data_dir) else: print('-----------------------------------------------------------') print(f'Step 1 : Skipped (CSV already exists at {csv_file})') print('-----------------------------------------------------------') + # --------------------------------------------- # STEP 2: Load data from CSV and make plots # --------------------------------------------- diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index 64b9be992..5d56ce93e 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -5,7 +5,7 @@ # The users need to specify the path to the grid directory and the grid name. (see the example below) # He also needs to specify the output columns to extract from the 'runtime_helpfile.csv' of each case and -# update the related plotting variables accordingly. This can be done in the `run_grid_postprocessing` function. (see src/proteus/grid/postprocess_grid.py) +# update the related plotting variables accordingly. This can be done in the `run_grid_analyze` function. 
(see src/proteus/grid/postprocess_grid.py) from post_processing_grid import run_grid_analyze From f389ccb61a8568c4a08627611ad0c94e35de3f07 Mon Sep 17 00:00:00 2001 From: Emma Postolec Date: Fri, 6 Jun 2025 11:16:01 +0200 Subject: [PATCH 036/105] commit by copilot ai : Update src/proteus/cli.py base test so copilot can commit his own submitted changes Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/proteus/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proteus/cli.py b/src/proteus/cli.py index 7af874bde..355988f03 100644 --- a/src/proteus/cli.py +++ b/src/proteus/cli.py @@ -233,7 +233,7 @@ def observe(config_path: Path): @click.command() @click.argument("grid_path", type=str, default=None, required=True) @click.argument("grid_name", type=str, default=None, required=True) -@click.option("--update-csv", is_flag=True, help="Update the CSV file containing extracted data.") # If the user wants to update the CSV file, he needs to specify it in the commad line with this flag. Otherwise, set to Flase by default. +@click.option("--update-csv", is_flag=True, help="Update the CSV file containing extracted data.") # If the user wants to update the CSV file, he needs to specify it in the command line with this flag. Otherwise, set to False by default. 
def grid_analyze(grid_path: str, grid_name: str, update_csv: bool): """Run grid analysis on PROTEUS grid output files From c2d8bcba19e66cbfbdb23398dcba67cae71ce315 Mon Sep 17 00:00:00 2001 From: Emma Postolec Date: Fri, 6 Jun 2025 11:17:00 +0200 Subject: [PATCH 037/105] commit by copilot ai : Update input/planets/toi561b.toml correct for minor typo Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- input/planets/toi561b.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml index cc831f592..c88f8ec16 100644 --- a/input/planets/toi561b.toml +++ b/input/planets/toi561b.toml @@ -62,7 +62,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" rtol = 1e-3 # relative tolerance [params.stop.escape] - enabled = fasle + enabled = false p_stop = 1.0 # bar, model will terminate with p_surf < p_stop From 1573e04f86b2e3d2325ec0bee7540a10fa9d6140 Mon Sep 17 00:00:00 2001 From: Emma Postolec Date: Fri, 6 Jun 2025 11:17:32 +0200 Subject: [PATCH 038/105] commit copilot ai : Update docs/usage.md correct minor typo Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- docs/usage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/usage.md b/docs/usage.md index e6a91e048..d6f638ab3 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -153,7 +153,7 @@ PROTEUS will perform this step automatically if enabled in the configuration fil Results from a PROTEUS grid can be post-processed using the `proteus grid_analyze` command. -This will generate a CSV file with extrated data (`your_grid_name_extracted_data.csv`) from the grid results and ECDF plots +This will generate a CSV file with extracted data (`your_grid_name_extracted_data.csv`) from the grid results and ECDF plots (see [seaborn.ecdfplot doc](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html)). 
Here is the structure of the generated `post_processing_grid` folder inside the grid directory : From 408a6efbf29412fb0030bfb32c59625f8ed19836 Mon Sep 17 00:00:00 2001 From: Emma Postolec Date: Fri, 6 Jun 2025 11:18:28 +0200 Subject: [PATCH 039/105] commit copilot AI : Update docs/usage.md correct for minor typo Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- docs/usage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/usage.md b/docs/usage.md index d6f638ab3..c0820123d 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -201,7 +201,7 @@ proteus grid-analyze --help 1. The user can choose the output to extract for each simulations at the last time-step (from the `runtime_helpfile.csv` file of each cases) like 'esc_rate_total','Phi_global','P_surf','T_surf','M_planet'... To do so, the user should go to `PROTEUS/src/proteus/grid/post_processing_grid.py` and modify the variable `output_to_extract` within the `run_grid_analyze` function. -2. In the Step 2 of the same function, the user should also modify accordingly the `param_settings_single` and `output_settings_single` object for generating single plots (same for the grid plot). For this, the user should add the input parameters and output extracted from your grid if this is not already present in the scripe and comment the one useless for your grid. +2. In the Step 2 of the same function, the user should also modify accordingly the `param_settings_single` and `output_settings_single` object for generating single plots (same for the grid plot). For this, the user should add the input parameters and output extracted from your grid if this is not already present in the script and comment the one useless for your grid. 
## Archiving output files From 0f07412d72a35cdee185fa246e7d021f71f6a278 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 6 Jun 2025 15:25:49 +0200 Subject: [PATCH 040/105] move the big run_grid_analyze funciton to the run_analysis_grid.py script for easier --- src/proteus/cli.py | 2 +- src/proteus/grid/post_processing_grid.py | 233 +++++------------------ src/proteus/grid/run_grid_analysis.py | 155 ++++++++++++++- 3 files changed, 197 insertions(+), 193 deletions(-) diff --git a/src/proteus/cli.py b/src/proteus/cli.py index 355988f03..a726de93d 100644 --- a/src/proteus/cli.py +++ b/src/proteus/cli.py @@ -246,7 +246,7 @@ def grid_analyze(grid_path: str, grid_name: str, update_csv: bool): proteus grid_analyze /path/to/grid/ grid_name --update-csv """ - from proteus.grid.post_processing_grid import run_grid_analyze + from proteus.grid.run_grid_analysis import run_grid_analyze run_grid_analyze(path_to_grid=grid_path, grid_name=grid_name, update_csv=update_csv) cli.add_command(grid_analyze) diff --git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py index e108ab1f9..0909946f3 100644 --- a/src/proteus/grid/post_processing_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -81,7 +81,7 @@ def load_grid_cases(grid_dir: Path): else: print(f"WARNING : Missing status file in {case.name}") - # # THIS IS ONLY FOR MY CURRENT GRID ON HABROK + # THIS IS ONLY FOR MY CURRENT GRID ON HABROK # if status in ('Unknown', 'Empty'): # status = 'Disk quota exceeded' @@ -240,7 +240,7 @@ def extract_solidification_time(cases_data: list, phi_crit: float = 0.005): df = case['output_values'] # Check if the required columns exist in the dataframe if df is None: - solidification_times.append(0.0) + solidification_times.append(np.nan) # Append NaN if no output values continue if 'Phi_global' in df.columns and 'Time' in df.columns: @@ -250,7 +250,7 @@ def extract_solidification_time(cases_data: list, phi_crit: float = 0.005): solid_time = 
df.loc[first_index, 'Time'] # Get the index of the time at which the condition is first satisfied solidification_times.append(solid_time) else: - solidification_times.append(0.0) # Append NaN if condition is not satisfied + solidification_times.append(np.nan) # Append NaN if condition is not satisfied else: if not columns_printed: print("Warning: 'Phi_global' and/or 'Time' columns not found in some cases.") @@ -328,14 +328,8 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic # CSV file path csv_file = output_dir / f"{grid_name}_extracted_data.csv" - # (3) Pad each extracted‐output list to length = number of cases + # Write CSV file num_cases = len(cases_data) - for param, val_list in extracted_value.items(): - print(f"→ param {param!r} has {len(val_list)} entries (should be {len(cases_data)})") - missing = num_cases - len(val_list) - if missing > 0: - # Append 'NA' for each missing case index - val_list.extend(['NA'] * missing) with open(csv_file, 'w', newline='') as csvfile: writer = csv.writer(csvfile) @@ -383,69 +377,79 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic print(f"Extracted data has been successfully saved to {csv_file}.") print('-----------------------------------------------------------') -def save_error_running_cases(grid_name: str, cases_data: List[Dict[str, Any]], grid_parameters: Dict[str, List[Any]], case_params: Dict[int, Dict[str, Any]], - extracted_value: Dict[str, List[Any]], output_to_extract: List[str], output_dir: Path,) -> None: +def save_error_running_cases(grid_name: str, cases_data: List[Dict[str, Any]], grid_parameters: Dict[str, List[Any]], case_params: Dict[int, Dict[str, Any]], extracted_value: Dict[str, List[Any]], output_to_extract: List[str], output_dir: Path,) -> None: """ - Scan through `cases_data` and pick out any case whose status is exactly 'error' or 'running', - then write those rows (with identical columns) to a separate CSV named - 
'{grid_name}_error_running_cases.csv'.ß + Scan through `cases_data` and pick out any case whose status is 'running' + or starts with 'error', then write those rows (with identical columns) + to a separate CSV named '{grid_name}_error_running_cases.csv'. """ - # 1) Find indices with status == 'error' or 'running' - error_running_indices = set() + # 1) Find indices with status starting with 'error' (case‐insensitive) and exactly 'running' + error_indices = set() + running_indices = set() + for idx, case_data in enumerate(cases_data): - status = case_data.get("status", "").lower() - if status in ("error", "running"): - error_running_indices.add(idx) + status_raw = case_data.get("status", "") + status = status_raw.strip().lower() + if status.startswith("error"): + error_indices.add(idx) + elif status == "running": + running_indices.add(idx) + # Combine both into a single set of “status‐based” bad indices + status_bad_indices = error_indices.union(running_indices) # 2) Find indices with any missing ("NA" or None) in extracted outputs na_indices = set() for param, vals in extracted_value.items(): for idx, val in enumerate(vals): - # 2) Convert everything to a trimmed uppercase string s = str(val).strip().upper() - if s == "" or s == "NA" or s == "None": + if s == "" or s == "NA" or s == "NONE": na_indices.add(idx) - # 3) Merge both sets of indices - bad_indices = sorted(error_running_indices.union(na_indices)) + # 3) Union of (error/running) and (missing) indices + bad_indices = sorted(status_bad_indices.union(na_indices)) if not bad_indices: - print("→ No 'error'/'running' or missing-outputs cases found; skipping error/running CSV.") + print("→ No 'error'/'running' or missing‐outputs cases found; skipping error/running CSV.") return - # 4) Path for the new CSV + # 4) Build output path err_csv = output_dir + f"{grid_name}_error_running_cases.csv" with open(err_csv, mode="w", newline="") as csvfile: writer = csv.writer(csvfile) - # --- Header block (same style as master 
CSV) --- - writer.writerow(["##########################################" - "#############################################"]) + # --- Header block --- + writer.writerow([ + "############################################################" + "################################" + ]) writer.writerow([f"Grid name: {grid_name}"]) writer.writerow([f"Total number of selected cases: {len(bad_indices)}"]) - writer.writerow([f"Including statuses: Error or Running {len(error_running_indices)}"]) - writer.writerow([f"Including missing-outputs (NA or None) {len(na_indices)}"]) + writer.writerow([f"Number of 'error…' cases: {len(error_indices)}"]) + writer.writerow([f"Number of 'running' cases: {len(running_indices)}"]) + writer.writerow([f"Number of missing‐output (NA or None): {len(na_indices)}"]) writer.writerow(["----------------------------------------------------------"]) - # Column names row + # Column names writer.writerow( ["Case number", "Status"] + list(grid_parameters.keys()) + list(extracted_value.keys()) ) - writer.writerow(["##########################################" - "#############################################"]) + writer.writerow([ + "############################################################" + "################################" + ]) writer.writerow([]) - # --- Data rows: only indices in bad_indices --- + # --- Data rows for each bad index --- for case_index in bad_indices: status = cases_data[case_index].get("status", "Unknown") or "Unknown" row = [case_index, f"'{status}'"] - # Add grid-parameter values for this case + # Grid‐parameter columns for param in grid_parameters.keys(): row.append(case_params.get(case_index, {}).get(param, "NA")) - # Add extracted-output values (which may be "NA" or None) + # Extracted‐output columns for param in extracted_value.keys(): vals = extracted_value[param] if case_index < len(vals): @@ -455,7 +459,8 @@ def save_error_running_cases(grid_name: str, cases_data: List[Dict[str, Any]], g writer.writerow(row) - print(f"→ 
Error/Running (and missing-output) CSV saved to: {err_csv}") + print(f"→ Error/Running (and missing‐output) CSV saved to: {err_csv}") + ##### Functions for plotting grid data results ##### @@ -946,155 +951,3 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, print(f"Grid ECDF plot saved at {out_path}") -##### Function for extracting and plotting grid data ##### - -def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True): - """ - Run the post-processing of a PROTEUS grid, extracting simulation data in a CSV file and generating plots. - - Parameters - ---------- - path_to_grid : str - Path to the directory containing the grid folder. - - grid_name : str - Name of the grid folder to process. - - update_csv : bool, optional - If True, the CSV file will be updated or created. If False, it will skip the CSV extraction step if the file already exists. - """ - - # ------------------------------------------------------------ - # 1) Build all the folder/filename strings - # ------------------------------------------------------------ - grid_path = os.path.join(path_to_grid, grid_name) + os.sep - postprocess_path = os.path.join(grid_path, "post_processing_grid") + os.sep - data_dir = os.path.join(postprocess_path, "extracted_data") + os.sep - os.makedirs(data_dir, exist_ok=True) - - - plots_path = os.path.join(postprocess_path, "plots_grid") + os.sep - plot_dir_exists(plots_path) - - csv_file = os.path.join(data_dir, f"{grid_name}_extracted_data.csv") - - # ------------------------------------------------------------ - # 2) Define which outputs to pull from each case's runtime_helpfile.csv - # ------------------------------------------------------------ - # User choose the output to extract from 'runtime_helpfile.csv' of each case (always the [-1] element of the column). 
- # For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) - - output_to_extract = ['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol', - 'H_kg_atm','O_kg_atm','C_kg_atm','N_kg_atm','S_kg_atm', 'Si_kg_atm', 'Mg_kg_atm', 'Fe_kg_atm', 'Na_kg_atm', - 'H2O_kg_atm','CO2_kg_atm', 'O2_kg_atm', 'H2_kg_atm', 'CH4_kg_atm', 'CO_kg_atm', 'N2_kg_atm', 'NH3_kg_atm', - 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm', 'runtime'] - - # ------------------------------------------------------------ - # STEP 1: CSV extraction (only if update_csv=True or CSV missing) - # ------------------------------------------------------------ - - if update_csv or not os.path.isfile(csv_file): - - print('-----------------------------------------------------------') - print(f'Step 1 : Post-processing the grid {grid_name} ...') - print('-----------------------------------------------------------') - - extracted_value = {} # Initialize the dictionary to store extracted values - - cases_data = load_grid_cases(grid_path) # Load all simulation cases - grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters - - for param in output_to_extract: - extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values - - solidification_times = extract_solidification_time(cases_data) # Extract the solidification time - extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values - - save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, - extracted_value, solidification_times, data_dir) # Save all the extracted data to a CSV file - print(f'--> CSV file written to: {csv_file}') - - save_error_running_cases( - grid_name=grid_name, - cases_data=cases_data, - grid_parameters=grid_parameters, - case_params=case_init_param, - extracted_value=extracted_value, - 
output_to_extract=output_to_extract, - output_dir=data_dir) - else: - print('-----------------------------------------------------------') - print(f'Step 1 : Skipped (CSV already exists at {csv_file})') - print('-----------------------------------------------------------') - - - # --------------------------------------------- - # STEP 2: Load data from CSV and make plots - # --------------------------------------------- - - print('-----------------------------------------------------------') - print(f'Step 2 : Loading data and plotting for grid {grid_name} ...') - print('-----------------------------------------------------------') - - - df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data - grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters - - # Histogram of grid statuses - plot_grid_status(df, plots_path, grid_name) # Plot the grid statuses in an histogram - - # Single ECDF Plots - # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. 
- param_settings_single = { - "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, - "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}, - } - output_settings_single = { - 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, - 'atm_kg_per_mol': {"label": "Mean molecular weight (MMW) [g/mol]", "log_scale": False, "scale": 1000.0}, - 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}, - 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, - 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, - 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} - } - ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) - - # ECDF Grid Plot - # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. 
- param_settings_grid = { - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, - "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} - output_settings_grid = { - 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, - 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, - 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, - 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, - 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, - 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - #'O_kg_atm': {"label": r"[O$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - #'C_kg_atm': {"label": r"[C$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - #'N_kg_atm': {"label": r"[N$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - #'S_kg_atm': {"label": r"[S$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} - } - ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid, plots_path=plots_path) - - print('-----------------------------------------------------------') - 
print(f'Plots saved in {plots_path}') - print(f'Post-processing of grid {grid_name} completed successfully!') - print('-----------------------------------------------------------') - print('If you want to change the parameters to post-process the grid, please edit the code in PROTEUS/src/proteus/grid/post_processing_grid.py') - print('-----------------------------------------------------------') \ No newline at end of file diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index 5d56ce93e..fc0f68eac 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -5,9 +5,160 @@ # The users need to specify the path to the grid directory and the grid name. (see the example below) # He also needs to specify the output columns to extract from the 'runtime_helpfile.csv' of each case and -# update the related plotting variables accordingly. This can be done in the `run_grid_analyze` function. (see src/proteus/grid/postprocess_grid.py) +# update the related plotting variables accordingly. This can be done in the `run_grid_analyze` function (see below). -from post_processing_grid import run_grid_analyze +from proteus.grid.post_processing_grid import * + +def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True): + """ + Run the post-processing of a PROTEUS grid, extracting simulation data in a CSV file and generating plots. + + Parameters + ---------- + path_to_grid : str + Path to the directory containing the grid folder. + + grid_name : str + Name of the grid folder to process. + + update_csv : bool, optional + If True, the CSV file will be updated or created. If False, it will skip the CSV extraction step if the file already exists. 
+ """ + + # ------------------------------------------------------------ + # 1) Build all the folder/filename strings + # ------------------------------------------------------------ + grid_path = os.path.join(path_to_grid, grid_name) + os.sep + postprocess_path = os.path.join(grid_path, "post_processing_grid") + os.sep + data_dir = os.path.join(postprocess_path, "extracted_data") + os.sep + os.makedirs(data_dir, exist_ok=True) + + + plots_path = os.path.join(postprocess_path, "plots_grid") + os.sep + plot_dir_exists(plots_path) + + csv_file = os.path.join(data_dir, f"{grid_name}_extracted_data.csv") + + # ------------------------------------------------------------ + # 2) Define which outputs to pull from each case's runtime_helpfile.csv + # ------------------------------------------------------------ + # User choose the output to extract from 'runtime_helpfile.csv' of each case (always the [-1] element of the column). + # For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) + + output_to_extract = ['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol', + 'H_kg_atm','O_kg_atm','C_kg_atm','N_kg_atm','S_kg_atm', 'Si_kg_atm', 'Mg_kg_atm', 'Fe_kg_atm', 'Na_kg_atm', + 'H2O_kg_atm','CO2_kg_atm', 'O2_kg_atm', 'H2_kg_atm', 'CH4_kg_atm', 'CO_kg_atm', 'N2_kg_atm', 'NH3_kg_atm', + 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm', 'runtime'] + + # ------------------------------------------------------------ + # STEP 1: CSV extraction (only if update_csv=True or CSV missing) + # ------------------------------------------------------------ + + if update_csv or not os.path.isfile(csv_file): + + print('-----------------------------------------------------------') + print(f'Step 1 : Post-processing the grid {grid_name} ...') + print('-----------------------------------------------------------') + + extracted_value = {} # Initialize the dictionary to store extracted values + + 
cases_data = load_grid_cases(grid_path) # Load all simulation cases + grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters + + for param in output_to_extract: + extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values + + solidification_times = extract_solidification_time(cases_data) # Extract the solidification time + extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values + + save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, + extracted_value, solidification_times, data_dir) # Save all the extracted data to a CSV file + print(f'--> CSV file written to: {csv_file}') + + save_error_running_cases( + grid_name=grid_name, + cases_data=cases_data, + grid_parameters=grid_parameters, + case_params=case_init_param, + extracted_value=extracted_value, + output_to_extract=output_to_extract, + output_dir=data_dir) + else: + print('-----------------------------------------------------------') + print(f'Step 1 : Skipped (CSV already exists at {csv_file})') + print('-----------------------------------------------------------') + + + # --------------------------------------------- + # STEP 2: Load data from CSV and make plots + # --------------------------------------------- + + print('-----------------------------------------------------------') + print(f'Step 2 : Loading data and plotting for grid {grid_name} ...') + print('-----------------------------------------------------------') + + + df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data + grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters + + # Histogram of grid statuses + plot_grid_status(df, plots_path, grid_name) # Plot the grid statuses in an histogram + + # Single ECDF Plots + # The user needs to comment the parameters he didn't used in the grid/ add the 
ones non-listed here. Same for the outputs. + param_settings_single = { + "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, + "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}, + } + output_settings_single = { + 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, + 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, + 'atm_kg_per_mol': {"label": "Mean molecular weight (MMW) [g/mol]", "log_scale": False, "scale": 1000.0}, + 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}, + 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, + 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, + 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} + } + ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) + + # ECDF Grid Plot + # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. 
+ param_settings_grid = { + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, + "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, + "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} + output_settings_grid = { + 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, + 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, + 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, + 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, + 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, + 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, + 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'O_kg_atm': {"label": r"[O$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'C_kg_atm': {"label": r"[C$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'N_kg_atm': {"label": r"[N$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + #'S_kg_atm': {"label": r"[S$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} + } + ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid, plots_path=plots_path) + + print('-----------------------------------------------------------') + 
print(f'Plots saved in {plots_path}') + print(f'Post-processing of grid {grid_name} completed successfully!') + print('-----------------------------------------------------------') + print('If you want to change the parameters to post-process the grid, please edit the code in PROTEUS/src/proteus/grid/post_processing_grid.py') + print('-----------------------------------------------------------') if __name__ == "__main__": run_grid_analyze(path_to_grid="/home2/p315557/PROTEUS/output/scratch/", From f8eb6293eb5b1e772f5c23a54ed1b7e4f2f114f3 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 6 Jun 2025 15:32:32 +0200 Subject: [PATCH 041/105] update_docs --- docs/usage.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/usage.md b/docs/usage.md index c0820123d..8da4789eb 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -151,7 +151,7 @@ PROTEUS will perform this step automatically if enabled in the configuration fil ## Postprocessing of PROTEUS simulation grids -Results from a PROTEUS grid can be post-processed using the `proteus grid_analyze` command. +Results from a PROTEUS grid can be post-processed using the `proteus grid-analyze` command. This will generate a CSV file with extracted data (`your_grid_name_extracted_data.csv`) from the grid results and ECDF plots (see [seaborn.ecdfplot doc](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html)). @@ -196,12 +196,12 @@ To get more information about this command, run : proteus grid-analyze --help ``` -*Note to the user : update `output_to_extract` for your grid* +*Note to the user : update `output_to_extract` and other plotting parameters for your grid* 1. The user can choose the output to extract for each simulations at the last time-step (from the `runtime_helpfile.csv` file of each cases) like 'esc_rate_total','Phi_global','P_surf','T_surf','M_planet'... 
-To do so, the user should go to `PROTEUS/src/proteus/grid/post_processing_grid.py` and modify the variable `output_to_extract` within the `run_grid_analyze` function. +To do so, the user should go to `PROTEUS/src/proteus/grid/run_grid_analysis.py` and modify the variable `output_to_extract` within the `run_grid_analyze` function. -2. In the Step 2 of the same function, the user should also modify accordingly the `param_settings_single` and `output_settings_single` object for generating single plots (same for the grid plot). For this, the user should add the input parameters and output extracted from your grid if this is not already present in the script and comment the one useless for your grid. +2. In the Step 2 of the same function, the user should also modify accordingly the `param_settings_single` and `output_settings_single` object for generating single plots (same for the grid plot with `param_settings_grid` and `output_settings_grid`). For this, the user should add the input parameters and output extracted from the grid, if this is not already present in the script and comment the one useless for the grid. ## Archiving output files From ed410f18f88d6597c0c229b125ee753979259dc2 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 6 Jun 2025 16:13:45 +0200 Subject: [PATCH 042/105] try to fix the test --- docs/usage.md | 20 ++++++++++---------- src/proteus/cli.py | 3 ++- src/proteus/grid/post_processing_grid.py | 2 +- src/proteus/grid/run_grid_analysis.py | 6 +----- 4 files changed, 14 insertions(+), 17 deletions(-) diff --git a/docs/usage.md b/docs/usage.md index 8da4789eb..f6db0906c 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -149,12 +149,12 @@ proteus observe -c [cfgfile] PROTEUS will perform this step automatically if enabled in the configuration file. -## Postprocessing of PROTEUS simulation grids +## Postprocessing of PROTEUS simulation grids -Results from a PROTEUS grid can be post-processed using the `proteus grid-analyze` command. 
+Results from a PROTEUS grid can be post-processed using the `proteus grid-analyze` command. -This will generate a CSV file with extracted data (`your_grid_name_extracted_data.csv`) from the grid results and ECDF plots -(see [seaborn.ecdfplot doc](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html)). +This will generate a CSV file with extracted data (`your_grid_name_extracted_data.csv`) from the grid results and ECDF plots +(see [seaborn.ecdfplot doc](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html)). Here is the structure of the generated `post_processing_grid` folder inside the grid directory : ```console @@ -167,13 +167,13 @@ your_grid_name/ ├─manager.log <---- the log file of the grid ├─slurm_dispatch.sh <---- if use_slurm=True in `grid_proteus.py`, this is the slurm file to submit with `sbatch` command ├─post_processing_grid <---- this folder contains all the output from this script - │ └─extracted_data <---- folder with the generated CSV file + │ └─extracted_data <---- folder with the generated CSV file │ └─your_grid_name_extracted_data.csv <---- CSV file containing the tested input parameters and extracted output from the grid │ └─plots_grid <---- folder with the generated plots │ ├─ecdf_grid_plot.png <---- Grid plot to visualize all tested input parameters vs extracted outputs using ECDF distribution │ ├─grid_statuses_summary.png <---- Summary plot of statuses for all cases of the grid │ └─single_plots_ecdf <---- folder with all the single ECDF plots corresponding to all the panels from the grid plot - │ ├─ecdf_[extracted_output]_per_[input_param].png <---- Single plot using ECDF distribution to visualize one tested input parameter vs one extracted output for all cases + │ ├─ecdf_[extracted_output]_per_[input_param].png <---- Single plot using ECDF distribution to visualize one tested input parameter vs one extracted output for all cases │ └─... 
``` @@ -181,7 +181,7 @@ your_grid_name/ To post-processed the grid and generate ECDF plots for further analysis, use the proteus command line interface: ```console -proteus grid-analyze /path/to/grid/ [grid_name] +proteus grid-analyze /path/to/grid/ [grid_name] ``` The user can also specify to update the CSV file with new output to extract for instance by adding the `--update-csv` flag, using : @@ -198,10 +198,10 @@ proteus grid-analyze --help *Note to the user : update `output_to_extract` and other plotting parameters for your grid* -1. The user can choose the output to extract for each simulations at the last time-step (from the `runtime_helpfile.csv` file of each cases) like 'esc_rate_total','Phi_global','P_surf','T_surf','M_planet'... -To do so, the user should go to `PROTEUS/src/proteus/grid/run_grid_analysis.py` and modify the variable `output_to_extract` within the `run_grid_analyze` function. +1. The user can choose the output to extract for each simulations at the last time-step (from the `runtime_helpfile.csv` file of each cases) like 'esc_rate_total','Phi_global','P_surf','T_surf','M_planet'... +To do so, the user should go to `PROTEUS/src/proteus/grid/run_grid_analysis.py` and modify the variable `output_to_extract` within the `run_grid_analyze` function. -2. In the Step 2 of the same function, the user should also modify accordingly the `param_settings_single` and `output_settings_single` object for generating single plots (same for the grid plot with `param_settings_grid` and `output_settings_grid`). For this, the user should add the input parameters and output extracted from the grid, if this is not already present in the script and comment the one useless for the grid. +2. 
In the Step 2 of the same function, the user should also modify accordingly the `param_settings_single` and `output_settings_single` object for generating single plots (same for the grid plot with `param_settings_grid` and `output_settings_grid`).For this, the user should add the input parameters and output extracted from the grid, if this is not already present in the script and comment the one useless for the grid. ## Archiving output files diff --git a/src/proteus/cli.py b/src/proteus/cli.py index a726de93d..3de5fddae 100644 --- a/src/proteus/cli.py +++ b/src/proteus/cli.py @@ -252,4 +252,5 @@ def grid_analyze(grid_path: str, grid_name: str, update_csv: bool): cli.add_command(grid_analyze) if __name__ == '__main__': - cli() \ No newline at end of file + cli() + \ No newline at end of file diff --git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py index 0909946f3..5d00e78e3 100644 --- a/src/proteus/grid/post_processing_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -950,4 +950,4 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, plt.close(fig) print(f"Grid ECDF plot saved at {out_path}") - + \ No newline at end of file diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index fc0f68eac..5c5444518 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -159,8 +159,4 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) print('-----------------------------------------------------------') print('If you want to change the parameters to post-process the grid, please edit the code in PROTEUS/src/proteus/grid/post_processing_grid.py') print('-----------------------------------------------------------') - -if __name__ == "__main__": - run_grid_analyze(path_to_grid="/home2/p315557/PROTEUS/output/scratch/", - grid_name="escape_grid_habrok_7_params_1Msun", - update_csv=True) + \ No 
newline at end of file From 2f076d4a0307b7bdd6ba4879e57cb48be5145d95 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 6 Jun 2025 16:16:52 +0200 Subject: [PATCH 043/105] add pkl to gitignore --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 05cde4c52..ddb8fe622 100644 --- a/.gitignore +++ b/.gitignore @@ -243,3 +243,6 @@ cython_debug/ # Created automatically during PR #351 Manifest.toml Project.toml + +# pickle files +*.pkl \ No newline at end of file From 1c698f92d79873eea066d379f8da15d45dcafc2d Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 6 Jun 2025 16:19:46 +0200 Subject: [PATCH 044/105] add seaborn as a dependency in pyproject.toml --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 0bad9f0e4..f2c7b3ed8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,7 +58,8 @@ dependencies = [ "pandas", "scipy", "sympy", - "astropy" + "astropy", + "seaborn" ] [project.urls] From fb2a90707dc157886f1b29680918b1a5ecf44ed9 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 6 Jun 2025 16:26:16 +0200 Subject: [PATCH 045/105] Fix pre-commit hook issues: trailing whitespace, end-of-file, ruff --- .gitignore | 2 +- docs/usage.md | 6 ++--- pyproject.toml | 2 +- src/proteus/cli.py | 9 +++---- src/proteus/grid/post_processing_grid.py | 7 +++--- src/proteus/grid/run_grid_analysis.py | 31 ++++++++++++------------ 6 files changed, 27 insertions(+), 30 deletions(-) diff --git a/.gitignore b/.gitignore index ddb8fe622..06a64c21b 100644 --- a/.gitignore +++ b/.gitignore @@ -245,4 +245,4 @@ Manifest.toml Project.toml # pickle files -*.pkl \ No newline at end of file +*.pkl diff --git a/docs/usage.md b/docs/usage.md index f6db0906c..87f3af064 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -160,8 +160,8 @@ Here is the structure of the generated `post_processing_grid` folder inside the ```console your_grid_name/ ├─case_00000 <---- case of your grid 
(for the structure refer to the tree from the [## Output and results] section) - ├─case_00001 - ├─... + ├─case_00001 + ├─... ├─cfgs <---- folder with all the `input.toml` files for all cases ├─logs <---- folder with all the `proteus_case_number.log` files for all cases ├─manager.log <---- the log file of the grid @@ -177,7 +177,7 @@ your_grid_name/ │ └─... ``` - + To post-processed the grid and generate ECDF plots for further analysis, use the proteus command line interface: ```console diff --git a/pyproject.toml b/pyproject.toml index f2c7b3ed8..ce5ae2bb5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,7 +58,7 @@ dependencies = [ "pandas", "scipy", "sympy", - "astropy", + "astropy", "seaborn" ] diff --git a/src/proteus/cli.py b/src/proteus/cli.py index 3de5fddae..14c6b8cb1 100644 --- a/src/proteus/cli.py +++ b/src/proteus/cli.py @@ -233,17 +233,17 @@ def observe(config_path: Path): @click.command() @click.argument("grid_path", type=str, default=None, required=True) @click.argument("grid_name", type=str, default=None, required=True) -@click.option("--update-csv", is_flag=True, help="Update the CSV file containing extracted data.") # If the user wants to update the CSV file, he needs to specify it in the command line with this flag. Otherwise, set to False by default. +@click.option("--update-csv", is_flag=True, help="Update the CSV file containing extracted data.") # If the user wants to update the CSV file, he needs to specify it in the command line with this flag. Otherwise, set to False by default. def grid_analyze(grid_path: str, grid_name: str, update_csv: bool): - """Run grid analysis on PROTEUS grid output files + """Run grid analysis on PROTEUS grid output files GRID_PATH : Path to the output directory containing the PROTEUS grid files. (Do not include the grid name here) GRID_NAME : Name of the grid to analyze. 
- Example of usage : - + Example of usage : + proteus grid_analyze /path/to/grid/ grid_name --update-csv """ from proteus.grid.run_grid_analysis import run_grid_analyze @@ -253,4 +253,3 @@ def grid_analyze(grid_path: str, grid_name: str, update_csv: bool): if __name__ == '__main__': cli() - \ No newline at end of file diff --git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py index 5d00e78e3..c931e90ee 100644 --- a/src/proteus/grid/post_processing_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -81,7 +81,7 @@ def load_grid_cases(grid_dir: Path): else: print(f"WARNING : Missing status file in {case.name}") - # THIS IS ONLY FOR MY CURRENT GRID ON HABROK + # THIS IS ONLY FOR MY CURRENT GRID ON HABROK # if status in ('Unknown', 'Empty'): # status = 'Disk quota exceeded' @@ -912,7 +912,7 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, # Configure y-axis (shared label added later) if j == 0: - ax.set_ylabel("") + ax.set_ylabel("") ticks = [0.0, 0.5, 1.0] ax.set_yticks(ticks) ax.tick_params(axis='y', labelsize=22) @@ -920,7 +920,7 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, ax.set_ylabel("") ax.set_yticks(ticks) ax.tick_params(axis='y', labelleft=False) - + ax.grid(alpha=0.4) # After plotting all outputs for this parameter (row), add colorbar or legend @@ -950,4 +950,3 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, plt.close(fig) print(f"Grid ECDF plot saved at {out_path}") - \ No newline at end of file diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index 5c5444518..b797f0d97 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -4,7 +4,7 @@ # ECDF single plots and a big grid plot for the input parameters vs extracted outputs. # The users need to specify the path to the grid directory and the grid name. 
(see the example below) -# He also needs to specify the output columns to extract from the 'runtime_helpfile.csv' of each case and +# He also needs to specify the output columns to extract from the 'runtime_helpfile.csv' of each case and # update the related plotting variables accordingly. This can be done in the `run_grid_analyze` function (see below). from proteus.grid.post_processing_grid import * @@ -12,15 +12,15 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True): """ Run the post-processing of a PROTEUS grid, extracting simulation data in a CSV file and generating plots. - + Parameters ---------- - path_to_grid : str + path_to_grid : str Path to the directory containing the grid folder. grid_name : str Name of the grid folder to process. - + update_csv : bool, optional If True, the CSV file will be updated or created. If False, it will skip the CSV extraction step if the file already exists. """ @@ -32,7 +32,7 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) postprocess_path = os.path.join(grid_path, "post_processing_grid") + os.sep data_dir = os.path.join(postprocess_path, "extracted_data") + os.sep os.makedirs(data_dir, exist_ok=True) - + plots_path = os.path.join(postprocess_path, "plots_grid") + os.sep plot_dir_exists(plots_path) @@ -42,14 +42,14 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) # ------------------------------------------------------------ # 2) Define which outputs to pull from each case's runtime_helpfile.csv # ------------------------------------------------------------ - # User choose the output to extract from 'runtime_helpfile.csv' of each case (always the [-1] element of the column). + # User choose the output to extract from 'runtime_helpfile.csv' of each case (always the [-1] element of the column). 
# For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) output_to_extract = ['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol', 'H_kg_atm','O_kg_atm','C_kg_atm','N_kg_atm','S_kg_atm', 'Si_kg_atm', 'Mg_kg_atm', 'Fe_kg_atm', 'Na_kg_atm', - 'H2O_kg_atm','CO2_kg_atm', 'O2_kg_atm', 'H2_kg_atm', 'CH4_kg_atm', 'CO_kg_atm', 'N2_kg_atm', 'NH3_kg_atm', - 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm', 'runtime'] - + 'H2O_kg_atm','CO2_kg_atm', 'O2_kg_atm', 'H2_kg_atm', 'CH4_kg_atm', 'CO_kg_atm', 'N2_kg_atm', 'NH3_kg_atm', + 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm', 'runtime'] + # ------------------------------------------------------------ # STEP 1: CSV extraction (only if update_csv=True or CSV missing) # ------------------------------------------------------------ @@ -57,7 +57,7 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) if update_csv or not os.path.isfile(csv_file): print('-----------------------------------------------------------') - print(f'Step 1 : Post-processing the grid {grid_name} ...') + print(f'Step 1 : Post-processing the grid {grid_name} ...') print('-----------------------------------------------------------') extracted_value = {} # Initialize the dictionary to store extracted values @@ -87,20 +87,20 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) print('-----------------------------------------------------------') print(f'Step 1 : Skipped (CSV already exists at {csv_file})') print('-----------------------------------------------------------') - + # --------------------------------------------- # STEP 2: Load data from CSV and make plots # --------------------------------------------- print('-----------------------------------------------------------') - print(f'Step 2 : Loading data and plotting for grid {grid_name} ...') + 
print(f'Step 2 : Loading data and plotting for grid {grid_name} ...') print('-----------------------------------------------------------') - + df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data grouped_data = group_output_by_parameter(df, grid_params, extracted_outputs) # Group extracted outputs by grid parameters - + # Histogram of grid statuses plot_grid_status(df, plots_path, grid_name) # Plot the grid statuses in an histogram @@ -113,7 +113,7 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}, } output_settings_single = { 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, @@ -159,4 +159,3 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) print('-----------------------------------------------------------') print('If you want to change the parameters to post-process the grid, please edit the code in PROTEUS/src/proteus/grid/post_processing_grid.py') print('-----------------------------------------------------------') - \ No newline at end of file From c0db852b7711ca0f4c769be1525eeb827133d995 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 6 Jun 2025 16:34:49 +0200 Subject: [PATCH 046/105] try to fix cide style to pass the tests --- src/proteus/grid/post_processing_grid.py | 25 ++++++++++++------------ src/proteus/grid/run_grid_analysis.py | 2 ++ 2 files changed, 15 insertions(+), 12 deletions(-) diff 
--git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py index c931e90ee..a03c319db 100644 --- a/src/proteus/grid/post_processing_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -1,19 +1,20 @@ -import toml -import re +from __future__ import annotations + import ast import csv -from typing import Tuple, Dict, List, Any -from pathlib import Path import os -import pandas as pd -import numpy as np +import re from io import StringIO -import seaborn as sns -import matplotlib.pyplot as plt +from pathlib import Path +from typing import Any, Dict, List + import matplotlib as mpl -import matplotlib.cm as cm -import matplotlib.colors as mcolors -import math +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sns +import toml + ##### Functions for extracting grid data ##### @@ -708,7 +709,7 @@ def plot_grid_status(cases_data, plot_dir: Path, grid_name: str, status_colors: plt.savefig(output_path, dpi=300) plt.close() - print(f"Summary plot of grid statuses is available") + print("Summary plot of grid statuses is available") def ecdf_single_plots(grid_params: dict, grouped_data: dict, param_settings: dict, output_settings: dict, plots_path: str): """ diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index b797f0d97..6250501a4 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -6,9 +6,11 @@ # The users need to specify the path to the grid directory and the grid name. (see the example below) # He also needs to specify the output columns to extract from the 'runtime_helpfile.csv' of each case and # update the related plotting variables accordingly. This can be done in the `run_grid_analyze` function (see below). 
+from __future__ import annotations from proteus.grid.post_processing_grid import * + def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True): """ Run the post-processing of a PROTEUS grid, extracting simulation data in a CSV file and generating plots. From 2eca57ed81baa5988c640053aaf279b828395ccd Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 11 Jun 2025 17:53:46 +0200 Subject: [PATCH 047/105] fix import cm --- src/proteus/grid/run_grid_analysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index 6250501a4..f227f7be3 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -9,7 +9,7 @@ from __future__ import annotations from proteus.grid.post_processing_grid import * - +import matplotlib.cm as cm def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True): """ From 2280407c14621d36f38447d5d51008e1ecf4d2a2 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 20 Aug 2025 14:58:23 +0200 Subject: [PATCH 048/105] input file to run toi561b grids + addition of tested parameters for the plots --- input/planets/toi561b.toml | 32 ++++++++++++++------------- src/proteus/grid/run_grid_analysis.py | 19 ++++++++++------ 2 files changed, 29 insertions(+), 22 deletions(-) diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml index c88f8ec16..029ba6d98 100644 --- a/input/planets/toi561b.toml +++ b/input/planets/toi561b.toml @@ -13,14 +13,16 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "png" # Plotting image file format, "png" or "pdf" recommended write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations + archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive + remove_sf = true # 
time-stepping [params.dt] minimum = 3e2 # yr, minimum time-step minimum_rel = 1e-5 # relative minimum time-step [dimensionless] - maximum = 1e11 # yr, maximum time-step + maximum = 1e10 # yr, maximum time-step initial = 1e3 # yr, inital step size - starspec = 3e6 # yr, interval to re-calculate the stellar spectrum + starspec = 1e9 # yr, interval to re-calculate the stellar spectrum starinst = 1e2 # yr, interval to re-calculate the instellation method = "adaptive" # proportional | adaptive | maximum @@ -72,11 +74,11 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # Physical parameters mass = 0.806 # from Lacedelli et al., 2022 [M_sun] - age_ini = 0.05 # Gyr, model initialisation/start age + age_ini = 0.1 # Gyr, model initialisation/start age module = "mors" [star.mors] - rot_pcntle = 50.0 # rotation percentile + rot_pcntle = 20.0 # rotation percentile rot_period = 'none' # rotation period [days] tracks = "spada" # evolution tracks: spada | baraffe age_now = 11 # [Gyr] from Lacedelli et al., 2022 @@ -112,13 +114,13 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # Atmosphere - physics table [atmos_clim] - prevent_warming = true # do not allow the planet to heat up + prevent_warming = false # do not allow the planet to heat up surface_d = 0.01 # m, conductive skin thickness surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity cloud_enabled = false # enable water cloud radiative effects cloud_alpha = 0.0 # condensate retention fraction (1 -> fully retained) surf_state = "fixed" # surface scheme: "mixed_layer" | "fixed" | "skin" - surf_greyalbedo = 0.2 # surface grey albedo + surf_greyalbedo = 0.1 # surface grey albedo albedo_pl = 0.0 # Enforced Bond albedo (do not use with `rayleigh = true`) from Lacedelli et al. 
2022 rayleigh = true # Enable rayleigh scattering tmp_minimum = 0.5 # temperature floor on solver @@ -130,7 +132,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" p_top = 1.0e-5 # bar, top of atmosphere grid pressure spectral_group = "Honeyside" # which gas opacities to include spectral_bands = "256" # how many spectral bands? - num_levels = 60 # Number of atmospheric grid levels + num_levels = 50 # Number of atmospheric grid levels chemistry = "none" # "none" | "eq" surf_material = "greybody" # surface material file for scattering solve_energy = true # solve for energy-conserving atmosphere profile @@ -161,7 +163,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [escape.zephyrus] Pxuv = 1e-2 # Pressure at which XUV radiation become opaque in the planetary atmosphere [bar] - efficiency = 0.3 # Escape efficiency factor + efficiency = 0.1 # Escape efficiency factor tidal = false # Tidal contribution enabled [escape.dummy] @@ -237,17 +239,17 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # Set initial volatile inventory by planetary element abundances [delivery.elements] - # H_oceans = 0.0 # Hydrogen inventory in units of equivalent Earth oceans - H_ppmw = 109.0 # Hydrogen inventory in ppmw relative to mantle mass + H_oceans = 1.0 # Hydrogen inventory in units of equivalent Earth oceans + #H_ppmw = 109.0 # Hydrogen inventory in ppmw relative to mantle mass - # CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system - C_ppmw = 109.0 # Carbon inventory in ppmw relative to mantle mass + CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system + #C_ppmw = 109.0 # Carbon inventory in ppmw relative to mantle mass # NH_ratio = 0.018 # N/H mass ratio in mantle/atmosphere system N_ppmw = 20.1 # Nitrogen inventory in ppmw relative to mantle mass - # SH_ratio = 2.16 # S/H mass ratio in mantle/atmosphere system - S_ppmw = 235.0 # Sulfur inventory in ppmw relative to mantle mass + SH_ratio = 2.16 # S/H mass ratio in 
mantle/atmosphere system + #S_ppmw = 235.0 # Sulfur inventory in ppmw relative to mantle mass # Set initial volatile inventory by partial pressures in atmosphere [delivery.volatiles] @@ -266,7 +268,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [atmos_chem] module = "vulcan" # Atmospheric chemistry module - when = "manually" # When to run chemistry (manually, offline, online) + when = "offline" # When to run chemistry (manually, offline, online) # Physics flags photo_on = true # Enable photochemistry diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index f227f7be3..a442075d3 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -47,7 +47,7 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) # User choose the output to extract from 'runtime_helpfile.csv' of each case (always the [-1] element of the column). # For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) - output_to_extract = ['esc_rate_total','Phi_global','P_surf','T_surf','M_planet','atm_kg_per_mol', + output_to_extract = ['Time','esc_rate_total','Phi_global','P_surf','T_surf','M_planet','R_obs','p_xuv', 'R_xuv', 'atm_kg_per_mol', 'H_kg_atm','O_kg_atm','C_kg_atm','N_kg_atm','S_kg_atm', 'Si_kg_atm', 'Mg_kg_atm', 'Fe_kg_atm', 'Na_kg_atm', 'H2O_kg_atm','CO2_kg_atm', 'O2_kg_atm', 'H2_kg_atm', 'CH4_kg_atm', 'CO_kg_atm', 'N2_kg_atm', 'NH3_kg_atm', 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm', 'runtime'] @@ -115,7 +115,9 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - 
"delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": True}, + #"delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": False}, + #"escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} } output_settings_single = { 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, @@ -127,7 +129,7 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} } - ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) + #ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) # ECDF Grid Plot # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. 
@@ -138,15 +140,18 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} + "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": True}, + #"delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": False}, + #"escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} + } output_settings_grid = { 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, + 'P_surf': {"label": r"P$_{surf}$ [bar]", "log_scale": True, "scale": 1.0}, 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, - 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, + 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, + # 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, #'O_kg_atm': {"label": r"[O$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, #'C_kg_atm': {"label": r"[C$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, From cb1b177e8268a6e907b2fc8f69b62a559ce76629 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: 
Tue, 26 Aug 2025 17:57:17 +0200 Subject: [PATCH 049/105] update input and grid submission files with latest main updates --- input/demos/escape_comparison.toml | 354 ++++++++++++++++++ input/demos/escape_grid_0_485Msun.toml | 354 ++++++++++++++++++ input/demos/escape_grid_1Msun.toml | 354 ++++++++++++++++++ input/ensembles/escape_comparison_on_off.toml | 29 ++ input/ensembles/escape_grid_0_485Msun.toml | 59 +++ input/ensembles/escape_grid_1Msun.toml | 59 +++ input/ensembles/grid_toi561b.toml | 46 +++ input/planets/toi561b.toml | 56 ++- src/proteus/grid/run_grid_analysis.py | 33 +- 9 files changed, 1327 insertions(+), 17 deletions(-) create mode 100644 input/demos/escape_comparison.toml create mode 100644 input/demos/escape_grid_0_485Msun.toml create mode 100644 input/demos/escape_grid_1Msun.toml create mode 100644 input/ensembles/escape_comparison_on_off.toml create mode 100644 input/ensembles/escape_grid_0_485Msun.toml create mode 100644 input/ensembles/escape_grid_1Msun.toml create mode 100644 input/ensembles/grid_toi561b.toml diff --git a/input/demos/escape_comparison.toml b/input/demos/escape_comparison.toml new file mode 100644 index 000000000..d2a60ac64 --- /dev/null +++ b/input/demos/escape_comparison.toml @@ -0,0 +1,354 @@ +# PROTEUS configuration file (version 2.0) + +# Root tables should be physical, with the exception of "params" +# Software related options should go within the appropriate physical table + +# The general structure is: +# [root] metadata +# [params] parameters for code execution, output files, time-stepping, convergence +# [star] stellar parameters, model selection +# [orbit] planetary orbital parameters +# [struct] planetary structure (mass, radius) +# [atmos] atmosphere parameters, model selection +# [escape] escape parameters, model selection +# [interior] magma ocean model selection and parameters +# [outgas] outgassing parameters (fO2) and included volatiles +# [delivery] initial volatile inventory, and delivery model selection +# 
[observe] synthetic observations + +# ---------------------------------------------------- +# Metadata +version = "2.0" +author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" + +# ---------------------------------------------------- +# Parameters +[params] + # output files + [params.out] + path = "scratch/escape_comparison_on_off" + logging = "INFO" + plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations + plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended + write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations + archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive + remove_sf = true + + # time-stepping + [params.dt] + minimum = 3e2 # yr, minimum time-step + minimum_rel = 1e-5 # relative minimum time-step [dimensionless] + maximum = 3e7 # yr, maximum time-step + initial = 1e4 # yr, inital step size + starspec = 1e9 # yr, interval to re-calculate the stellar spectrum + starinst = 100 # yr, interval to re-calculate the instellation + method = "adaptive" # proportional | adaptive | maximum + + [params.dt.proportional] + propconst = 52.0 # Proportionality constant + + [params.dt.adaptive] + atol = 0.02 # Step size atol + rtol = 0.07 # Step size rtol + + # Termination criteria + # Set enabled=true/false in each section to enable/disable that termination criterion + [params.stop] + + # Require criteria to be satisfied twice before model will exit? 
+ strict = false + + # required number of iterations + [params.stop.iters] + enabled = true + minimum = 5 + maximum = 9000 + + # required time constraints + [params.stop.time] + enabled = true + minimum = 1.0e3 # yr, model will certainly run to t > minimum + maximum = 4.567e+9 # yr, model will terminate when t > maximum + + # solidification + [params.stop.solid] + enabled = true + phi_crit = 0.005 # non-dim., model will terminate when global melt fraction < phi_crit + + # radiative equilibrium + [params.stop.radeqm] + enabled = true # Tim said false in the meeting, true is a try for this grid + atol = 0.2 # absolute tolerance [W m-2] + rtol = 1e-3 # relative tolerance + + # atmospheric escape + [params.stop.escape] + enabled = true + p_stop = 1.0 # bar, model will terminate with p_surf < p_stop + + +# ---------------------------------------------------- +# Star +[star] + + # Physical parameters + mass = 1.0 # M_sun + age_ini = 0.100 # Gyr, model initialisation/start age + + module = "mors" + [star.mors] + rot_pcntle = 50.0 # rotation percentile + # rot_period = 80.6 # rotation period [days] + tracks = "spada" # evolution tracks: spada | baraffe + age_now = 4.567 # Gyr, current age of star used for scaling + spec = "stellar_spectra/Named/sun.txt" # stellar spectrum + + [star.dummy] + radius = 1.0 # R_sun + calculate_radius = false # Calculate star radius using scaling from Teff? 
+ Teff = 5772.0 # K + +# Orbital system +[orbit] + instellation_method = 'sma' # whether to define orbit using semi major axis ('sma') or instellation flux ('inst') + instellationflux = 1.0 # instellation flux received from the planet in [Earth units] + semimajoraxis = 1.0 # initial semi-major axis of planet's orbit [AU] + eccentricity = 0.0 # initial eccentricity of planet's orbit [dimensionless] + zenith_angle = 48.19 # characteristic zenith angle [degrees] + s0_factor = 0.375 # instellation scale factor [dimensionless] + + evolve = false # whether to evolve the SMaxis and eccentricity + module = "none" # module used to calculate tidal heating + + [orbit.dummy] + H_tide = 1e-11 # Fixed tidal power density [W kg-1] + Phi_tide = "<0.3" # Tidal heating applied when inequality locally satisfied + Imk2 = 0.0 # Fixed imaginary part of k2 love number, cannot be positive + + [orbit.lovepy] + visc_thresh = 1e9 # Minimum viscosity required for heating [Pa s] + +# Planetary structure - physics table +[struct] + mass_tot = 1.0 # Total planet mass [M_earth] + # radius_int = 1.0 # Radius at mantle-atmosphere boundary [R_earth] + corefrac = 0.55 # non-dim., radius fraction + core_density = 10738.33 # Core density [kg m-3] + core_heatcap = 880.0 # Core specific heat capacity [J K-1 kg-1] + + module = "self" # self | zalmoxis + + [struct.zalmoxis] + coremassfrac = 0.325 # core mass fraction [non-dim.] + inner_mantle_mass_fraction = 0 # inner mantle mass fraction [non-dim.] + weight_iron_frac = 0.325 # iron fraction in the planet [non-dim.] + num_levels = 100 # number of Zalmoxis radius layers + EOSchoice = "Tabulated:iron/silicate" # iron/silicate for super-Earths, water for water planets with Earth-like rocky cores + max_iterations_outer = 20 # max. iterations for the outer loop + tolerance_outer = 1e-3 # tolerance for the outer loop + max_iterations_inner = 100 # max. 
iterations for the inner loop + tolerance_inner = 1e-4 # tolerance for the inner loop + relative_tolerance = 1e-5 # relative tolerance for solve_ivp + absolute_tolerance = 1e-6 # absolute tolerance for solve_ivp + target_surface_pressure = 101325 # target surface pressure + pressure_tolerance = 1e11 # tolerance surface pressure + max_iterations_pressure = 200 # max. iterations for the innermost loop + pressure_adjustment_factor = 1.1 # factor for adjusting the pressure in the innermost loop + +# Atmosphere - physics table +[atmos_clim] + prevent_warming = false # do not allow the planet to heat up + surface_d = 0.01 # m, conductive skin thickness + surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity + cloud_enabled = false # enable water cloud radiative effects + cloud_alpha = 0.0 # condensate retention fraction (1 -> fully retained) + surf_state = "skin" # surface scheme: "mixed_layer" | "fixed" | "skin" + surf_greyalbedo = 0.1 # surface grey albedo + albedo_pl = 0.0 # Bond albedo (scattering) + rayleigh = true # enable rayleigh scattering + tmp_minimum = 0.5 # temperature floor on solver + tmp_maximum = 5000.0 # temperature ceiling on solver + + module = "agni" # Which atmosphere module to use + + [atmos_clim.agni] + p_top = 1.0e-5 # bar, top of atmosphere grid pressure + spectral_group = "Honeyside" # which gas opacities to include + spectral_bands = "256" # how many spectral bands? 
+ num_levels = 50 # Number of atmospheric grid levels + chemistry = "none" # "none" | "eq" + surf_material = "greybody" # surface material file for scattering + solve_energy = true # solve for energy-conserving atmosphere profile + solution_atol = 1e-3 # solver absolute tolerance + solution_rtol = 2e-2 # solver relative tolerance + overlap_method = "ee" # gas overlap method + condensation = true # volatile condensation + real_gas = true # use real-gas equations of state + + [atmos_clim.janus] + p_top = 1.0e-5 # bar, top of atmosphere grid pressure + p_obs = 1.0e-3 # bar, observed pressure level + spectral_group = "Honeyside" # which gas opacities to include + spectral_bands = "256" # how many spectral bands? + F_atm_bc = 0 # measure outgoing flux at: (0) TOA | (1) Surface + num_levels = 50 # Number of atmospheric grid levels + tropopause = "none" # none | skin | dynamic + overlap_method = "ee" # gas overlap method + + [atmos_clim.dummy] + gamma = 0.7 # atmosphere opacity between 0 and 1 + +# Volatile escape - physics table +[escape] + + module = "zephyrus" # Which escape module to use + reservoir = "outgas" # Escaping reservoir: "bulk", "outgas", "pxuv". 
+ + + [escape.zephyrus] + Pxuv = 1e-2 # Pressure at which XUV radiation become opaque in the planetary atmosphere [bar] + efficiency = 1.0 # Escape efficiency factor + tidal = false # Tidal contribution enabled + + [escape.dummy] + rate = 2e-3 # Bulk unfractionated escape rate [kg s-1] + +# Interior - physics table +[interior] + grain_size = 0.1 # crystal settling grain size [m] + F_initial = 8.0E4 # Initial heat flux guess [W m-2] + radiogenic_heat = false # enable radiogenic heat production + tidal_heat = false # enable tidal heat production + rheo_phi_loc = 0.4 # Centre of rheological transition + rheo_phi_wid = 0.15 # Width of rheological transition + bulk_modulus = 260e9 # Bulk modulus [Pa] + + module = "spider" # Which interior module to use + + [interior.spider] + num_levels = 200 # Number of SPIDER grid levels + mixing_length = 2 # Mixing length parameterization + tolerance = 1.0e-10 # solver tolerance + tolerance_rel = 1.0e-8 # relative solver tolerance + solver_type = "bdf" # SUNDIALS solver method + tsurf_atol = 20.0 # tsurf_poststep_change + tsurf_rtol = 0.01 # tsurf_poststep_change_frac + ini_entropy = 3300.0 # Surface entropy conditions [J K-1 kg-1] + ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1] + + [interior.aragog] + logging = "ERROR" + num_levels = 200 # Number of Aragog grid levels + tolerance = 1.0e-10 # solver tolerance + ini_tmagma = 3500.0 # Initial magma surface temperature [K] + inner_boundary_condition = 1 # 1 = core cooling model, 2 = prescribed heat flux, 3 = prescribed temperature + inner_boundary_value = 4000 # core temperature [K], if inner_boundary_condition = 3. 
CMB heat flux [W/m^2], if if inner_boundary_condition = 2 + conduction = true # enable conductive heat transfer + convection = true # enable convective heat transfer + gravitational_separation = false # enable gravitational separation + mixing = false # enable mixing + dilatation = false # enable dilatation source term + mass_coordinates = false # enable mass coordinates + tsurf_poststep_change = 30 # threshold of maximum change on surface temperature + event_triggering = true # enable events triggering to avoid abrupt jumps in surface temperature + + [interior.dummy] + ini_tmagma = 3500.0 # Initial magma surface temperature [K] + +# Outgassing - physics table +[outgas] + fO2_shift_IW = 0 # log10(ΔIW), atmosphere/interior boundary oxidation state + + module = "calliope" # Which outgassing module to use + + [outgas.calliope] + include_H2O = true # Include H2O compound + include_CO2 = true # Include CO2 compound + include_N2 = true # Include N2 compound + include_S2 = true # Include S2 compound + include_SO2 = true # Include SO2 compound + include_H2S = true # Include H2S compound + include_NH3 = true # Include NH3 compound + include_H2 = true # Include H2 compound + include_CH4 = true # Include CH4 compound + include_CO = true # Include CO compound + T_floor = 2300.0 # Temperature floor applied to outgassing calculation [K]. + + [outgas.atmodeller] + some_parameter = "some_value" + +# Volatile delivery - physics table +[delivery] + + # Radionuclide parameters + radio_tref = 4.55 # Reference age for concentrations [Gyr] + radio_K = 310.0 # ppmw of potassium (all isotopes) + radio_U = 0.031 # ppmw of uranium (all isotopes) + radio_Th = 0.124 # ppmw of thorium (all isotopes) + + # Which initial inventory to use? 
+ initial = 'elements' # "elements" | "volatiles" + + # No module for accretion as of yet + module = "none" + + # Set initial volatile inventory by planetary element abundances + [delivery.elements] + use_metallicity = false # whether or not to specify the elemental abundances in terms of solar metallicity + metallicity = 1000 # metallicity relative to solar metallicity + + H_oceans = 1.0 # Hydrogen inventory in units of equivalent Earth oceans + # H_ppmw = 0.0 # Hydrogen inventory in ppmw relative to mantle mass + + CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system + # C_ppmw = 0.0 # Carbon inventory in ppmw relative to mantle mass + + # NH_ratio = 0.0 # N/H mass ratio in mantle/atmosphere system + N_ppmw = 2.0 # Nitrogen inventory in ppmw relative to mantle mass + + # SH_ratio = 0.0 # S/H mass ratio in mantle/atmosphere system + S_ppmw = 200.0 # Sulfur inventory in ppmw relative to mantle mass + + # Set initial volatile inventory by partial pressures in atmosphere + [delivery.volatiles] + H2O = 30.0 # partial pressure of H2O + CO2 = 0.0 # partial pressure of CO2 + N2 = 0.0 # etc + S2 = 0.0 + SO2 = 0.0 + H2S = 0.0 + NH3 = 0.0 + H2 = 0.0 + CH4 = 0.0 + CO = 0.0 + +# Atmospheric chemistry postprocessing +[atmos_chem] + + module = "vulcan" # Atmospheric chemistry module + when = "offline" # When to run chemistry (manually, offline, online) + + # Physics flags + photo_on = true # Enable photochemistry + Kzz_on = true # Enable eddy diffusion + Kzz_const = "none" # Constant eddy diffusion coefficient (none => use profile) + moldiff_on = true # Enable molecular diffusion in the atmosphere + updraft_const = 0.0 # Set constant updraft velocity + + # Vulcan-specific atmospheric chemistry parameters + [atmos_chem.vulcan] + clip_fl = 1e-20 # Floor on stellar spectrum [erg s-1 cm-2 nm-1] + clip_vmr = 1e-10 # Neglect species with vmr < clip_vmr + make_funs = true # Generate reaction network functions + ini_mix = "profile" # Initial mixing ratios (profile, outgas) + 
fix_surf = false # Fixed surface mixing ratios + network = "SNCHO" # Class of chemical network to use (CHO, NCHO, SNCHO) + save_frames = true # Plot frames during iterations + yconv_cri = 0.05 # Convergence criterion, value of mixing ratios + slope_cri = 0.0001 # Convergence criterion, rate of change of mixing ratios + +# Calculate simulated observations +[observe] + + # Module with which to calculate the synthetic observables + synthesis = "none" diff --git a/input/demos/escape_grid_0_485Msun.toml b/input/demos/escape_grid_0_485Msun.toml new file mode 100644 index 000000000..220a86507 --- /dev/null +++ b/input/demos/escape_grid_0_485Msun.toml @@ -0,0 +1,354 @@ +# PROTEUS configuration file (version 2.0) + +# Root tables should be physical, with the exception of "params" +# Software related options should go within the appropriate physical table + +# The general structure is: +# [root] metadata +# [params] parameters for code execution, output files, time-stepping, convergence +# [star] stellar parameters, model selection +# [orbit] planetary orbital parameters +# [struct] planetary structure (mass, radius) +# [atmos] atmosphere parameters, model selection +# [escape] escape parameters, model selection +# [interior] magma ocean model selection and parameters +# [outgas] outgassing parameters (fO2) and included volatiles +# [delivery] initial volatile inventory, and delivery model selection +# [observe] synthetic observations + +# ---------------------------------------------------- +# Metadata +version = "2.0" +author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" + +# ---------------------------------------------------- +# Parameters +[params] + # output files + [params.out] + path = "scratch/escape_grid_0_485Msun" + logging = "INFO" + plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations + plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended + write_mod = 1 # Write CSV frequency, 0: wait until completion | 
n: every n iterations + archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive + remove_sf = true + + # time-stepping + [params.dt] + minimum = 3e2 # yr, minimum time-step + minimum_rel = 1e-5 # relative minimum time-step [dimensionless] + maximum = 3e7 # yr, maximum time-step + initial = 1e4 # yr, inital step size + starspec = 1e9 # yr, interval to re-calculate the stellar spectrum + starinst = 100 # yr, interval to re-calculate the instellation + method = "adaptive" # proportional | adaptive | maximum + + [params.dt.proportional] + propconst = 52.0 # Proportionality constant + + [params.dt.adaptive] + atol = 0.02 # Step size atol + rtol = 0.07 # Step size rtol + + # Termination criteria + # Set enabled=true/false in each section to enable/disable that termination criterion + [params.stop] + + # Require criteria to be satisfied twice before model will exit? + strict = false + + # required number of iterations + [params.stop.iters] + enabled = true + minimum = 5 + maximum = 9000 + + # required time constraints + [params.stop.time] + enabled = true + minimum = 1.0e3 # yr, model will certainly run to t > minimum + maximum = 8.8e+9 # yr, model will terminate when t > maximum + + # solidification + [params.stop.solid] + enabled = true + phi_crit = 0.005 # non-dim., model will terminate when global melt fraction < phi_crit + + # radiative equilibrium + [params.stop.radeqm] + enabled = true # Tim said false in the meeting, true is a try for this grid + atol = 0.2 # absolute tolerance [W m-2] + rtol = 1e-3 # relative tolerance + + # atmospheric escape + [params.stop.escape] + enabled = true + p_stop = 1.0 # bar, model will terminate with p_surf < p_stop + + +# ---------------------------------------------------- +# Star +[star] + + # Physical parameters + mass = 0.485 # M_sun + age_ini = 0.100 # Gyr, model initialisation/start age + + module = "mors" + [star.mors] + rot_pcntle = 10.0 # rotation percentile + # 
rot_period = 40 # rotation period [days] + tracks = "spada" # evolution tracks: spada | baraffe + age_now = 8.8 # Gyr, current age of star used for scaling + spec = "stellar_spectra/Named/gj176.txt" # stellar spectrum + + [star.dummy] + radius = 0.474 # R_sun + calculate_radius = false # Calculate star radius using scaling from Teff? + Teff = 3632.0 # K + +# Orbital system +[orbit] + instellation_method = 'sma' # whether to define orbit using semi major axis ('sma') or instellation flux ('inst') + instellationflux = 1.0 # instellation flux received from the planet in [Earth units] + semimajoraxis = 0.0188 # initial semi-major axis of planet's orbit [AU] + eccentricity = 0.0 # initial eccentricity of planet's orbit [dimensionless] + zenith_angle = 48.19 # characteristic zenith angle [degrees] + s0_factor = 0.375 # instellation scale factor [dimensionless] + + evolve = false # whether to evolve the SMaxis and eccentricity + module = "none" # module used to calculate tidal heating + + [orbit.dummy] + H_tide = 1e-11 # Fixed tidal power density [W kg-1] + Phi_tide = "<0.3" # Tidal heating applied when inequality locally satisfied + Imk2 = 0.0 # Fixed imaginary part of k2 love number, cannot be positive + + [orbit.lovepy] + visc_thresh = 1e9 # Minimum viscosity required for heating [Pa s] + +# Planetary structure - physics table +[struct] + mass_tot = 1.0 # Total planet mass [M_earth] + # radius_int = 1.0 # Radius at mantle-atmosphere boundary [R_earth] + corefrac = 0.55 # non-dim., radius fraction + core_density = 10738.33 # Core density [kg m-3] + core_heatcap = 880.0 # Core specific heat capacity [J K-1 kg-1] + + module = "self" # self | zalmoxis + + [struct.zalmoxis] + coremassfrac = 0.325 # core mass fraction [non-dim.] + inner_mantle_mass_fraction = 0 # inner mantle mass fraction [non-dim.] + weight_iron_frac = 0.325 # iron fraction in the planet [non-dim.] 
+ num_levels = 100 # number of Zalmoxis radius layers + EOSchoice = "Tabulated:iron/silicate" # iron/silicate for super-Earths, water for water planets with Earth-like rocky cores + max_iterations_outer = 20 # max. iterations for the outer loop + tolerance_outer = 1e-3 # tolerance for the outer loop + max_iterations_inner = 100 # max. iterations for the inner loop + tolerance_inner = 1e-4 # tolerance for the inner loop + relative_tolerance = 1e-5 # relative tolerance for solve_ivp + absolute_tolerance = 1e-6 # absolute tolerance for solve_ivp + target_surface_pressure = 101325 # target surface pressure + pressure_tolerance = 1e11 # tolerance surface pressure + max_iterations_pressure = 200 # max. iterations for the innermost loop + pressure_adjustment_factor = 1.1 # factor for adjusting the pressure in the innermost loop + +# Atmosphere - physics table +[atmos_clim] + prevent_warming = false # do not allow the planet to heat up + surface_d = 0.01 # m, conductive skin thickness + surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity + cloud_enabled = false # enable water cloud radiative effects + cloud_alpha = 0.0 # condensate retention fraction (1 -> fully retained) + surf_state = "skin" # surface scheme: "mixed_layer" | "fixed" | "skin" + surf_greyalbedo = 0.1 # surface grey albedo + albedo_pl = 0.0 # Bond albedo (scattering) + rayleigh = true # enable rayleigh scattering + tmp_minimum = 0.5 # temperature floor on solver + tmp_maximum = 5000.0 # temperature ceiling on solver + + module = "agni" # Which atmosphere module to use + + [atmos_clim.agni] + p_top = 1.0e-5 # bar, top of atmosphere grid pressure + spectral_group = "Honeyside" # which gas opacities to include + spectral_bands = "256" # how many spectral bands? 
+ num_levels = 50 # Number of atmospheric grid levels + chemistry = "none" # "none" | "eq" + surf_material = "greybody" # surface material file for scattering + solve_energy = true # solve for energy-conserving atmosphere profile + solution_atol = 1e-3 # solver absolute tolerance + solution_rtol = 2e-2 # solver relative tolerance + overlap_method = "ee" # gas overlap method + condensation = true # volatile condensation + real_gas = true # use real-gas equations of state + + [atmos_clim.janus] + p_top = 1.0e-5 # bar, top of atmosphere grid pressure + p_obs = 1.0e-3 # bar, observed pressure level + spectral_group = "Honeyside" # which gas opacities to include + spectral_bands = "256" # how many spectral bands? + F_atm_bc = 0 # measure outgoing flux at: (0) TOA | (1) Surface + num_levels = 50 # Number of atmospheric grid levels + tropopause = "none" # none | skin | dynamic + overlap_method = "ee" # gas overlap method + + [atmos_clim.dummy] + gamma = 0.7 # atmosphere opacity between 0 and 1 + +# Volatile escape - physics table +[escape] + + module = "zephyrus" # Which escape module to use + reservoir = "outgas" # Escaping reservoir: "bulk", "outgas", "pxuv". 
+
+
+ [escape.zephyrus]
+ Pxuv = 1e-3 # Pressure at which XUV radiation becomes opaque in the planetary atmosphere [bar]
+ efficiency = 1.0 # Escape efficiency factor
+ tidal = false # Tidal contribution enabled
+
+ [escape.dummy]
+ rate = 2e-3 # Bulk unfractionated escape rate [kg s-1]
+
+# Interior - physics table
+[interior]
+ grain_size = 0.1 # crystal settling grain size [m]
+ F_initial = 8.0E4 # Initial heat flux guess [W m-2]
+ radiogenic_heat = false # enable radiogenic heat production
+ tidal_heat = false # enable tidal heat production
+ rheo_phi_loc = 0.4 # Centre of rheological transition
+ rheo_phi_wid = 0.15 # Width of rheological transition
+ bulk_modulus = 260e9 # Bulk modulus [Pa]
+
+ module = "spider" # Which interior module to use
+
+ [interior.spider]
+ num_levels = 200 # Number of SPIDER grid levels
+ mixing_length = 2 # Mixing length parameterization
+ tolerance = 1.0e-10 # solver tolerance
+ tolerance_rel = 1.0e-8 # relative solver tolerance
+ solver_type = "bdf" # SUNDIALS solver method
+ tsurf_atol = 20.0 # tsurf_poststep_change
+ tsurf_rtol = 0.01 # tsurf_poststep_change_frac
+ ini_entropy = 3300.0 # Surface entropy conditions [J K-1 kg-1]
+ ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1]
+
+ [interior.aragog]
+ logging = "ERROR"
+ num_levels = 200 # Number of Aragog grid levels
+ tolerance = 1.0e-10 # solver tolerance
+ ini_tmagma = 3500.0 # Initial magma surface temperature [K]
+ inner_boundary_condition = 1 # 1 = core cooling model, 2 = prescribed heat flux, 3 = prescribed temperature
+ inner_boundary_value = 4000 # core temperature [K], if inner_boundary_condition = 3. 
CMB heat flux [W/m^2], if inner_boundary_condition = 2
+ conduction = true # enable conductive heat transfer
+ convection = true # enable convective heat transfer
+ gravitational_separation = false # enable gravitational separation
+ mixing = false # enable mixing
+ dilatation = false # enable dilatation source term
+ mass_coordinates = false # enable mass coordinates
+ tsurf_poststep_change = 30 # threshold of maximum change on surface temperature
+ event_triggering = true # enable events triggering to avoid abrupt jumps in surface temperature
+
+ [interior.dummy]
+ ini_tmagma = 3500.0 # Initial magma surface temperature [K]
+
+# Outgassing - physics table
+[outgas]
+ fO2_shift_IW = 0 # log10(ΔIW), atmosphere/interior boundary oxidation state
+
+ module = "calliope" # Which outgassing module to use
+
+ [outgas.calliope]
+ include_H2O = true # Include H2O compound
+ include_CO2 = true # Include CO2 compound
+ include_N2 = true # Include N2 compound
+ include_S2 = true # Include S2 compound
+ include_SO2 = true # Include SO2 compound
+ include_H2S = true # Include H2S compound
+ include_NH3 = true # Include NH3 compound
+ include_H2 = true # Include H2 compound
+ include_CH4 = true # Include CH4 compound
+ include_CO = true # Include CO compound
+ T_floor = 2300.0 # Temperature floor applied to outgassing calculation [K].
+
+ [outgas.atmodeller]
+ some_parameter = "some_value"
+
+# Volatile delivery - physics table
+[delivery]
+
+ # Radionuclide parameters
+ radio_tref = 4.55 # Reference age for concentrations [Gyr]
+ radio_K = 310.0 # ppmw of potassium (all isotopes)
+ radio_U = 0.031 # ppmw of uranium (all isotopes)
+ radio_Th = 0.124 # ppmw of thorium (all isotopes)
+
+ # Which initial inventory to use? 
+ initial = 'elements' # "elements" | "volatiles" + + # No module for accretion as of yet + module = "none" + + # Set initial volatile inventory by planetary element abundances + [delivery.elements] + use_metallicity = false # whether or not to specify the elemental abundances in terms of solar metallicity + metallicity = 1000 # metallicity relative to solar metallicity + + H_oceans = 1.0 # Hydrogen inventory in units of equivalent Earth oceans + # H_ppmw = 0.0 # Hydrogen inventory in ppmw relative to mantle mass + + CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system + # C_ppmw = 0.0 # Carbon inventory in ppmw relative to mantle mass + + # NH_ratio = 0.0 # N/H mass ratio in mantle/atmosphere system + N_ppmw = 2.0 # Nitrogen inventory in ppmw relative to mantle mass + + # SH_ratio = 0.0 # S/H mass ratio in mantle/atmosphere system + S_ppmw = 200.0 # Sulfur inventory in ppmw relative to mantle mass + + # Set initial volatile inventory by partial pressures in atmosphere + [delivery.volatiles] + H2O = 30.0 # partial pressure of H2O + CO2 = 0.0 # partial pressure of CO2 + N2 = 0.0 # etc + S2 = 0.0 + SO2 = 0.0 + H2S = 0.0 + NH3 = 0.0 + H2 = 0.0 + CH4 = 0.0 + CO = 0.0 + +# Atmospheric chemistry postprocessing +[atmos_chem] + + module = "vulcan" # Atmospheric chemistry module + when = "offline" # When to run chemistry (manually, offline, online) + + # Physics flags + photo_on = true # Enable photochemistry + Kzz_on = true # Enable eddy diffusion + Kzz_const = "none" # Constant eddy diffusion coefficient (none => use profile) + moldiff_on = true # Enable molecular diffusion in the atmosphere + updraft_const = 0.0 # Set constant updraft velocity + + # Vulcan-specific atmospheric chemistry parameters + [atmos_chem.vulcan] + clip_fl = 1e-20 # Floor on stellar spectrum [erg s-1 cm-2 nm-1] + clip_vmr = 1e-10 # Neglect species with vmr < clip_vmr + make_funs = true # Generate reaction network functions + ini_mix = "profile" # Initial mixing ratios (profile, outgas) + 
fix_surf = false # Fixed surface mixing ratios + network = "SNCHO" # Class of chemical network to use (CHO, NCHO, SNCHO) + save_frames = true # Plot frames during iterations + yconv_cri = 0.05 # Convergence criterion, value of mixing ratios + slope_cri = 0.0001 # Convergence criterion, rate of change of mixing ratios + +# Calculate simulated observations +[observe] + + # Module with which to calculate the synthetic observables + synthesis = "none" diff --git a/input/demos/escape_grid_1Msun.toml b/input/demos/escape_grid_1Msun.toml new file mode 100644 index 000000000..47c007c2e --- /dev/null +++ b/input/demos/escape_grid_1Msun.toml @@ -0,0 +1,354 @@ +# PROTEUS configuration file (version 2.0) + +# Root tables should be physical, with the exception of "params" +# Software related options should go within the appropriate physical table + +# The general structure is: +# [root] metadata +# [params] parameters for code execution, output files, time-stepping, convergence +# [star] stellar parameters, model selection +# [orbit] planetary orbital parameters +# [struct] planetary structure (mass, radius) +# [atmos] atmosphere parameters, model selection +# [escape] escape parameters, model selection +# [interior] magma ocean model selection and parameters +# [outgas] outgassing parameters (fO2) and included volatiles +# [delivery] initial volatile inventory, and delivery model selection +# [observe] synthetic observations + +# ---------------------------------------------------- +# Metadata +version = "2.0" +author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" + +# ---------------------------------------------------- +# Parameters +[params] + # output files + [params.out] + path = "scratch/escape_grid_1Msun" + logging = "INFO" + plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations + plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended + write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n 
iterations
+ archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive
+ remove_sf = true
+
+ # time-stepping
+ [params.dt]
+ minimum = 3e2 # yr, minimum time-step
+ minimum_rel = 1e-5 # relative minimum time-step [dimensionless]
+ maximum = 3e7 # yr, maximum time-step
+ initial = 1e4 # yr, initial step size
+ starspec = 1e9 # yr, interval to re-calculate the stellar spectrum
+ starinst = 100 # yr, interval to re-calculate the instellation
+ method = "adaptive" # proportional | adaptive | maximum
+
+ [params.dt.proportional]
+ propconst = 52.0 # Proportionality constant
+
+ [params.dt.adaptive]
+ atol = 0.02 # Step size atol
+ rtol = 0.07 # Step size rtol
+
+ # Termination criteria
+ # Set enabled=true/false in each section to enable/disable that termination criterion
+ [params.stop]
+
+ # Require criteria to be satisfied twice before model will exit?
+ strict = false
+
+ # required number of iterations
+ [params.stop.iters]
+ enabled = true
+ minimum = 5
+ maximum = 9000
+
+ # required time constraints
+ [params.stop.time]
+ enabled = true
+ minimum = 1.0e3 # yr, model will certainly run to t > minimum
+ maximum = 4.567e+9 # yr, model will terminate when t > maximum
+
+ # solidification
+ [params.stop.solid]
+ enabled = true
+ phi_crit = 0.005 # non-dim., model will terminate when global melt fraction < phi_crit
+
+ # radiative equilibrium
+ [params.stop.radeqm]
+ enabled = true # Tim said false in the meeting, true is a try for this grid
+ atol = 0.2 # absolute tolerance [W m-2]
+ rtol = 1e-3 # relative tolerance
+
+ # atmospheric escape
+ [params.stop.escape]
+ enabled = true
+ p_stop = 1.0 # bar, model will terminate when p_surf < p_stop
+
+
+# ----------------------------------------------------
+# Star
+[star]
+
+ # Physical parameters
+ mass = 1.0 # M_sun
+ age_ini = 0.100 # Gyr, model initialisation/start age
+
+ module = "mors"
+ [star.mors]
+ rot_pcntle = 50.0 # rotation percentile
+ # rot_period = 80.6 # 
rotation period [days] + tracks = "spada" # evolution tracks: spada | baraffe + age_now = 4.567 # Gyr, current age of star used for scaling + spec = "stellar_spectra/Named/sun.txt" # stellar spectrum + + [star.dummy] + radius = 1.0 # R_sun + calculate_radius = false # Calculate star radius using scaling from Teff? + Teff = 5772.0 # K + +# Orbital system +[orbit] + instellation_method = 'sma' # whether to define orbit using semi major axis ('sma') or instellation flux ('inst') + instellationflux = 1.0 # instellation flux received from the planet in [Earth units] + semimajoraxis = 1.0 # initial semi-major axis of planet's orbit [AU] + eccentricity = 0.0 # initial eccentricity of planet's orbit [dimensionless] + zenith_angle = 48.19 # characteristic zenith angle [degrees] + s0_factor = 0.375 # instellation scale factor [dimensionless] + + evolve = false # whether to evolve the SMaxis and eccentricity + module = "none" # module used to calculate tidal heating + + [orbit.dummy] + H_tide = 1e-11 # Fixed tidal power density [W kg-1] + Phi_tide = "<0.3" # Tidal heating applied when inequality locally satisfied + Imk2 = 0.0 # Fixed imaginary part of k2 love number, cannot be positive + + [orbit.lovepy] + visc_thresh = 1e9 # Minimum viscosity required for heating [Pa s] + +# Planetary structure - physics table +[struct] + mass_tot = 1.0 # Total planet mass [M_earth] + # radius_int = 1.0 # Radius at mantle-atmosphere boundary [R_earth] + corefrac = 0.55 # non-dim., radius fraction + core_density = 10738.33 # Core density [kg m-3] + core_heatcap = 880.0 # Core specific heat capacity [J K-1 kg-1] + + module = "self" # self | zalmoxis + + [struct.zalmoxis] + coremassfrac = 0.325 # core mass fraction [non-dim.] + inner_mantle_mass_fraction = 0 # inner mantle mass fraction [non-dim.] + weight_iron_frac = 0.325 # iron fraction in the planet [non-dim.] 
+ num_levels = 100 # number of Zalmoxis radius layers + EOSchoice = "Tabulated:iron/silicate" # iron/silicate for super-Earths, water for water planets with Earth-like rocky cores + max_iterations_outer = 20 # max. iterations for the outer loop + tolerance_outer = 1e-3 # tolerance for the outer loop + max_iterations_inner = 100 # max. iterations for the inner loop + tolerance_inner = 1e-4 # tolerance for the inner loop + relative_tolerance = 1e-5 # relative tolerance for solve_ivp + absolute_tolerance = 1e-6 # absolute tolerance for solve_ivp + target_surface_pressure = 101325 # target surface pressure + pressure_tolerance = 1e11 # tolerance surface pressure + max_iterations_pressure = 200 # max. iterations for the innermost loop + pressure_adjustment_factor = 1.1 # factor for adjusting the pressure in the innermost loop + +# Atmosphere - physics table +[atmos_clim] + prevent_warming = false # do not allow the planet to heat up + surface_d = 0.01 # m, conductive skin thickness + surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity + cloud_enabled = false # enable water cloud radiative effects + cloud_alpha = 0.0 # condensate retention fraction (1 -> fully retained) + surf_state = "skin" # surface scheme: "mixed_layer" | "fixed" | "skin" + surf_greyalbedo = 0.1 # surface grey albedo + albedo_pl = 0.0 # Bond albedo (scattering) + rayleigh = true # enable rayleigh scattering + tmp_minimum = 0.5 # temperature floor on solver + tmp_maximum = 5000.0 # temperature ceiling on solver + + module = "agni" # Which atmosphere module to use + + [atmos_clim.agni] + p_top = 1.0e-5 # bar, top of atmosphere grid pressure + spectral_group = "Honeyside" # which gas opacities to include + spectral_bands = "256" # how many spectral bands? 
+ num_levels = 50 # Number of atmospheric grid levels + chemistry = "none" # "none" | "eq" + surf_material = "greybody" # surface material file for scattering + solve_energy = true # solve for energy-conserving atmosphere profile + solution_atol = 1e-3 # solver absolute tolerance + solution_rtol = 2e-2 # solver relative tolerance + overlap_method = "ee" # gas overlap method + condensation = true # volatile condensation + real_gas = true # use real-gas equations of state + + [atmos_clim.janus] + p_top = 1.0e-5 # bar, top of atmosphere grid pressure + p_obs = 1.0e-3 # bar, observed pressure level + spectral_group = "Honeyside" # which gas opacities to include + spectral_bands = "256" # how many spectral bands? + F_atm_bc = 0 # measure outgoing flux at: (0) TOA | (1) Surface + num_levels = 50 # Number of atmospheric grid levels + tropopause = "none" # none | skin | dynamic + overlap_method = "ee" # gas overlap method + + [atmos_clim.dummy] + gamma = 0.7 # atmosphere opacity between 0 and 1 + +# Volatile escape - physics table +[escape] + + module = "zephyrus" # Which escape module to use + reservoir = "outgas" # Escaping reservoir: "bulk", "outgas", "pxuv". 
+
+
+ [escape.zephyrus]
+ Pxuv = 1e-3 # Pressure at which XUV radiation becomes opaque in the planetary atmosphere [bar]
+ efficiency = 1.0 # Escape efficiency factor
+ tidal = false # Tidal contribution enabled
+
+ [escape.dummy]
+ rate = 2e-3 # Bulk unfractionated escape rate [kg s-1]
+
+# Interior - physics table
+[interior]
+ grain_size = 0.1 # crystal settling grain size [m]
+ F_initial = 8.0E4 # Initial heat flux guess [W m-2]
+ radiogenic_heat = false # enable radiogenic heat production
+ tidal_heat = false # enable tidal heat production
+ rheo_phi_loc = 0.4 # Centre of rheological transition
+ rheo_phi_wid = 0.15 # Width of rheological transition
+ bulk_modulus = 260e9 # Bulk modulus [Pa]
+
+ module = "spider" # Which interior module to use
+
+ [interior.spider]
+ num_levels = 200 # Number of SPIDER grid levels
+ mixing_length = 2 # Mixing length parameterization
+ tolerance = 1.0e-10 # solver tolerance
+ tolerance_rel = 1.0e-8 # relative solver tolerance
+ solver_type = "bdf" # SUNDIALS solver method
+ tsurf_atol = 20.0 # tsurf_poststep_change
+ tsurf_rtol = 0.01 # tsurf_poststep_change_frac
+ ini_entropy = 3300.0 # Surface entropy conditions [J K-1 kg-1]
+ ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1]
+
+ [interior.aragog]
+ logging = "ERROR"
+ num_levels = 200 # Number of Aragog grid levels
+ tolerance = 1.0e-10 # solver tolerance
+ ini_tmagma = 3500.0 # Initial magma surface temperature [K]
+ inner_boundary_condition = 1 # 1 = core cooling model, 2 = prescribed heat flux, 3 = prescribed temperature
+ inner_boundary_value = 4000 # core temperature [K], if inner_boundary_condition = 3. 
CMB heat flux [W/m^2], if inner_boundary_condition = 2
+ conduction = true # enable conductive heat transfer
+ convection = true # enable convective heat transfer
+ gravitational_separation = false # enable gravitational separation
+ mixing = false # enable mixing
+ dilatation = false # enable dilatation source term
+ mass_coordinates = false # enable mass coordinates
+ tsurf_poststep_change = 30 # threshold of maximum change on surface temperature
+ event_triggering = true # enable events triggering to avoid abrupt jumps in surface temperature
+
+ [interior.dummy]
+ ini_tmagma = 3500.0 # Initial magma surface temperature [K]
+
+# Outgassing - physics table
+[outgas]
+ fO2_shift_IW = 0 # log10(ΔIW), atmosphere/interior boundary oxidation state
+
+ module = "calliope" # Which outgassing module to use
+
+ [outgas.calliope]
+ include_H2O = true # Include H2O compound
+ include_CO2 = true # Include CO2 compound
+ include_N2 = true # Include N2 compound
+ include_S2 = true # Include S2 compound
+ include_SO2 = true # Include SO2 compound
+ include_H2S = true # Include H2S compound
+ include_NH3 = true # Include NH3 compound
+ include_H2 = true # Include H2 compound
+ include_CH4 = true # Include CH4 compound
+ include_CO = true # Include CO compound
+ T_floor = 2300.0 # Temperature floor applied to outgassing calculation [K].
+
+ [outgas.atmodeller]
+ some_parameter = "some_value"
+
+# Volatile delivery - physics table
+[delivery]
+
+ # Radionuclide parameters
+ radio_tref = 4.55 # Reference age for concentrations [Gyr]
+ radio_K = 310.0 # ppmw of potassium (all isotopes)
+ radio_U = 0.031 # ppmw of uranium (all isotopes)
+ radio_Th = 0.124 # ppmw of thorium (all isotopes)
+
+ # Which initial inventory to use? 
+ initial = 'elements' # "elements" | "volatiles" + + # No module for accretion as of yet + module = "none" + + # Set initial volatile inventory by planetary element abundances + [delivery.elements] + use_metallicity = false # whether or not to specify the elemental abundances in terms of solar metallicity + metallicity = 1000 # metallicity relative to solar metallicity + + H_oceans = 1.0 # Hydrogen inventory in units of equivalent Earth oceans + # H_ppmw = 0.0 # Hydrogen inventory in ppmw relative to mantle mass + + CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system + # C_ppmw = 0.0 # Carbon inventory in ppmw relative to mantle mass + + # NH_ratio = 0.0 # N/H mass ratio in mantle/atmosphere system + N_ppmw = 2.0 # Nitrogen inventory in ppmw relative to mantle mass + + # SH_ratio = 0.0 # S/H mass ratio in mantle/atmosphere system + S_ppmw = 200.0 # Sulfur inventory in ppmw relative to mantle mass + + # Set initial volatile inventory by partial pressures in atmosphere + [delivery.volatiles] + H2O = 30.0 # partial pressure of H2O + CO2 = 0.0 # partial pressure of CO2 + N2 = 0.0 # etc + S2 = 0.0 + SO2 = 0.0 + H2S = 0.0 + NH3 = 0.0 + H2 = 0.0 + CH4 = 0.0 + CO = 0.0 + +# Atmospheric chemistry postprocessing +[atmos_chem] + + module = "vulcan" # Atmospheric chemistry module + when = "offline" # When to run chemistry (manually, offline, online) + + # Physics flags + photo_on = true # Enable photochemistry + Kzz_on = true # Enable eddy diffusion + Kzz_const = "none" # Constant eddy diffusion coefficient (none => use profile) + moldiff_on = true # Enable molecular diffusion in the atmosphere + updraft_const = 0.0 # Set constant updraft velocity + + # Vulcan-specific atmospheric chemistry parameters + [atmos_chem.vulcan] + clip_fl = 1e-20 # Floor on stellar spectrum [erg s-1 cm-2 nm-1] + clip_vmr = 1e-10 # Neglect species with vmr < clip_vmr + make_funs = true # Generate reaction network functions + ini_mix = "profile" # Initial mixing ratios (profile, outgas) + 
fix_surf = false # Fixed surface mixing ratios + network = "SNCHO" # Class of chemical network to use (CHO, NCHO, SNCHO) + save_frames = true # Plot frames during iterations + yconv_cri = 0.05 # Convergence criterion, value of mixing ratios + slope_cri = 0.0001 # Convergence criterion, rate of change of mixing ratios + +# Calculate simulated observations +[observe] + + # Module with which to calculate the synthetic observables + synthesis = "none" diff --git a/input/ensembles/escape_comparison_on_off.toml b/input/ensembles/escape_comparison_on_off.toml new file mode 100644 index 000000000..31f3037c1 --- /dev/null +++ b/input/ensembles/escape_comparison_on_off.toml @@ -0,0 +1,29 @@ +# Config file for running a grid of forward models + +# Path to output folder where grid will be saved (relative to PROTEUS output folder) +output = "scratch/escape_comparison_on_off/" + +# Make `output` a symbolic link to this absolute location. To disable: set to empty string. +symlink = "" + +# Path to base (reference) config file relative to PROTEUS root folder +ref_config = "input/demos/escape_comparison.toml" + +# Use SLURM? +use_slurm = true + +# Execution limits +max_jobs = 2 # maximum number of concurrent tasks (e.g. 500 on Habrok) +max_days = 3 # maximum number of days to run (e.g. 1) +max_mem = 3 # maximum memory per CPU in GB (e.g. 3) + +# Now define grid axes... +# Each axis must be a new section (table) in this file. +# Each table corresponds to the name of the parameter to be varied. +# Each table name must be written in double quotes. 
+# See examples below

+# Escape module set directly
+["escape.module"]
+ method = "direct"
+ values = ['none', 'zephyrus']
diff --git a/input/ensembles/escape_grid_0_485Msun.toml b/input/ensembles/escape_grid_0_485Msun.toml
new file mode 100644
index 000000000..ce4c281d7
--- /dev/null
+++ b/input/ensembles/escape_grid_0_485Msun.toml
@@ -0,0 +1,59 @@
+# Config file for running a grid of forward models
+
+# Path to output folder where grid will be saved (relative to PROTEUS output folder)
+output = "scratch/escape_grid_0_485Msun/"
+
+# Make `output` a symbolic link to this absolute location. To disable: set to empty string.
+symlink = ""
+
+# Path to base (reference) config file relative to PROTEUS root folder
+ref_config = "input/demos/escape_grid_0_485Msun.toml"
+
+# Use SLURM?
+use_slurm = true
+
+# Execution limits
+max_jobs = 500 # maximum number of concurrent tasks (e.g. 500 on Habrok)
+max_days = 5 # maximum number of days to run (e.g. 1)
+max_mem = 3 # maximum memory per CPU in GB (e.g. 3)
+
+# Now define grid axes...
+# Each axis must be a new section (table) in this file.
+# Each table corresponds to the name of the parameter to be varied.
+# Each table name must be written in double quotes. 
+# See examples below + +# Atmosphere module set directly +["atmos_clim.module"] + method = "direct" + values = ['agni', 'janus'] + +# Semi-major axis set by direct +["orbit.semimajoraxis"] + method = "direct" + values = [0.0188, 0.0939, 0.1877] + +# Escape efficiency set by direct +["escape.zephyrus.efficiency"] + method = "direct" + values = [0.1, 0.5, 1.0] + +# XUV Pressure set by direct +["escape.zephyrus.Pxuv"] + method = "direct" + values = [1e-5, 1e1] + +# Oxygen fugacity set by direct +["outgas.fO2_shift_IW"] + method = "direct" + values = [-4, 0, 4] + +# Planet bulk C/H ratio set by direct +["delivery.elements.CH_ratio"] + method = "direct" + values = [0.1, 1.0, 2.0] + +# Hydrogen inventory set by direct +["delivery.elements.H_oceans"] + method = "direct" + values = [1.0, 5.0, 10.0] diff --git a/input/ensembles/escape_grid_1Msun.toml b/input/ensembles/escape_grid_1Msun.toml new file mode 100644 index 000000000..d076272b7 --- /dev/null +++ b/input/ensembles/escape_grid_1Msun.toml @@ -0,0 +1,59 @@ +# Config file for running a grid of forward models + +# Path to output folder where grid will be saved (relative to PROTEUS output folder) +output = "scratch/escape_grid_1Msun/" + +# Make `output` a symbolic link to this absolute location. To disable: set to empty string. +symlink = "" + +# Path to base (reference) config file relative to PROTEUS root folder +ref_config = "input/demos/escape_grid_1Msun.toml" + +# Use SLURM? +use_slurm = true + +# Execution limits +max_jobs = 500 # maximum number of concurrent tasks (e.g. 500 on Habrok) +max_days = 5 # maximum number of days to run (e.g. 1) +max_mem = 3 # maximum memory per CPU in GB (e.g. 3) + +# Now define grid axes... +# Each axis must be a new section (table) in this file. +# Each table corresponds to the name of the parameter to be varied. +# Each table name must be written in double quotes. 
+# See examples below + +# Atmosphere module set directly +["atmos_clim.module"] + method = "direct" + values = ['agni', 'janus'] + +# Semi-major axis set by direct +["orbit.semimajoraxis"] + method = "direct" + values = [0.1, 0.5, 1.0] + +# Escape efficiency set by direct +["escape.zephyrus.efficiency"] + method = "direct" + values = [0.1, 0.5, 1.0] + +# XUV Pressure set by direct +["escape.zephyrus.Pxuv"] + method = "direct" + values = [1e-5, 1e1] + +# Oxygen fugacity set by direct +["outgas.fO2_shift_IW"] + method = "direct" + values = [-4, 0, 4] + +# Planet bulk C/H ratio set by direct +["delivery.elements.CH_ratio"] + method = "direct" + values = [0.1, 1.0, 2.0] + +# Hydrogen inventory set by direct +["delivery.elements.H_oceans"] + method = "direct" + values = [1.0, 5.0, 10.0] diff --git a/input/ensembles/grid_toi561b.toml b/input/ensembles/grid_toi561b.toml new file mode 100644 index 000000000..96af1483b --- /dev/null +++ b/input/ensembles/grid_toi561b.toml @@ -0,0 +1,46 @@ +# Config file for running a grid of forward models + +# Path to output folder where grid will be saved (relative to PROTEUS output folder) +output = "scratch/toi561b_grid_sun_spectrum_updated" + +# Make `output` a symbolic link to this absolute location. To disable: set to empty string. +symlink = "" + +# Path to base (reference) config file relative to PROTEUS root folder +ref_config = "input/planets/toi561b.toml" + +# Use SLURM? +use_slurm = true + +# Execution limits +max_jobs = 100 # maximum number of concurrent tasks (e.g. 500 on Habrok) +max_days = 3 # maximum number of days to run (e.g. 1) +max_mem = 3 # maximum memory per CPU in GB (e.g. 3) + +# Now define grid axes... +# Each axis must be a new section (table) in this file. +# Each table corresponds to the name of the parameter to be varied. +# Each table name must be written in double quotes. 
+# See examples below + +# Escape efficiency factor set directly +["escape.zephyrus.efficiency"] + method = "direct" + values = [0.1, 1.0] + +# Planet bulk C/H ratio set directly +["delivery.elements.CH_ratio"] + method = "direct" + values = [0.1, 1.0, 2.0] + +# Planet bulk S/H ratio set directly +["delivery.elements.SH_ratio"] + method = "direct" + values = [0.216, 2.16, 21.6] + +# Hydrogen inventory set by arange +["delivery.elements.H_oceans"] + method = "logspace" + start = 1.0 + stop = 1000.0 + count = 4 diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml index 029ba6d98..c613d1a8f 100644 --- a/input/planets/toi561b.toml +++ b/input/planets/toi561b.toml @@ -8,7 +8,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params] # output files [params.out] - path = "toi561b" + path = "scratch/toi561b" logging = "DEBUG" plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "png" # Plotting image file format, "png" or "pdf" recommended @@ -20,7 +20,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params.dt] minimum = 3e2 # yr, minimum time-step minimum_rel = 1e-5 # relative minimum time-step [dimensionless] - maximum = 1e10 # yr, maximum time-step + maximum = 1.1e10 # yr, maximum time-step initial = 1e3 # yr, inital step size starspec = 1e9 # yr, interval to re-calculate the stellar spectrum starinst = 1e2 # yr, interval to re-calculate the instellation @@ -50,7 +50,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params.stop.time] enabled = true minimum = 1.0e3 # yr, model will certainly run to t > minimum - maximum = 1.0e10 # yr, model will terminate when t > maximum + maximum = 1.1e10 # yr, model will terminate when t > maximum # solidification [params.stop.solid] @@ -77,8 +77,9 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" age_ini = 0.1 # Gyr, model initialisation/start age module = "mors" + [star.mors] - rot_pcntle = 20.0 # rotation 
percentile + rot_pcntle = 20.0 # rotation percentile -> slow rotator because the star is old ? rot_period = 'none' # rotation period [days] tracks = "spada" # evolution tracks: spada | baraffe age_now = 11 # [Gyr] from Lacedelli et al., 2022 @@ -86,32 +87,56 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [star.dummy] radius = 0.843 # from Lacedelli et al., 2022 [R_sun] + calculate_radius = false # Calculate star radius using scaling from Teff? Teff = 5372.0 # from Lacedelli et al., 2022 [K] # Orbital system [orbit] + instellation_method = 'sma' # whether to define orbit using semi major axis ('sma') or instellation flux ('inst') + instellationflux = 1.0 # instellation flux received from the planet in [Earth units] semimajoraxis = 0.0106 # AU from Patel et al. 2023 eccentricity = 0.0 # dimensionless from Brinkman et al. 2023 zenith_angle = 48.19 # degrees s0_factor = 0.375 # dimensionless + evolve = false # whether to evolve the SMaxis and eccentricity module = "none" [orbit.dummy] H_tide = 1e-11 # Fixed tidal power density [W kg-1] Phi_tide = "<0.3" # Tidal heating applied when inequality locally satisfied + Imk2 = 0.0 # Fixed imaginary part of k2 love number, cannot be positive [orbit.lovepy] visc_thresh = 1e9 # Minimum viscosity required for heating [Pa s] # Planetary structure - physics table [struct] - #mass_tot = 2.24 # M_earth from Brinkman et al. 2023 - radius_int = 1.4195 # R_earth from Patel et al. 2023 + mass_tot = 2.24 # M_earth from Brinkman et al. 2023 + #radius_int = 1.4195 # R_earth from Patel et al. 2023 corefrac = 0.55 # non-dim., radius fraction 0.20 from Brinkman et al. 2023 core_density = 10738.33 # Core density [kg m-3] core_heatcap = 880.0 # Core specific heat capacity [J K-1 kg-1] + module = "self" # self | zalmoxis + + [struct.zalmoxis] + coremassfrac = 0.325 # core mass fraction [non-dim.] + inner_mantle_mass_fraction = 0 # inner mantle mass fraction [non-dim.] 
+ weight_iron_frac = 0.325 # iron fraction in the planet [non-dim.] + num_levels = 100 # number of Zalmoxis radius layers + EOSchoice = "Tabulated:iron/silicate" # iron/silicate for super-Earths, water for water planets with Earth-like rocky cores + max_iterations_outer = 20 # max. iterations for the outer loop + tolerance_outer = 1e-3 # tolerance for the outer loop + max_iterations_inner = 100 # max. iterations for the inner loop + tolerance_inner = 1e-4 # tolerance for the inner loop + relative_tolerance = 1e-5 # relative tolerance for solve_ivp + absolute_tolerance = 1e-6 # absolute tolerance for solve_ivp + target_surface_pressure = 101325 # target surface pressure + pressure_tolerance = 1e11 # tolerance surface pressure + max_iterations_pressure = 200 # max. iterations for the innermost loop + pressure_adjustment_factor = 1.1 # factor for adjusting the pressure in the innermost loop + # Atmosphere - physics table [atmos_clim] prevent_warming = false # do not allow the planet to heat up @@ -159,7 +184,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [escape] module = "zephyrus" # Which escape module to use - reservoir = "bulk" # Escaping reservoir: "bulk", "outgas", "pxuv". + reservoir = "outgas" # Escaping reservoir: "bulk", "outgas", "pxuv". 
[escape.zephyrus] Pxuv = 1e-2 # Pressure at which XUV radiation become opaque in the planetary atmosphere [bar] @@ -182,20 +207,30 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" module = "spider" # Which interior module to use [interior.spider] - num_levels = 220 # Number of SPIDER grid levels + num_levels = 100 # Number of SPIDER grid levels mixing_length = 2 # Mixing length parameterization tolerance = 1.0e-10 # solver tolerance tolerance_rel = 1.0e-8 # relative solver tolerance solver_type = "bdf" # SUNDIALS solver method tsurf_atol = 20.0 # tsurf_poststep_change tsurf_rtol = 0.01 # tsurf_poststep_change_frac - ini_entropy = 3300.0 # Surface entropy conditions [J K-1 kg-1] + ini_entropy = 4000.0 # Surface entropy conditions [J K-1 kg-1] ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1] [interior.aragog] num_levels = 220 # Number of Aragog grid levels tolerance = 1.0e-10 # solver tolerance ini_tmagma = 3200.0 # Initial magma surface temperature [K] + inner_boundary_condition = 1 # 1 = core cooling model, 2 = prescribed heat flux, 3 = prescribed temperature + inner_boundary_value = 4000 # core temperature [K], if inner_boundary_condition = 3. 
CMB heat flux [W/m^2], if if inner_boundary_condition = 2 + conduction = true # enable conductive heat transfer + convection = true # enable convective heat transfer + gravitational_separation = false # enable gravitational separation + mixing = false # enable mixing + dilatation = false # enable dilatation source term + mass_coordinates = false # enable mass coordinates + tsurf_poststep_change = 30 # threshold of maximum change on surface temperature + event_triggering = true # enable events triggering to avoid abrupt jumps in surface temperature [interior.dummy] ini_tmagma = 3500.0 # Initial magma surface temperature [K] @@ -239,6 +274,9 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # Set initial volatile inventory by planetary element abundances [delivery.elements] + use_metallicity = false # whether or not to specify the elemental abundances in terms of solar metallicity + metallicity = 1000 # metallicity relative to solar metallicity + H_oceans = 1.0 # Hydrogen inventory in units of equivalent Earth oceans #H_ppmw = 109.0 # Hydrogen inventory in ppmw relative to mantle mass diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index a442075d3..75ddc8025 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -8,9 +8,26 @@ # update the related plotting variables accordingly. This can be done in the `run_grid_analyze` function (see below). 
from __future__ import annotations -from proteus.grid.post_processing_grid import * +import os + import matplotlib.cm as cm +from proteus.grid.post_processing_grid import ( + ecdf_grid_plot, + ecdf_single_plots, + extract_grid_output, + extract_solidification_time, + get_grid_parameters, + group_output_by_parameter, + load_extracted_data, + load_grid_cases, + plot_dir_exists, + plot_grid_status, + save_error_running_cases, + save_grid_data_to_csv, +) + + def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True): """ Run the post-processing of a PROTEUS grid, extracting simulation data in a CSV file and generating plots. @@ -129,20 +146,20 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} } - #ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) + ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) # ECDF Grid Plot # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. 
param_settings_grid = { - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, - "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, + #"atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + #"orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, + #"escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + #"outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": True}, - #"delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": False}, - #"escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} + "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": False}, + "escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} } output_settings_grid = { 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, From 9d8d21d3f3d9a0d7471f4ff8c2596a167f99a4d6 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 27 Aug 2025 10:50:51 +0200 Subject: [PATCH 050/105] update files for more efficient grid runs --- input/demos/escape_comparison.toml | 2 +- input/ensembles/grid_toi561b.toml | 4 +-- input/planets/toi561b.toml | 2 +- src/proteus/grid/post_processing_grid.py | 32 +++++++++++++------ 
src/proteus/grid/run_grid_analysis.py | 40 +++++++++++++----------- 5 files changed, 48 insertions(+), 32 deletions(-) diff --git a/input/demos/escape_comparison.toml b/input/demos/escape_comparison.toml index d2a60ac64..4f88cf77c 100644 --- a/input/demos/escape_comparison.toml +++ b/input/demos/escape_comparison.toml @@ -28,7 +28,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params.out] path = "scratch/escape_comparison_on_off" logging = "INFO" - plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations + plot_mod = 15 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive diff --git a/input/ensembles/grid_toi561b.toml b/input/ensembles/grid_toi561b.toml index 96af1483b..44716c2d6 100644 --- a/input/ensembles/grid_toi561b.toml +++ b/input/ensembles/grid_toi561b.toml @@ -13,8 +13,8 @@ ref_config = "input/planets/toi561b.toml" use_slurm = true # Execution limits -max_jobs = 100 # maximum number of concurrent tasks (e.g. 500 on Habrok) -max_days = 3 # maximum number of days to run (e.g. 1) +max_jobs = 75 # maximum number of concurrent tasks (e.g. 500 on Habrok) +max_days = 5 # maximum number of days to run (e.g. 1) max_mem = 3 # maximum memory per CPU in GB (e.g. 3) # Now define grid axes... 
diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml index c613d1a8f..377711c5e 100644 --- a/input/planets/toi561b.toml +++ b/input/planets/toi561b.toml @@ -10,7 +10,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params.out] path = "scratch/toi561b" logging = "DEBUG" - plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations + plot_mod = 15 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "png" # Plotting image file format, "png" or "pdf" recommended write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive diff --git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py index a03c319db..13dc6faa8 100644 --- a/src/proteus/grid/post_processing_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -15,7 +15,6 @@ import seaborn as sns import toml - ##### Functions for extracting grid data ##### def load_grid_cases(grid_dir: Path): @@ -330,8 +329,6 @@ def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dic csv_file = output_dir / f"{grid_name}_extracted_data.csv" # Write CSV file - num_cases = len(cases_data) - with open(csv_file, 'w', newline='') as csvfile: writer = csv.writer(csvfile) @@ -769,14 +766,16 @@ def ecdf_single_plots(grid_params: dict, grouped_data: dict, param_settings: dic norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) else: norm = mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) - color_func = lambda v: cmap(norm(v)) + def color_func(v): + return cmap(norm(v)) colorbar_needed = True else: # Categorical colormap: map each unique value to one color unique_vals = sorted(set(tested_param)) cats_cmap = mpl.colormaps.get_cmap(cmap.name).resampled(len(unique_vals)) color_map = {val: cats_cmap(i) for i, val in 
enumerate(unique_vals)} - color_func = lambda val: color_map[val] + def color_func(val): + return color_map[val] colorbar_needed = False # Create a new figure & axes @@ -871,18 +870,32 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, # Loop through parameters (rows) and outputs (columns) for i, param_name in enumerate(param_names): tested_param = grid_params.get(param_name, []) - settings = param_settings[param_name] + if not tested_param: + print(f"⚠️ Skipping {param_name} — no tested values found in grid_params") + continue + + settings = param_settings[param_name] + # Determine coloring is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) if is_numeric: - norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) if settings.get("log_scale", False) else mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) - color_func = lambda v: settings["colormap"](norm(v)) + vmin, vmax = min(tested_param), max(tested_param) + if vmin == vmax: + # avoid log/normalize errors with constant values + vmin, vmax = vmin - 1e-9, vmax + 1e-9 + if settings.get("log_scale", False): + norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax) + else: + norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) + def color_func(v): + return settings["colormap"](norm(v)) colorbar_needed = True else: unique_vals = sorted(set(tested_param)) cmap = mpl.colormaps.get_cmap(settings["colormap"]).resampled(len(unique_vals)) color_map = {val: cmap(j) for j, val in enumerate(unique_vals)} - color_func = lambda v: color_map[v] + def color_func(v): + return color_map[v] colorbar_needed = False for j, output_name in enumerate(out_names): @@ -900,6 +913,7 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, log_scale=out_settings.get("log_scale", False), color=color_func(val), linewidth=4, + linestyle='-', ax=ax ) diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index 
75ddc8025..6f7c47db5 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -126,15 +126,16 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) # Single ECDF Plots # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. param_settings_single = { - "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, - "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": True}, - #"delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": False}, - #"escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} + # "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, + # "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + # "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, + # "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, + # "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + # "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + # 
"delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}, + # "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": False}, + # "escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} + "escape.module": {"label": "Escape module", "colormap": cm.RdYlGn, "log_scale": False} } output_settings_single = { 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, @@ -151,22 +152,23 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) # ECDF Grid Plot # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. param_settings_grid = { - #"atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, - #"orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, - #"escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - #"outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": True}, - "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": False}, - "escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} + # "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + # "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, + # "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, + # "escape.zephyrus.Pxuv": {"label": 
r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + # "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, + # "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + # "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": True}, + # "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": True}, + # "escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} + "escape.module": {"label": "Escape module", "colormap": cm.RdYlGn, "log_scale": False} } output_settings_grid = { 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, 'P_surf': {"label": r"P$_{surf}$ [bar]", "log_scale": True, "scale": 1.0}, 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, + 'T_surf': {"label": r"T$_{surf}$ [$10^3$ K]", "log_scale": False, "scale": 1.0/1000.0}, 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, # 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, From 8b2be294c2d12c1343594a075f2b4b26cd7eb8e2 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 27 Aug 2025 11:39:37 +0200 Subject: [PATCH 051/105] minor change toml files for grid runs --- input/demos/escape_comparison.toml | 8 ++++---- input/ensembles/escape_comparison_on_off.toml | 11 ++++++++--- input/planets/toi561b.toml | 2 +- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/input/demos/escape_comparison.toml b/input/demos/escape_comparison.toml index 4f88cf77c..5f75f5706 100644 --- a/input/demos/escape_comparison.toml +++ 
b/input/demos/escape_comparison.toml @@ -27,8 +27,8 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # output files [params.out] path = "scratch/escape_comparison_on_off" - logging = "INFO" - plot_mod = 15 # Plotting frequency, 0: wait until completion | n: every n iterations + logging = "DEBUG" + plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive @@ -169,7 +169,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" tmp_minimum = 0.5 # temperature floor on solver tmp_maximum = 5000.0 # temperature ceiling on solver - module = "agni" # Which atmosphere module to use + module = "janus" # Which atmosphere module to use [atmos_clim.agni] p_top = 1.0e-5 # bar, top of atmosphere grid pressure @@ -202,7 +202,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [escape] module = "zephyrus" # Which escape module to use - reservoir = "outgas" # Escaping reservoir: "bulk", "outgas", "pxuv". + reservoir = "bulk" # Escaping reservoir: "bulk", "outgas", "pxuv". [escape.zephyrus] diff --git a/input/ensembles/escape_comparison_on_off.toml b/input/ensembles/escape_comparison_on_off.toml index 31f3037c1..802430466 100644 --- a/input/ensembles/escape_comparison_on_off.toml +++ b/input/ensembles/escape_comparison_on_off.toml @@ -13,8 +13,8 @@ ref_config = "input/demos/escape_comparison.toml" use_slurm = true # Execution limits -max_jobs = 2 # maximum number of concurrent tasks (e.g. 500 on Habrok) -max_days = 3 # maximum number of days to run (e.g. 1) +max_jobs = 5 # maximum number of concurrent tasks (e.g. 500 on Habrok) +max_days = 5 # maximum number of days to run (e.g. 1) max_mem = 3 # maximum memory per CPU in GB (e.g. 
3) # Now define grid axes... @@ -23,7 +23,12 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 3) # Each table name must be written in double quotes. # See examples below -# Atmosphere module set directly +# Escape module set directly ["escape.module"] method = "direct" values = ['none', 'zephyrus'] + +# Atmosphere module set directly +["atmos_clim.module"] + method = "direct" + values = ['janus', 'agni'] diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml index 377711c5e..c613d1a8f 100644 --- a/input/planets/toi561b.toml +++ b/input/planets/toi561b.toml @@ -10,7 +10,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params.out] path = "scratch/toi561b" logging = "DEBUG" - plot_mod = 15 # Plotting frequency, 0: wait until completion | n: every n iterations + plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "png" # Plotting image file format, "png" or "pdf" recommended write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive From 8a25af0d9c9f32d42c8adf7060beb0ce3243ab01 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Thu, 28 Aug 2025 16:42:38 +0200 Subject: [PATCH 052/105] changes for toi561b grid with correct stellar spectrum --- grouped_data.pkl | Bin 84644 -> 0 bytes input/ensembles/grid_toi561b.toml | 2 +- input/planets/toi561b.toml | 2 +- src/proteus/grid/run_grid_analysis.py | 20 ++++++++++---------- 4 files changed, 12 insertions(+), 12 deletions(-) delete mode 100644 grouped_data.pkl diff --git a/grouped_data.pkl b/grouped_data.pkl deleted file mode 100644 index 22beb4ddd13234f93b080925a6d32c51406356f9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 84644 zcmd44c_3Ba_cz`(6J<=wRE9K3r9yJgjXIh|MI{+hLYXd7 zG?GLU3iU>V-`Zy%-kzr?zwh(>zR&M-`{V8PUejKC?X}n5_gvQD@Fnq$V2nWj{1P>y 
zrCdDO+iW~Ex2~GC*~4M`R`%v4=81?g%HG3nv%3x3ely#JZR1SQWOjP)aP`r&b8)xV z+~MiW-frXWZsU^}E#P5i<80%en2;FFPl>?KEo>iGdqm;cu9FxM9+qgDsIiPUnxUV# za^=dU{qT?a&$1EhL|a3lK16_jiJ>63DN%zYzJ%=wQ7kqkSz9h<6W}aKybMfT{V7C= zWC>Nt&W3*^D{e)45n{0z_k|A}IFLrNqzje;0VqP9iB(lq_etWLy}I=7H^wAO^@s-6 z3O^~OMaf81PsbF$^d_t&-!_?|5HE^FHNIVsAz7(snX#0x;O8hT9P;J{K~bbS=3|Pj z?bULMvcGaV`~+7!S}L>aL>7>&WKqW~inV)XmNf+!>m|3sKa%La#=%2a+ZmK27V*5D zz*^(mzEjS#gmj{T1JjXM5srcP?zk+9qVn|+MIqYn-`xF%JV~+y74Bhwf^ugxQZi9~ zL9Pf8-7|0Ev80%DvE>m@{IT%34|M1R<5yvdiqTaJHtqVrq14;%eO%z0T?Zb{nMT0tF^FYflXCF0H4Dvyc#JmPP&GgGRsq?0JA0@{EmkRI4fFC$w+1v zVVSs#kBkw8c;@#VQv^QbV671jk(lDak{~E8@hzs9vLWgdmhpe;U5;49=c>~Q%ZX_#m+|6>z1!?Ug*r`C)Jv4gIyV?QF0tlAc(M))cfQykW2y0-WQ~2h1@FJeL#if}@Px{0_zCLf-$O7E z;E_YF(wY1Ff6iRiXjctv!>#8n2dNUz9d=Uw5}&k6aOV1Ts}mHf>rMzH(`2YkDi?qN zLQ(XQ8$!>p8@`X0b0|vm08l>PK`)4O6J?P2 zIKL2QwdvJONPjz@i8QgP54t~N|vigcxrr!bm+zLDU z&}eIl6=WxoMFHM?f_8pUEe2zr#9-!g7i-+G-INcIBC+ zOi^~9G@}4f?^^@}@cBbr54E9*-$&@J$-5F)VWM;66r3P|C-kh5Fl%&wJ02pEw!<2; zUKKkwDtQ_PJ74)ymRV~O@;OM0_}V@SZy^5dxEc{3R+-|^@$_m_TEwq6(OBK5IL;br zv6MA!vGb!dYk&ZhF(VT(;QL5#@w#`na3TdVW}*3j7Ed*{v*TwnK~Z=mED(jxTx0&lM8sGAB78}R7;s^9moqjd_If3rx7n|fa?mD;+LLE+YcGnjJnt zAPVv0C%r#KH!EXSz;y3eN_dBRF#H6qU6Er63XFF3!3JgWEhwLfFLw;EtKpGjvDV0b zek@~Vv>&T08Awu=iCPI0T;H8ro??T%={A%w@%{w^gC7aYu^T*x>1xG!@spY<@g>>; zZ@XPGr#X~AqpIoBFLPrczgCUJ^=o88BeoMhAk1T5h~sSfp`RL^__DHqDjvjp_hon| zjSAU=_wmy*`l2PX<0aN|w(7@&dgu6CQ===sLstwG!x=0*WnRG`JUS=3j6<^Oma6o<3 z$Q0~qUm21G3TNQ~40uWr2QH#Aic2sE8?Y==J^{PxRIEaPG@ZFTND==1o4Kup^i&k3 zWU+?net8avz%9}_i^^r9rcVU#G)7eq4z_eDT~oaJc&w2yODu`bLC%=Ia*D#UmY%MN z&K%mZSv7_oXdiFhWA#WEdAPZf(-e}?!N z7Eu(Iw2K>_&g$111kxf}wHxtv3EdV%QCN&^*RdUi8POPcsV~k#!YttyI`WQ1>&hw0 z-rp+-1c>$*byPhdda0q1hw%bfUD}*3{le#8Qxu|=FGXa5YYu#N=?@2OuzHW;c< zVM0+DOX#|+z`$`PYHay%?dkQy`G9g?I`Sq{4HxB zAXgKn;q?-INRKW(#eib5LK;?;Q!+J0P?j~m)0xYMxbSZJ^fn5?PY`D8E~i`|Iz;c&OQe*ES4P@a9?O_68pWZ6 z6%^KE7LR-pc07#N5wnC}pT~L8s_`4oQuOO`tnOrezZ|I(HEC}6O2x2g#_C@8tQ!%F z#lwFaYlW6yASmJ9t000tI<~)gh?mH}SbFhKENm*igF{gmOWbgt%75#lSVXIs 
zGR{ijt2WrNnmvzkap_Gf!Ibv0`|x>61hPA#ww$u#H2OM00nrCzW}J7boA zlwuYo6UwQwMu4cfP4`G9+w;MTc+F;#*>+XAs(->cl4-r(aPz_BDqyWUGrn)MCK7%mvuMvspva{N z?J}JLGBRak{)D-EJRK__YwiXz-t(1Oa_0l(#?|ZV^-}@dfA11(BMB^F)#h1SeF5y+ z?6Dn1qur}j{`pie$hNqUNboEG;aeY?bbFD9@<(c;D^9==mojdA3DE^sYOQ>ZJer_K z)1zCHKOr|7G&0^ff0Oe|romsRCP_}Nc)0Tq8ZM)&7tHCS<%J!14*AvZv zQtEIzjRh0*xYSl>E?R-6W3;*RL;pB4kG@lhST zyK4bVS^AZ?I+|p@3r(FP69q1KoNKS+pJqui-*#DC(eh3KO7gs#@iETe=F4y!+ppVz z;{QbPyZs81`T56I_a9kGU}sawr5ztxl%fAeOl9Y~E@D~6ommQUT zA%4x-FXsQSKx56dR*K5F0$_F7*pY3&z{4h?Sv3yXK$(=2_O1w)=;OWxS!{o_N8hdX*w839xC$OeB$S2>u zfn}j~To@9y_RDN=qkP%NQUw%D>G2BbtN4KRRETZ4Zxn#qf(Er`2La?d ztA=Q!^i*m-U~pt9c=&mRmiC@mK$%8nIa@>>R7M2KL0Dok8&Ip+mTT5+rO?SW=X%Qr-VjCQ3=RYkn2Ue6fy@k_p{S zGM_8#cqs1id5AE3M^*Hk=|Uv4!eZXT-Rkp(uspsyZp!rHA3_-%oo#!036D-5SgsC# z;Hvk7WQN83w0ZYH9p;rgIv5T$a6Ys0aCo`=5cLTu9x%10c+iZWg)4M?}J*kV!1A{ zrxQWuZbdk7B8J_%9ZbA%1G@bXo?1%{^Br_$nTxGnD49P5L z-7hz*wvA-w)vtUrDtqP-J8YxVk})UrzzyaZgI-J6E`yf;1i1aKW~irwmUm@^+weRl%zpn_L8a%+b=-HAEcbnu1-rC2Rkw)dv|J&)ju!` zosbL@HvoIrmsHD~+&n}$L;dmzqt1y#C_YA_I&+;RNoHxeZpF=2vL$I~a1vnY3gRqEREnG^#)=VpNj_ZwwDpEM6a%{LL zT)o&6pDU&3l@3v7k7S2@cAG;oGfOP%JruQuusrT}GpEJEx%IA@@1F}Hc68?c@tBOs zo>xCRd?8FFceUOOlSfdSKIr}Cd3aS026SwmF1Hvtbw@;FN+wW}j;ziRje&83_dO$d zGY^u?#uJwxpAfJD;YCjfle>;UF-V%ZR`duun38{w6YK-^i}w9R8BkL`c$v=Q^AQDM z)#uBtGRsM(|GAx8%GC>jVx~5uMjlR$x9(Y5(J{A4<}3GTpYK~$kj%{Aua^b@$g|V0 z@`UQ}?n`19>sSU*@>eRFGhqXH^gP=Zwg~FZD_OM*?(N#(M)-SyQFGDgt=+EvH($c& z9#6m6{DOmJ$?ID`&P&uNp%#_q+aN1qYu8%Rj@#U&y5ZYt4A`(gnAKGX zZ>VK%ml4F-Bg)TPRiz4!dtJjI(U=Wj`DU5NdeucFvqdSWTc)LxWH!`4sXe|V3s^6^ z=A_I=*N>eUN%t+`aPoNlEPC8>e{ihWa+1f~AQ(%^dbMjkl)vDF(+|d0L!kU8RP)VG zUk?T+@>&Z#dI~X|wdwqo%P|m^-Ac!RL0 zBR{TyA*RX}-7AIaxZiq>#^Kpex?Y?3-F~Vs2r?SSj{W3i(3#6e7u~^!4NRYF<64`l z?|{GJJH3xLKq+a^QeHp*1)MB#8hppz7}WqpVTQ@7?Bj4kJ@WR|*$nrTJ!Y$JvNK`B z$F_%V=wZW^YH#Oai~f7yc$}4>yjj;ll9?REySFr59@O=^D-$i=lgvcTGvnm=8zG|a z8$uG#gu^*v^Un5G1uX2Il|1_^emo?Z8Kd^P|8oFBl38G2x=h^#)P=MITC1kRSf5`+ zjmSwRnF%eO+gdurz-MD>cu-%mb{A~#KtC2H2q9nmigUYo$4TZHZ_uZpolP?_rt?I2O 
zeG(z#8uUlltbFJOG3@T1ZK&b|uAVcjaZ)b_nVKi@nJr!I%W1^zYkiW{iQ#*r1YU`?OdSTwU?KzWKETj6QuIgRcqlJ#MRWdc@gEP&ZxX zyLLV-N}TcDI}Ks65QN2tGT&bv2bAp>s^2*tg=lE2bp06B4{or^6N}XDgTeJj<6o4b z4ZYBMQ}^57FxI3JSuxu(P&cj}+a$6HR%g+o;GHp3fHj*e5B~}&v1d_BnZye4IXpNf zgLxSoi<|fO_m*!fz_FQQ6)_PKNkAUJ5_oF{)Igco4+4GdhHcy02)~4NmGY7J= zK18y&t^xwdRuWhnoD3k~uDT%aCJ}IT<_$}c>(wwsUqz}+g$l?>cwgG4vk$Fqkl`g@Mt8gPW@#K-REr+z_Eb$RW)@ei5 z)e^_nZSX$|kI4i~HhXxwZ$-bJ_LoOwg#V+mA#xEt7+wArOvsS8+>CRh06od=5Nr?x)B-vA<&H~b$@r-;%%bDqndzfo^zL}Q z$rZbSC5s-+!=V0?=5N^`e;j}+%tj~;##-PUWtaq^^DNO?JIlrwSn30gDXX&0_9G8f z-(G&QAr!!P+CzVp0m|3`%L^)(ZK~P=GLv*Vv}+F|P--=Y=?6d^eJGm`LwrR~QX#7T z69(NVyniUD8QhpHvpjXPAg~n5VGJXnY=;oS@8=QzW2GheFY^Fp*UQ^XX-S|c=PR_N z)qugSL!Zk|Zv@IDnD9D20F&pTxByV6C%9|G;k)HQNNeJReof;K0J5FCl_pFFg94^& ztPAadG6_BGoedPzRtQHjP*gIndR3kSpdO7p12ADjyPZ-bfQb;M@y`LM${SA0h48v( z3k5oD&oL*v2UYWU+38Nt!kr}I=!sk3=MnAi0)csl9b1d`zB zUTw%>wvfb<7cY$+t6u>{eYfFn%Xk2zzwMupYzAOn=GycHLL{Nzed_xYtwf-V3u;z7 z30ivB-nR$|f-jsIO@YKk4n(W82PF?srZ2bq%2Fl?vs1fjesx^{H~Z=I z+m0iDV`eEH%%FTZF3MS|WyoSEb1Hhq#OiCqG8?r>%=gtbPN84Uv86Bh%}QC~hI zXb*r%?e~7jz-C<|PZt=@>}-BB^|8RxLc4bp0PXCZnx}8VQaU;^)DE8T>vUD6`PsumqsvxHMX31xXloKvfYvOA;1x=a=*vuY=JS(0R%+Yq|i70m6DS zH<}w91E6bwW=IlS1SBF0yk->DOPpuLQ zMj8)tj$T_!5+R-p{01@Gu4nYsfmymYF?J6oNj}NgdQ3s`_ficqD@= zfI7(7!I!G&7c{`2Ae0~kXkS9+C_+}BqF8vWpek@1O(n?H9Tj*`52iCNWIUZ9K@Xdu zct-(iKU!cz z|1>ck^98`Qkjsqhod9y_j2m<;WGP+og9W@`a80B>5GXmijKXvVEz?V2a|aBKfhmvZ z1wUZ%L&C=4(=Mo=g`5V;3i?C@b=sdB6L9r}(W}1}{DiSGYoQ=OE{9C17q?yU1dTp8 z)_@ykDR@JN=+wRF4*Lazs?q5gJYwSPj8SEvzLZ`sP^TlmZwp?ppckfJ3x2eM<>Nt; zJ5%<7@It!egqT3R6Re~Lr9C`FZ-v0QP>fMINoqz@=baN9^GiDbQu<3ckhkwii;J&ROV!Kpl=ONaVn+ZN~G4wsg} zA(G4nhH!Slw*_F~_@#!^`#8mKB~GGLr)9VEI}U;KD!MpygZ`u@(`WjiKS|qQ@Fk@n zJQaRe&4F1sd!q~iu*jVHb&G!km9pdm96{Yf97|=R8wwn0$;nr!Z+VGikwqv{pq@cT zBe|R|zJoshJNhCB23m{M1mp$m$kZVd#H_@A2?z!pv7kjSt`&V{8Z?+(z)$_k3X;Kb z{&#U>yJ2`Z8dG=m;Nvr8HGR!XDuFL~;dh#d)bJMkjWVdy+2WWKAfWmR=5^(d)!CE^ zcSA>#@SThfj@fkP{_%*6$ZLFy?{P_w~MoUxC2-pPomun 
zK=2k0N!~NyI74Ce{As2xd+jPo*zHAy5w=3^=ihK`(ghHMM2uigOX*6TTg*nu19o21 zW7!GB_NS=AO~7TG%XXt}I>1`j`vQE0(OuVWeaJ$tzDk3${0@M|$v9JT%;@m@Izq{` z@B!8{e(b!*FZ5l#F;E-`wC@3IWBNks15lthhpRNo@`)tGM;&tSH?VBcSvnbfz6w!E z&m)N=m+4z=UUUWZ?LQH?7w(`8I1KWH?&GN1Z%yOzR6|p?q1E%)lDKTS9ri0T_OAlr z4WiWV3=qDhZ_WOB@C*Vj(8_!2BBW>D4nKT}%BwZRH7>847S3L1gg-0f@r2>gb9WEO zh{U2Y0#U7?Z_suYbg9j8&coBmjj_j-Chr}c=6TfbdcXt&!vhNvM!td3Pf6p_<-QK; z^Uo4k0$asZUYo(gyhjo9mfb!I^NMfAg(|OMGG6YS4RqnmS7g^|Y57hRC?^7LH zpOQqu2`#h`AaiaK+MqxQq34|+nS;wjUP2)**%8I?0mSYr%%E;P-W-L3WAO>a$vz05 z7p8&wAv%|HJL)kjSLYGl6Jh1}aTAgMBo(vrXFP#5lLenY!wm&pKML||;b0Mk%Ten? zRI_2;V)?XVCvM_J3)UWsw^aV_YB;-0V5XON@e1dkXrVK%VF`|Lez`p)#zV0D&m5Gf zU~n9DcmWWhOH%$hx;lisrtO5?lGC~>)&?^@j&c#XA+!mVa~P|14*HM;2d?B>rAT33 zaP>z5u`;IsS*CYMc02mS2WwF%29!O?M0x@Q6YGF zws1tk$r|>k`FhX1^Prw-Ko^hJU`VeuFr6FdzN-kVv*2&=n96J$i5``oY zV@?;2LRaJpqU5N@l(YKD+gl*Vh zp_VY-3WX9TxU>~?JK_67bfxn|2VqFh7*G1#FI+%7ErUH}0NT%2JuYFyPk?=IGJL1)KH$en)gI=ZO z0;|9DgPFp6pnSV6vmy;13VL)OsX2CaGXncI%DM zs!NZcDCVop=7GAUo`|r0K7e1r6`f_3h?Om8*A6mX|NP^)ItLy_dv%Jo3jUanSSCe* zkG2B%c6$T=oio5PyEUHO`2n%KGoIeHLZHmJ!wa6&dVIV4k?4YF@gCpqzn;<$-wnW& zH#c2{u{^qq<2xT|Auu@Q@{Nz?z;jTq-Y$V>YyHLZ3NL*VX{2CwI!{^P(S0Uvm}46d%!%Zdd`_xaD}y{{1^$>EF} z15xzcZ0$)zp(EzaE6`vmVu>h0&VIe7V>g$D&pub%YNvo4Qz+KAlhSES{b0?p~q=L5k)Vp1fqYJ5Z7=#GdXNg;?xY zvtB|vu#;cz{&@Tqe8*7Kp83Lb0+LA^^F)6OP$t~=S(Xmj%}!FS+};7%#F<|`{>~K` zi=E_?ezRmZd^J-$_e3ji2vAb2qg&>L0Z4Wmugv<0D4*86-tq@!3h=HoE&!9_l{$(0&53e%JB% zO#wS){xj`%uLMf!s*`!Rdn`EE+q-^6rzgGWez>*hIjzkeYkq zuSsWjA>qizS&FJKx{s}M=q31Kh@E=crS3FxHCaB|FMk<;q-LV((chI*ENvpgb2K_)X?;7Q1 zR3pmi3o7Xc5LlbN@!oa_T|rCgquX_eBEimxgl{<5$t5O*E2x>$i?aWF+D}&vi0b~8 zzY?XtZA8MCFJ|kTf*S`uJ@}Ha2WiQZx4a7x(ATv~2NtYOqwk0D2uOeZ{tfbpoqTK~ zaTwO!f46U*_qwTw)mnIM#90LH?V02KgkGvs^flpS}XCfc=MBqWiDbKILX7;I@srR)27C%sL(K4N4$WxzNb6bPj;k%x| z`7JBkt{fR+dC;Fg#mN?PcBFvt^W@WO7T!YYJwCfyAy5BxEc5XDvJWtpPnM?8YQYTv z@^=1MoC^6hC^HBG&v_S3PJyWVy!MOagu}uPvIaq5q-FcEDQ2)Til)J%zQcDbdYQf&~$#xxmNdjU#Xf@(;alMa* 
zHAN}Ey`BK|YZ_)Rl_w3MfH2J}lYX-78tlf1OFbV&C&F|i$Efjp2!sRR3DC{k-UQ&< zl=^Y63jiDuSe3i}+R(9z+dX2_cY=Dch%wJDD4K(o2mKkOL`G*dl_(g(IWgCIi(wV) zq=?d-5vyK~9HJFb_9?zG<;9R8A|J$#cS>Fj!X*z<&AOKiVMP>0{!>nPBcJ&m%AbYf zETUAmM{zT(XQZYm=r0VgY$|gnH=ye0*=?nkVwPPY&2-zD0iWvez0?~`K+-sq-Uh|MbR}kKavS0i;r(lZ@%0ziw+4_6_&}c+lE6wZ;ksM+$ zQq^dYTPXr~+MRG|HVLd!ck?-}KZdAB zlvnS_N`c!{q_+8Yn{iMb|223$R`8oM+%1nh+`W48hrd25VX-}v=)7W@j5Wk_pdADnba3-(0C9f19r#9KKxRv3zAlGLoGV;k*AX0~dmyzb&d zK(UD{w?u#TkUeW9f9EEMXwo(rF&?*X02aJ_ZX*Jhqa#sEGEdKeYL#R?@|o$F6#%BX zRoD~evka0;fluXwagY(%Me)pqvP6CDZ8YjVSDS!p5jfiFU0E|gav%U(2AxSa$#!o{y zUXyId_JB#SYmbCiPf&{743w#*hJstcVD^S}`;M)Iot-3=n`6)n_lKmddA|jwTn5&7 z)$V}tuuMq{%=`2^cZ2%97P3nSj>hb{Wtug6;Rs36KR*3dBMP{s!dvO4O91RIZf{mv z1%PjwcAWWC0GjWY)+Hk6rQm7DQK%3}n)$D%w96oahkX2ue88ZsdrP$?OgXVP@a>XI zkoZYc44iw^pjIYr+9GVwh9Yk_#qCBM3~?k((Dh}drePbi-*(#v5zUbZ-=T`X5 z(NE^pD?bnXq5B_?${-Raw2$*aA4`ujk=r6{uDzKZ@lq`@k}$8te8L-IgkSZlMG+ zc(<)@=U1StX+Q9I0)UiHV@jSrM1ww=lX0LQ=AH8GG#RDY1u}`gJ}Zo01315Jt1d;p!{j&p-(_L6L{y43nWOY{^%cBnXve&W&vSq@8` zNMvV`;MbP706sqNFL#te6mq2ba#@fus9U!n2mHyo7IFX3$vOace{H{?BL=Lkd4AI# zyCALJ*M1Lm(O5T{WTQ&~+*!D{u&^1xlexxQy`~|VFFvc5L+B3h+2|C?B%mP_#~=C( z9;SSn_+YLk)TY#g99yN}S~NuQ(um6ppsXohc3wmWDAygnoLV~;C`@1H;?e3zOJmxB z78G!UX>T@jV6&!tJE)eV2icTr@NOrs1Jr|@d(5C-_A&sE^i!K+0)e$|QE2D=_ekA~ zpZOdTHs@~Egvwti2;a_Rsw$vBC2tc^BOw|&r;FJpvtgH{e2ca^m^K4gax)&++;Ii4 z;P~0|>hQXy@a7iyHSpYW zL*D?n9OdO%ul8}V0FV+}_-a3tfK;6qAF>ug>ZLlk&mf&(oYW-&{fg7CAn>XqqD29O zizBlFol%T+ez2z~+5)BI=jbADSgTaaksaP{u=Dd$cE50N;{{4~u8j(RB7m}SmPb+- z1JGTd<8l^7EBBOdaN{FHnfH8T9|STlzvp-#dk;`_R^^`+g&^hSiMg)*4L+w@PWDQF zUx0+iESE1qv1z!$j@VuWGR1ODYmCtnt*@OkbwdwOQo2ohvQS>=d@f39Y6Re5!8?J> z23EWYc>o*NZq?}!0hyZt53`+~0w|qw_RQTKz?ye! 
z`&9i~0CdFTGuOBPD|Hp?bly~?KF+i?C=5V)qH9besw+#<{k@-?1DJO_@sQvGB$E^8 zVwMG3hjlV7?im9`=jZ}uHi*SiaYpfKbWG&YA;-vE%gXsu$oHON?U zgtFiJAz^XW=ZFAcT~6`z<0}Tr27go6*tq~U?$o=jisr8KcG>pLu(wh-@CA|ZEhT_y z6|*w88P(L#I+>@6VfU@yN&2nQN#HxC_aXwV)R2{}cv9WC+lkK%l556j5 z<{cA#9a*{_SQ}(}T|15g<>I@BcKvhks7%Vk#d-Ud?OV6o*|4{}?A*-WzQcZ)UoB(F z{G|V+Q#x9k`bvo<(`$@@bc!lX`G@{XYUwBoniaVPgE9QY0~B#u_^0mxUn z28uZt7)mP40IL!Ko&VRegI5JK$O2dKw+6|dcmPIOe`hJG1$r> zFxWH?eu6O*+u1a5TwIPRTfT7s{O!XnYuUv{#3xzHF8`vz1%Ddcasm!wU84VPf7#VN z11#wQ)?S)&V-Za$TTFuzYZ{cU<^TdaHy+ZI3c8invdgCjWb}~??Ix9H@V|q9UBFsn zj~hR*jBY(af+TB=B6(dF0~v}Drz|x+;YWW&H9<; z6lKlkFEp^BTL&0b%3&F!o6~4enFO-P@(nu-*4XZ*Sryl4aN7v8So!4$&~xHL820ASm4hh-n+L!u8XSK|gTiJIL0Sd$o?{)KL$i)jo0uN?k2?y> zSl}^MH5O9A8pb?NF~#gNgJv-t20$(?Be0C7h|uvh=25^bvu|`_81n|xln>@u*sMd1 zrhJku2NKCLd$W|FfU&?j4hFn77?{1Kcd0S|58!~X@hCbijrp0hOgkMdv(|Q+(rJl- zF`qdC_MV>6($Lq_#?@YPx4o-_kGrRbrpYc(FY1ir#lJ%>63w%9NXJfxQU9xconIZ^ zhM(_gHW*wqF6q}veBqAfi`>ykkKm5BXaslkxZ;+EMO@K+29>}9L!s%0@}HXKj$;n) zV{U19%O%}Qxug{(m-H3nk|uFn((#Kc+Bebr_-Db_>J_#oL-sLubc|+kNBdtEcl2`Q zmd2c1(q)l5TIR91qW>AakM~HQB;V`~{&yb}+|hEKOZsQCxT6^|i%UA&az~p|Zs|G6 z6%7RGeVhir<9gsg$-n!U#TD-?;F1RPT+&UPOIkxi_egH&^9v1#xuIz*$t@i?9j4*u zVel#WkEdPS(sq+edMI*9Lp(0&ZpJ08s<@=@5LYyj`PgF#YXm7q>Ki=8`VHT+))2OZsbaNwY*Q>3qi(ZOs1mw2NChr*lgiZ0_i3 zJlOvAKii;|OS+A6N$W_i=mYq-r(N9f>H+v;P;TfW4}Sv44Nbv^bY1>Wc6f%rr{soq zyU*f3}A+mo((%lJ2TQJ4yZrTV--d-@`oa zXky0|9m)Row2NDspmRw_>k#f}PyBD&*Z*j*SuSZD$|YSOxuT`t-=206T=5nG=%UFD zE#aXdDmV1^rrI~-WB5PWteGUZqVq17v{8i~rrgmJ)4}2IPrIOlAXhZOF zr!JQ?Nad1lmt4}ikW2c|aYa+Hzdh}Odo@?|G3Sz|)LhbG7<#ATX21X0cClR2`;$u= zIdVl;zP~-~;*J*qz%NyCLsxgyewzzgbaP3+WiDx+3%y9Wp%W@JmgJ7MmJXqRf7W4SjXzs6l-~RS+fhF_b)Ry!A&jyi$jS`pry@8?8f1yPnYoIOP z|3*)|zkRx6Nhkkb?@Ma<-`yDV&qkBWZvD@FAC3OrNAZ8Fo8kXSE5W~A!CBJ(*E+Ns z{@?EvYVt=j&t?CG-kNLv-n5eS|Jo?>|6@B9W}P73oDtmBqL(j2B<3V?Jit+FmWE zSo|Mmagrsda1YA_<<4lNWW=kra5@NVs-lEh;T2w(<^R;X9LW%$t4`w~ zLfy)!QbD{NJCl0RJj>N$2?xorgckEsmWgg(eVm>FCj=TP7DK!a4=Rvvig&>CNIU9% 
znXFx2vRRZ2|2qPHf}Nc%Q4}D$tfDdK_cp?B$9y&^4hP9wOQR796Q8t7up55e>I6mU zx)VaZFOwynzX|*Etb8widF01)8R&JCBx}zZIgo|1Wc3xXKm88qC%n?M-WLTGEPsgW zp=Lq+KC+i$v3Pp5u^YeMMB|A06vtUpc1CwOV~W`8l~~y9S4lZ#=f}@2m?gSd8H0f7 z-Z+NuUodc1{zzDkSv-ervHGZxJ=poDWun+;nH?`ND=f!foU;5y6kds`SWFU6?H}Mb zHg`B@;TLe$^oig!XH@lI%Z%JIyaWmvn=x>*a-uwBDcE*kH+q|l!7$9d=AtoWhb7!X zV0Fi$by#@s?-c|k)9Z(wXRXd|rYJ1YA3@mX))hR|``TGfMTanpqnz{zX|bHF@0TM$ z)TFuLrDWJNQ$r9ndmiKI^rn?yf7;9LQ?Hk0IbMIrq0|{O1hMm8OE+T5>io`f#3I_w zUs8c0dY50KWttU%g9i0EMai&CO^;I)qIHrw6-=UL{~c;9mRJ6G0tpkXg0(cuO%lI# zzIQ*K7fbXYjt0@|cmvP0X6)xbSB~ee6v@lm$9W1Z=hsR$1F-__sT453Pkw@xgtPx&%BBALW(&TFVQ&Z8k``W zDg__{mM0$kjw8XJvI0l6OQ{y;g(OceX2~x>;SJ_=Mr#Y02vsFI^-Z}o(6h-kJ-BX@N-U0|U6$89>qbhL zhyOOV6Iy@yqxBiQxs{D z$=Dyq!%Ha{qJ6Izt~3nsXe{jc?NcKqEY&&-4NC801h;N&8M2R6k7(dM5q?rii_(y& zo{lMu^^!;$HtRJG-mKPk2IZ8@h$sG7X50sQmV)uCFeTuuD3+1TETYs|aTgyMQ(B|n z;%&zgSY3@-o6O=l6iZqg-z`~ztb4H(g%M5{dcij{@pPqv>61)CWHqJ)j0tX}gaujm zF(ur+%$lNj=lxcufLtUUt*{F(zaR?H`C>obC1c;xXItbURTD~h1m_mE6a3FP91U|Z z5rSex5A0gy^BvfEap@);Z1F|47>s!mht;D7PUzA7?N~VSq?s~hXUuw4EX?k_p($lSP5>UmDmA-dFT2Wb!R2gT$9R26z_Xkz->i;ZfCepk!{)f#O{? zl3Env#|##Bj8~983^>tc97>C`@BsXTgyktkvT!q#QCxxnXG~u?X04^K+#))2upR%Z zcFYoyWu$$#v_!v3e_lqrLvcaLL=^dv<*<(3vN>ORL}+@;SPFD>aTO4xtY1A+qbuGa7q z91}Lc=K#xbBZ~u+|KU9TGGrh7r8i*>`L@Yaw1^kQ`07PGZzu41JihHa_C-i18Vjo! 
zUB#i@wCe*hPZDkS@L@?j^ZSnHB=8{z3y*M!#1uK5^?1!=#dxUHBl@4y0kD*Qi-m>a zr((;l{)NiOGAm+1Ju4N9G8)o#T36S^&kqOchK^wxCTxQ?>S3RS2uK|`>lrx?Ib03i*lC>Bvu1l3LB zJIPW|SdUkkN4^M8FpQUeca8MopE%&A;yXB$I%A0&o<%69${JCKnoVigL+OcEfdE5< z8b~x!Kt4O3q7XH@WARc7FJa>$_SPNbP%@zm3j%|eUGNjs73ApMZ#p4{Ls1xg_=IMy z{z|_CNB9hV3kq^CSeGr}) zVcGIWYNIRQ^^t9tGH!ecfmi#reF{wTNpOTV8b2h|bgl`L%y!@C6FpTkKv@4>NKfWQ z08@`%*t-DUtJhSUn{cE7UWPbKFT3H|W4$`s;IFb1)TcMdC*OtF#jz&{&-8It8n%hY zFpVdN>18)edn}J(I_~_viN`RFCx_`}H%xo1FDN%4O_+an+!iEJX78H*ML?~H*LMh??>a+qFr!?ed5rsGcS_CCG^zVKc~e@*%II`gjsSQlZg zegF2~o^~Ak5zOA|+pR1F){B-HP6N40cIst$dC=ylax}(e&unWO!j>+m!~5 zj<==&&Z{jJ$y*LALy_tyg75~$w(=@NJqb2Yb}w%GG43kK{5VpbH?soXw(1dgL~sAJ z7+`5t?oV*(g_LQ{Z$AIeA)u&z^LK24*FJi@b7y1(9|g+fv5NWAyg^2&y6$Ktyo%A| zoN=e^tXvpErdO-}T_aFZtZEq@D|zksU|C4&s3CSz z?&VmNt}_GHEoHl7W)SB=f7nK+C1XzL0gHLYpw|-Km^sKwpWQ!qL@L^>3g^3=Jt|1% zA#r`XS%nbre9P;-F)LseYy-rQ<)KewnkzrI4^d}pp9^p@SO@B&R|}UCpF!BM zdAi(UM7blPF(p$D-W+^nb&hC^7Npg=nZI8zg*RsU=h^93c`k+R`z5i9bu0t4M!B4_ z-}HoJ?$#83ImO8tWF+UEaWawuJ2%Tb)~hZenJr2|-7+nm(EO(UN$v3^u#{|dzn34j zuOXQq5-;eLiq8TH4?8kH6W-1lr@?pZjZqBXEUrDtWd~i#1_u zuJ~Tx{2Es9)%1M~z9vY&hS_9!_*WQW&!Uzxi4~C8;lVK(%*!BL7oxJ(<&YvQoT2C~ z5o?gC=Tzj$TfKng=2(jsNkHtI@W79$avhQzP zQE>YoZr4Mec0m%I8Q(V=-1Mkj zReUBq9=7D#!3$L1iYOIGo`ph{dWyf*GYtlqPaO>o91A)r0GkP*Tovo;6=5A!XB@it4-zWhfoGtxe5EP z91MOlgvDM`wPE`Oh|i$RpP*2&f^dBe?*UFLbB^nchv^iKsT>YC;NGqc!Vk(W z*wtKyO`rF)ZD!{f$kWP#kV&})0A6;@NtvGxc6MeY-M55V=<)hl^tk2zB=b#z;@Y(t zQ1u)4Z*U5=hMBURMsMlRgV*-%F+Ul~&*kXf?i zZl5lS;nC_&&Lv-9jhx{TB`J$`?X^4&g|#FzdH3kpd=X1SSo-R;~YVYJef&}oF`T6UpG1S63ww76N z6Ix}a+rGq5h+)0T%uTKekeTdJANC8^Ri%`hR=c z#k9VuV|LOHPL8%>UE2wpjKQfUNsm#y4q(jjdC;tcayX&w;`nD4Kt$W!9Xa_MmV?ZV ztJl}-!<;BEUNj#QF7Y4yy)w=5AqB6594g)fLXvxS>`5U!KP(s9xTxr0;tQY9F3bV*zFVGik*` zCrbxv{QSt%+dFn^ZJ%#>G{1XxHua0i4*j0TkD;mrzrX?STTxYt!>ihXaF3 z(`X)CaQEvuMl@EwbJSnfFQjqB&uZM4Ue~(inE|&~PSrakh=3K*)z#_Ab`qSNWyW{BoB<%{FPhy{{@ekV@w8@=z zx)D$Njrgap*7iWKpG)c`>N|5^-s~2;?ufxw+gqot3#+o6{ziG!JpH#3Uun-)e&Ky) 
z$w7ynAMX5~NI~mMbNdJGG}sEq(iPYFy_sdHwT(y`cE`n>Cfz3;Ls|?dv01)`p}(sX z{!Tsj;P^kg!rM~S(wD}i+Q=mtSu;IPJ)n8i<>vd*zvy0gSC824`|t~m5Z))(bWA@( z2i5j?zi~MYfgy>*>btb@pjRNt9*s4x4j-hH`J8fCxd#^o!HnNZT}oIYn3e?^(J z+@j*H3(3@fR*ooa=J;s?C1Rk_!sPX4l$thKXV*Brq!WAi#A3heR?6d)PiEE~3u|p3 z44TnzV@n#tT+$}&a9=Zo8n@3ky4Mw&YVK!cwX&-{l^S>581eFG725OU18Fa&Y@w}M z*Z%aQ)E_Ew`mQfg_d3n%)x5*g9v7#qecCNBxh36G_r$~JU7OQjc6-d>WyzDe(y2DO zsk2L?<%O>K^&1x?`Zz0p{0Zq%paiuUUm&UC^IBBZbAausM`I}%TJHM!3odd-9t^B= zY$%mTPLulkrBWU@`SUi=zYR?zpVu9@T3p_9$9L)F60GP`Ty!o|@HkzTJC9XVRwCRy@30iWZ`}qQAp-+sg_4@0Z=Tn$m~UO<8G^dTt&i^o}D9 z<~LhHC38o*OqRFGbj>ikXF*p>=X79=RDimt%b~ZoN4Pej#!k~upYQORs!UpS9aeHa z1>>7LhXn7T%sxJ`!_VYv)aF|H(cQzkuFI(p0p>S!*W*Ui6-r`!1E$AsBGBCAvW zMcr@r!-*Q}+Kj3m7*0J`*Y{YZ;%|RZ;~xR@obRlslA7;B;?KXL{bY=?NIF5w3!O>T zYU_I!q&7E;=!PvTMZvB3$93;gcUYNSV*RNTWexhWd(GoTlWxhUy1(tz67Jy!RX6QY zZ@x9Ai*RS-qLJzE#~W1XLed=@lnbVQdiur0LQEDzusa@4dUL_7*7~Cm* zlfhQ;k6!w;*-C8|4o*rl-7(UjD!ks|?VUeTZ`%E)S%+QLrwpoe`9A~(-K1Fx=i_oI z%eHawc3KHNFZlI#0p`cfb zj3yVUqb;BLQ{(HqpSD^OQvKinXKEZbaqzdp)IA@KsMKrgh@(_eGp%5|^@C|$UCU^c z@zt9&?X?`f|H$DLwE3w14K}rUOO5+@oVnSaGE&!I>(-w}`DsRMx~EFzLNukhynO7p zX3tWZbVt7!b2{}4-AbF*MiY;EQLU7Xs7HjTGb3^G<3>}=@?;YLlJBy4gJH}FHTllv1pHtD3sKl>a_U%*} z26WYp2l$3kaBEBbYqx6BqoCXKn;nRKzmCr5z{<4~CzhthhZ|Vmds~xsxaj4GR@w4o zSij1q3Z)NHW23A^T@QOuRe^_gw_BUjex$7fhj?zGr(FV`%=NS@FL-j!t63%XguK#9 zp0(*A;^&iEY0jQf0}EDKO(l8pSji(SZ9U{R@)yH)wABatZJJ&n$qVtw1D^E!Z_6$( z9;;QSA(>-q5ZRIcvGJERT`91t*Q1%w{Jas5<|aFPm-N+2KIxaEw;Ufp)W)0*dzzFQ zNF_68&)@U=G?nt_CiLeyMu$d5Aky6*F(HvsTKDc;p37-i`l};T%Xe_orjMipP&? 
zrLhY~D%Uv=9xD%VCoja~zv0O|EW5mTtdcG2{X6%jR*L^^y2!ZBL#-6^De~0%m}hw+ z9(ll%d0BQ_t)0q@=|wJw?q25D>H%6QVRlf?lWHjsapyk~kNPU@AergDnT zq5Nyq`D*$2MikVb0^MP)v~bL%`+?hLQJebIIjeu7s`~B54KdqBL7fpBs}@;DRUMoI z?Lrn&NljEGT-xaKoT^%x_RxV5y#l&Droxfw#uUjemVQ&}B0P_1_NrzU>|bHfl=QRJ+sc z_6RwB>t|m0VY*){^*1{_WnuPxDrpr}$gy4}3R=p)jMIxVURG1O2buQNN_Ow)v6EKPFKbpM;;0)1i{w|A`sGv=p?;~A!aRE2t+2#`s-`-e ziF>z^N@}Z}D709YP3Kd5s486DDl|pCEZ%J_uAdxTo~nYgsOUQdbHCl73#^r5AG}kJ z6+35i&YM$Glx|t?$L+>Z$A9zN_3S=t3RcMZ+*vCv=)QaCppIKCC*AV$k@AvYLgR1BJ9G&iZRyUvm$xcxQJ_S3I*ly)UH-Knl^Dr4 z+5NB8wZYRa$+J4$M5#CBR&6JD>S+|zTc>n1&-x8h-D4>gT<5%g``zw01p)GPY)hkX zQu6^VM=y{Yb7t45qjOyTiAt)aP;0ueu9N8_(JPFSXC;$zg$~md^{gFnln-nUUA*U8~YD}LL&~ho1=24sQZADttH%MrGsJZ|`o7`K4 zbfJxMvcIpo#tKQIekS)g{nGRa^dIGD~eXT1tAF^=?qNi5;WnXzVl(WdM%7Wv6`7*eo@(XAiue zwd7}#R+=%!ytnH}O7YmM{?x1KV#aT8uDsqFx39jOl2qcS&J})d=;p{786S6xlC}ih z%8V;C&Vr<#FEpMn;$Jn_dRx-JO)u!T)+KFCM6V>r{iW+GFQF>(fo_aJ+b(3c}|`3Ur)Ov z&#E+o9H2$7s}c380GhWvYc`}gmh#54c4BgBnF|!;jMx%Q%x@!un>;B=C3T)1zCL6z z1y$7)_pGg!xXyiW#@3`9Rn>^AmcH%=CH<$3Z;tIAN+q?_yz#7KW=E?ztu#Aa&3>;c zbU${aYbBfGFVl+@wxk=U8C0cJlYImDi}m;*K)pSi2@|a(%d+N}8yv zfp%tz^B=r#u9e!{>ge2K@Eoct*QRXig0Hkvy9LD>xMk7`$IGULGIMxYca#_3T4{o@ ze1bGW_>HB(Pk!H+vQ1eS`=nnf>Dx2ET%snl&w@+&ZB|85;|1zD`yBT>oIh$hwTV#| z0iyBAnlum6)u0mPrujtNQr10wohUQqRfS(48o3Ie%{izt3{cuBNhT)C=|h#2TJ5Xu zM?X^?ve71FBK72M$utdH(Ep5}Yp8d2Q~HH|wOlhoE)i>QJ_HXHQ)tqY7nz|))ICr8 zrB8k8t)UEfp(bnW&|>syW>B_OnK8NZL@TNqn0=qlj7E`}114P2P_D-urBq)ZtCcn% zmeY@xaj`+E@_WLR*_}EoE4iS(Qj?{8*jsqK70YE7G`tXj=Qx z&>p)wL0P8wCprwjIQ9ee_mXjHW^X^G?Af<Nhi>sy=TK>j{S z*T%D9TX}m5>ZxmQPjhV-t9R7DT&@oI(riNB> z+(~1dyt=c=ynW_J8V&SbM%$Eca$e;s%F8<{DXX5LR||O=Yerwejn#vCnb-5$(5)Y} zDbVGX)#AYvG&p`FrtAg^8jpHJU)|-rQFGL*^^fkgt?5GgS*j~8%46mGvClaZu3jHsV^=C@YS^aH9c8^oJLEZ=2Zo*MO~Ygv@?2dC>PM(c zxG)d--qQjotbVl8&GlnwsVYwxw3V{b4slcmdVQBcIqxY_mAX_8>qZlzJYlT)qs|K0 zN4M#*8io%&H_+UoXTWucHzxRP=)y`nbI4AGdQX-ZGK1U#AR zX%_)G{|QgJ6}q?~?n4cF+Ljd&Uex;z{cKWrj!`|6d{pxJ(xXOy?6kJ7f09Q&JsKys 
zNnG{uYI8aGPjScfnt46vr}T)=B`a!#Sv7h9>)Ng2i=#2d^wcgiU-}r2ZF1Gl*$Wmt zpof{kGrn8Bre90)uUzA#XXdRrbYS|riY^LvD1Ha z)zd@uz)J0kb*kNtN+J$@^#7b_P$H`S8}Y~kp3KX#OS-Ig%%b1Kgs-zJ;A@KHW>}Kc zaM4|Voz^zMFTk#}uDwB3K%F^_dM&M;Q<8FKutYx@jahE8wdFZ#yvAu&xe4@Kdh^O2 zCx3ltNmV0k=T9v(oJz9meSBoJNNXE4rsuEcsr0NkFCMFWOmxTR6=?GbvA>NQ9j2|S z*2!8_l@75c+xuC!L-edTq)|ubZ*|{LFnnCmj&FZb@Za!c9+q8RJl1RTsvFGpp97!9iJ{}xK$Fg$vDPH2T9F$oV z(SHV2>3eH-$5QvxTUY6q;j<(U#6z8xa~?7o{!2#6n;4&m>*%K^%E*5mtG%R5`h3Q; z=Q?^KPU$%Ls@h=Q=-zQURrlL1Y(B&vq!ODh z4pxai293SacYwk71O2}^@NaNei=``50o=$e&EWrRVXkz+a|bZHU$=47M7ZOjDkj4 zU)pT(r}Vv?pk0|Hr`?#$gF712-3jT`#LYOIx=l#4>iq^J+ELJ9(D(Kq=TYMe>Do_4 zE>a%1Io9;XLb|yjV;jvnx$+QIEh#+nV3d5Uv0rq-UzSquZa>j_PKE81WEXmLuGgKe zM)bL}qZZAfQ;k07-n`SQZ`8&nq5GRe`ZY=4H*ncYOFEyhSNHy;OerrXtG4){9u=vo zv`NZ!2hx1cO{}hM)#jL30#H9B-2UMb9ccQLOE9wbhpMyqhSl5i&RBm?d z#<`i4TQMni3l`C@)UmOn`nu0>q980d;o))`jr1`)7x+H8N`ZCTd&R#8P*vQwZQphC zsl;kecE(fsbxUtu=512GRTOx%wd%9bg@W~KX0CWrn1XKWJ>BjNqM+@zH5&_epup;x z!{m{#DHu}FM@x$^3L2-j8gk*U)wRLXE=nn6lV)Gn zuV1J1jlNI8#}XNZf3%Y)=EcdIhfb2)_^&OdnIH#Y-3}e4p^YZyUN(`z)HrMZn?c!+ zGd$&4N7Q94H8hT9E(PV7Vlz zPeM<+`Pkv|FYA8JY(RtE66?xor5f4Lo-=}^locM-=G^(WgRH&i1%Q}o%X&1WE9&hv zVtv1|v_1%|)PD5+vLh&^PPfVM$)wBg-7dY?+avU*1kx^d;2kh5)1i^0oUARZ2RN;z z&105k?rcB_gvN_@ZT}O!pFl=vr>`Sr*Aot#iHAWc13EVe;$uAr`F3GtH;vs=ph~Yc9tA2P|$u`lhaQqOG1WKduHe- z()DPo*9{u4O{Y;`-?8yOBfZ{H&|%x&v;^vY(I*Oqr(PURB?Fu8KmRL)0;~J~Ms}is zQ$J~TGwoD5XMMLtf6SX4k*j*g?JYrD#m1JLIp8<-RsFDruIpdMQ%TIbe_Je}iB3Pn zv2fBu>fy1Q6Bo3+wSr{L;%x~lHuzjBq|Dw>_;zJAtN+>DGL4=XlI zF4=G>pCv8oT(Z{gvz%I7YwMNTEyLy$<&7?6U4`rI=>n{%dB)vg{Ybf_&e_TDXw1-s zd>%U6BPYeJFaMjFlbXM87x17fx}wmAsAFSj!qA1d{c%};kVuklO=LQEwwjuC8W^&!(IAuUkn^EKJF4DN-pB$a9_XE0y zXcO>bj>R>TW|Wcn{;B6$d=9la_IlNmu=^AoTu@-=L+V!k$9@%x-bsTHwnE^ak~S?e zY`i>G5Bf5?$|Xvvmc0sn96o?jYNBb$0<-AoAsrsv8(qwps+w5qOui>kN&EJx&9~A( zwK;J1ipgsz6+$Y6t{ZifI(3M7=*gdUdnp*L&0JKUX4a5`ja#oAWI;jGm)&o#iKC!A zeU;sEoC1rw+Qho96bz4?QR94Fd8K`BNWE2Ms0(*o<~wHIISMA~P4;yvOG&op{M+x- 
zDgVM=7hK;(>Px|wRI^q`sMkeb?q7Vy=iOBDD*KVg6>GWi$)^>Ak|{XXX<qHyZbAQdp22Za%OzLFRf+VatnHnDVEy3Qa>>TdGuP~w10QFP>~C`8)Gq@w=+3xgT`sX`&uBR~ zR%$8wtsiIcrMQbM;&t9tGT&40QO_w}ce=FxiiR-5e^Mt+ok$2?n% zo!5)1^u3clWz#`*q5rH2wLB{ajo*yUM{~bUf39?cvK8o?ZrbMBx6WOYtIV{g-4f)W zz4h4jl*hE^Cky_qNxmwaD3genB%D=)G;e zxzEj@gJLW9h)pk#{YxFzJ;t)Br5R=R!XA6PLrthVh&EAEPgzx5W>A7EuS@xN_`8hD zNhyVk4Y>BF-M)Wlb>e^M+p7|_E>lWXs9VUzm@*)sz#{9|$uvMCH7Wnptu6;wof=Ag z+5bp#pv4JF4y0y4rP=NGj-wGP(B;U4-|Z-&v6cVv`deL<}361 zHvG4hoSNMtc3V=n3K>=WZodjWsw@1=lU6UPF$aZHY zZM8J5X7vCXi9;-s?Fvtz^NAWVaM{*g^l?I(#yx73X-aJ_Y;^bN>`qlN!I_gMKgLhpR=bRzu=%BP-Z)iBt_s?8=t>y>0q(aioD>QybU$+cj`-jFteTS5r zXE(lW1y6n(vo{RG#>azej%@7;sWd2PYbRtcu7sp@Kb{nHh4 zk`3&#_~WIQ@}6A_+1{hoN!aJTUyc-)v;W-BlKHz({zYH)Ejzms-P72lpQU%X(GK;) ziobK4MV~7+Y~KAzvy7-@@GVP=A=Kw%!?OB)i=?qoZ}nzZ*i%ZG*i(zHL`}U(L2{$L zd(KazsZJz-)@3wr{1;Kx))J2bk_S`Ckc!ze-%(z~ z9(?gQv;97L+9lx0Tu;0H7d*+PVr~=gf53H2sO8or^Dq?!1(=6dm&;NEoG9QwSaegm zrqGKBM5rBn;*}E!ffAG=C_rJ+g-~1CN>G5gEIOE!LRlkZe9Mk64+ILrIu7Z zr3Q6ot5xN@dQn7;>x@@Rs_s`y%C}ZZtj#^tpsE`58PiHB5f)u;obqHBrH#YMD@xG$ z^mQ+a=-*loZjq7`bUrzj{#Py;GtVn0&}^Ha2Js`6z&^kF*&RR6S4)h1IT)=3s#o{d z9AxmFklfGkeDoh((Lg6Pz-9YYP%Q~+tOTY%*Qmj__G<7wQVo7{<3PR-zv?JecKO

      Wmr^B^%G3i&oBqQ- zC{>+LBq}AH&sJ7TPBm78)Z6$+^bGzlo9Y+W-a8l?t@cPl~X6Y4)$ z??3W=3Kfx;c&4*b1uj(+-`gvv%D%u&rOMveK?xlH9i#^D>UvQ`7qXOk(;PnzQ%edC zl7qjxror-v22>RwFbUW~?Pwn+fj^bN^y?k9M9*FGyLxt}Kg`ss0KSl+Pt=lS50t?4 z+bFeUZO-e})X6F@QcCQ~E%Ty?1j>><)RHnAloIQz{nZlF0ZK`yk6cwjy;prEcuCZV zuJKVdB^{FcDvceI+bcomb3sbbf2=+wpWL`}>Mf;26DJo?2_#?5wijyE7ImbuFSb@` zFAbbqzy-Uiu2Mp$ESlrTp>|ygm2$&%BU~mEPVFd&x zro^b8hE*=X`oINYB~vhaaJm5fKnh_u6Y!ppyO{nFb_h$Fg5rba1q=w92-}*#`82G1 zX}|-qFCszPWUfyG?~B184T~cZ`l0WY zv@zz2B=$};tbc+J)UZD$03#MqH1I$f6og2o0cn(O3ahLM zL=6-*Fidb{Ur0 z3hl~ySxp1}mHkK!19U>Ls(!vx&g)yz+u3 z&{(LGVC&#su>vhIBMXO8Kz5L@%*$6)-cAB63z@QjO54H4(zu?AdXjUyLCwOdWb6)F zmh`;{?O1+6n(ET@6%D zuzsow_SZD>O|X8_18=>Onyh3?%+sA*n4)fHGp>^cMAIN!^WJCqJg9hikB@< zV1Sy{+uzCGt*j#fPwQW<~%?U{h6VA!IJ6KcvVQ2*%ztZ!h!tc^z*hz!e8f%(AHlG~&zI4$52#X&h8BtW#i#_|pNf$AK3^KNWo9ax<@- zbB0C^&X_$^i9top;*aZ6avH-Y2XM?nss_ZO>+G^MsX!N9>vfIH*)t4t zMvx^as?0D-H^vmqS%yG0*D#B$VOLeajQ!>X(T*v***Om%%s84`cKG%Rw z7WOLRjYW@+R;EW-=)jbru`=f<{!U1GR3=$D269Id&32%b*^CO#vbbg%FGx93((0kC_#PO9>}#y)*;D6b2#?HK8!ENi0xPRuRFf!DV{8d(yI{<>eRT zm^p=$2xv`6P3#s6_!M#?(`&GsAQ^)~6_z4nY|xxw8v{p$t|)jm_MLPC19c-i-y)0+ zTq?XpfNc<;>PN48v5Q_~1E4AdMkd@~KtV(XsS1-3s2fx$UG?W{J8teyduC)6N+Xyz zj6J!7+-@V<1QvQ;CESW$auN_LOOBx6(Ae$d^h$O6)7yriSy2TXF1IcPtTngxh|P7E z4}1ysr8l&|x59tS4an*2i~TOpRem2U7wC!|!la75)9as7Q7x!(F5)#eGKbB9zCw)z zJ15$7E@9QinOo}ldVNC+7gLVb`8F(!8 zN$_>>uXO#Z4hlU;Z&rbmg+~d{4kDIIpod$X+7}IgSqPON?qFnjrWUX6I+M;B zC8#`DTP|-WRJff$FJ^(R74{|LdC<3DQ3JDuh6zp&9+wOK?Vx=V>CI2@w(v0l>_O;S z(Xwe>?=B(Kkr=#%mouvb!-RzoBM`9EgswP-I zxL!iJW+oTTCZIl5`uc13FnE4Nj9G85=;0U0%!7N)aITOyneGE*4+$HTE-X%g`PFP3 zIqN&Ur3)Kd;JU1Hg7*{HJ!{$Ec7@l;;2-#V;b$|z3&9fvAV7SG+8}vhdIAO1wc4=u zpATMiuh`fYNG~g&U;kz5)Y)VrN)NN*a;eHA@5R5)M+<<%`fr1iz_oip_*7I8$ z$lRcPVS@q}^s^myu$?Kr;s~o7qZ$HmgSrOKiACJ#wbeegjfI2wlvKiC;=Lb60n6RK>$V=4rv%ehG>HzH!KX=aBk$%@de^u8A##S z3RWqw!m7zzM@2PszRC>wTkKJ^@%aB_^Kk6EFx$V?# z6TKu4Nu4QXVYxB}ss3pM>Klr!PJx0m)2C2$*Z(sQ313bCRS<5N$Pi#8i*q2Wgn0ur 
zg9%9(bON!0f&)gQKBJOQ>A+gS#bH=uZa&!1%3cLy*_h&v!VS`{F{r!_$hEZ~9A5(W<=9$rjZ z3KNe}EvP(#^rT z5V#V7ce4HmK1}#S448oPga-&v3?dOolUN2mAp{Cy3`P+nCm=sz1_B*}S_Jk42q+Xm zFl2Cy1cbsYsBi@tCWCAQ8fD4V=O@0k-ESZbf+~Y`1S|z~DD1%m#>=1|flmP@Dl|go z%-|sbREcL^6h0wWu-Kg;W{O)Sa-hQpT%99n-V0D}moZ&w?clj8T6~9)zs~kK&5U1+W(4CCEIOTU$n{Mkokd7??oxpl}J38mKL+m|*qb za;=)6o(7m(IGKRUb& zYv8y-*96a3QRVrBb`36 zWo^*B!uDhw(A}A4$Cq&i);6$SXrF=$WISK!+t5DQ76c#&PCvKD4G|QiAk7sju+Kub zAbxWng6A%n-D0X?H5BubZ(Y?8r#9-h8fJj86(B6w^R$VMW38B<1(g{4Ld6^* zkAp6TeF{9$|6m#N6weY#$rd>CBaAT_z9b{|H!#hCB12UL%NQTjLqThyoC_wIomD`M2`yxAwIU(u z?4AQo7V;|7jp7d(u!IguSy-%$IVSXXQqXo`qcg4yofW)ee6p%%s#-b`kP5d9ua*9u z!v76?gcv8tDK=|>UKkRPiXjJmMkqF5Fc^iz4h>i;xHc+`VZcPfnFCY>=|-h807n|g zbD*nW->7&7C`nj!0<&TbM{pWpqk_^cm(Y`4*69Rq#Xb&z4Z%&qt7Gs=h&doPLKvWA z!43qLFm!-$fTU#84k(sTb(rM_2Am&B{ zzR7x@;J?@p5&#J}PWJo+2qr`!l_U{BCkuZd!e9&mDFN9D^G~2*P=~;l5Gxee^pk9g zs0yH9#MmJMWCGR`E+AkrArpZ%VdzsxfuP3NDgt&AeLk}0*5{sb5( zG(o|U!7~Dg0v;5;AOmGWIAS72&tNFTK|z+Wc?2{Cgs3nG88H(I5;H2Xhr{N06ig&E z1u7x4W^j>ISmm#8AO_E`Y+WaR`2J}MT_nMbW~ol00dxi_Np*t-Jj!w*=rhm#^wW4TBlW}pC=8`RfR^y04&_7&BncB2Gc77xfxbN2u_&?#32}Fh3p6#4u|k( zX&F?js4~Ff)ES2DW#Fwse`G!m{uDH3aIV6G1SrQM6?|p@uMi?Z%)zJ%8NTqJ7E6=p zw|S7SFe8D^v04Sg83?RUB$=UuV+GY2EUa)P8K#456@AjSpxfRzG!iou3uzKm9jq(; zng#AX?H|#o#&j&~NrBf1{c8L_4;s+$1xywiB{(~HSVD(pUe>^;RDnAoWPRQfQKuRK zTL5Q;Sjpra%q)G|{n-}-=#>``v@k3Mg2#%M{#cb^oAq5~{fbdqsFq;y*wqrsG?-d; zE&=61+7j+G!?lnvLFWm3YlvsRnZa#ja!aLbg@wtOo~m*EHG3F5zarqy|KW*Qg=NhWeRXl<>w9jY5=&Zpb3J{&FGTP=>r>R5@XRCB(8y}siJ%! 
z>S0_1$%V2Brca>ttX_l7g}Vv3PZ##O|D$pL(1ZX78<1|ODsh7H3-s+{+%IAFUoy5z z>%!&~xSyczRVy3VuFyJ}`vZOtJsbQk{7wM?p7^zFQGa^r(w#kRe2c(n4H6JQer{nK z#wUW@2OF{ zO&JwV1#%F^Fi?Rgia#m9 zCOJ@JsHk8S^`R4s)z!*-W~E#($Lyp6TGUT^l%8;kU`*kbcLRK^%Bg~0wC-RvV=v81 zLNW)13`>E?YwA@;vu<3-Bs3LV57AbRUe}b)JNKyE8 zOj8OG5RhGFnsVb*8c?fIX}iDHC>5Jh6H+8Ae4YP?Z_0=y5X897%F#8faSEv~UcGd}mo&bnG=|t|&rqi$e8m5u zAsVc2H6}r=m3@Fdg>NdvNkEB$MSQ7j1-jG3-%BlFDAAM+PZRivOm~{Fk0_i}HYn}d zj#2hw4KNj$Cu5YtPz2^_UPs*ugevI7;rHb;qx%WoGdEZXOfeVuDV$0;pJ!E+7KCbN s$e(Z)6HY1TqXDnTmZe~78C Date: Thu, 9 Oct 2025 10:01:22 +0200 Subject: [PATCH 053/105] need to run the grid paper 1 and toi561b --- folder_list.txt | 958 ++++++++++++++++++ input/demos/escape_comparison.toml | 16 +- ...85Msun.toml => escape_grid_0_194Msun.toml} | 24 +- input/demos/escape_grid_1Msun.toml | 8 +- input/ensembles/escape_comparison_on_off.toml | 2 +- ...85Msun.toml => escape_grid_0_194Msun.toml} | 6 +- input/ensembles/escape_grid_1Msun.toml | 2 +- input/ensembles/grid_toi561b.toml | 2 + input/planets/toi561b.toml | 12 +- src/proteus/grid/post_processing_grid.py | 2 +- src/proteus/grid/run_grid_analysis.py | 36 +- 11 files changed, 1016 insertions(+), 52 deletions(-) create mode 100644 folder_list.txt rename input/demos/{escape_grid_0_485Msun.toml => escape_grid_0_194Msun.toml} (95%) rename input/ensembles/{escape_grid_0_485Msun.toml => escape_grid_0_194Msun.toml} (91%) diff --git a/folder_list.txt b/folder_list.txt new file mode 100644 index 000000000..9f99473a5 --- /dev/null +++ b/folder_list.txt @@ -0,0 +1,958 @@ +case_000000 +case_000001 +case_000002 +case_000003 +case_000004 +case_000005 +case_000006 +case_000007 +case_000008 +case_000009 +case_000010 +case_000011 +case_000012 +case_000013 +case_000014 +case_000015 +case_000016 +case_000017 +case_000018 +case_000019 +case_000020 +case_000021 +case_000022 +case_000023 +case_000024 +case_000025 +case_000026 +case_000027 +case_000028 +case_000029 +case_000030 +case_000031 +case_000032 +case_000033 +case_000034 
+case_000035 +case_000036 +case_000037 +case_000038 +case_000039 +case_000040 +case_000041 +case_000042 +case_000043 +case_000044 +case_000045 +case_000046 +case_000047 +case_000048 +case_000049 +case_000050 +case_000051 +case_000052 +case_000053 +case_000054 +case_000055 +case_000056 +case_000057 +case_000058 +case_000059 +case_000060 +case_000061 +case_000062 +case_000063 +case_000064 +case_000065 +case_000066 +case_000067 +case_000068 +case_000069 +case_000070 +case_000071 +case_000072 +case_000073 +case_000074 +case_000075 +case_000076 +case_000077 +case_000078 +case_000079 +case_000080 +case_000081 +case_000082 +case_000083 +case_000084 +case_000085 +case_000086 +case_000087 +case_000088 +case_000089 +case_000090 +case_000091 +case_000092 +case_000093 +case_000094 +case_000095 +case_000096 +case_000097 +case_000098 +case_000099 +case_000100 +case_000101 +case_000102 +case_000103 +case_000104 +case_000105 +case_000106 +case_000107 +case_000108 +case_000109 +case_000110 +case_000111 +case_000112 +case_000113 +case_000114 +case_000115 +case_000116 +case_000117 +case_000118 +case_000119 +case_000120 +case_000121 +case_000122 +case_000123 +case_000124 +case_000125 +case_000126 +case_000127 +case_000128 +case_000129 +case_000130 +case_000131 +case_000132 +case_000133 +case_000134 +case_000135 +case_000136 +case_000137 +case_000138 +case_000139 +case_000140 +case_000141 +case_000142 +case_000143 +case_000144 +case_000145 +case_000146 +case_000147 +case_000148 +case_000149 +case_000150 +case_000151 +case_000152 +case_000153 +case_000154 +case_000155 +case_000156 +case_000157 +case_000158 +case_000159 +case_000160 +case_000161 +case_000162 +case_000163 +case_000164 +case_000165 +case_000166 +case_000167 +case_000168 +case_000169 +case_000170 +case_000171 +case_000172 +case_000173 +case_000174 +case_000175 +case_000176 +case_000177 +case_000178 +case_000179 +case_000180 +case_000181 +case_000182 +case_000183 +case_000184 +case_000185 +case_000186 +case_000187 
+case_000188 +case_000189 +case_000190 +case_000191 +case_000192 +case_000193 +case_000194 +case_000195 +case_000196 +case_000197 +case_000198 +case_000199 +case_000200 +case_000201 +case_000202 +case_000203 +case_000204 +case_000205 +case_000206 +case_000207 +case_000208 +case_000209 +case_000210 +case_000211 +case_000212 +case_000213 +case_000214 +case_000215 +case_000216 +case_000217 +case_000218 +case_000219 +case_000220 +case_000221 +case_000222 +case_000223 +case_000224 +case_000225 +case_000226 +case_000227 +case_000228 +case_000229 +case_000230 +case_000231 +case_000232 +case_000233 +case_000234 +case_000235 +case_000236 +case_000237 +case_000238 +case_000239 +case_000240 +case_000241 +case_000242 +case_000243 +case_000244 +case_000245 +case_000246 +case_000247 +case_000248 +case_000249 +case_000250 +case_000251 +case_000252 +case_000253 +case_000254 +case_000255 +case_000256 +case_000257 +case_000258 +case_000259 +case_000260 +case_000261 +case_000262 +case_000263 +case_000264 +case_000265 +case_000266 +case_000267 +case_000268 +case_000269 +case_000270 +case_000271 +case_000272 +case_000273 +case_000274 +case_000275 +case_000276 +case_000277 +case_000278 +case_000279 +case_000280 +case_000281 +case_000282 +case_000283 +case_000284 +case_000285 +case_000286 +case_000287 +case_000288 +case_000289 +case_000290 +case_000291 +case_000292 +case_000293 +case_000294 +case_000295 +case_000296 +case_000297 +case_000298 +case_000299 +case_000300 +case_000301 +case_000302 +case_000303 +case_000304 +case_000305 +case_000306 +case_000307 +case_000308 +case_000309 +case_000310 +case_000311 +case_000312 +case_000313 +case_000314 +case_000315 +case_000316 +case_000317 +case_000318 +case_000319 +case_000320 +case_000321 +case_000322 +case_000323 +case_000324 +case_000325 +case_000326 +case_000327 +case_000328 +case_000329 +case_000330 +case_000331 +case_000332 +case_000333 +case_000334 +case_000335 +case_000336 +case_000337 +case_000338 +case_000339 +case_000340 
+case_000341 +case_000342 +case_000343 +case_000344 +case_000345 +case_000346 +case_000347 +case_000348 +case_000349 +case_000350 +case_000351 +case_000352 +case_000353 +case_000354 +case_000355 +case_000356 +case_000357 +case_000358 +case_000359 +case_000360 +case_000361 +case_000362 +case_000363 +case_000364 +case_000365 +case_000366 +case_000367 +case_000368 +case_000369 +case_000370 +case_000371 +case_000372 +case_000373 +case_000374 +case_000375 +case_000376 +case_000377 +case_000378 +case_000379 +case_000380 +case_000381 +case_000382 +case_000383 +case_000384 +case_000385 +case_000386 +case_000387 +case_000388 +case_000389 +case_000390 +case_000391 +case_000392 +case_000393 +case_000394 +case_000395 +case_000396 +case_000397 +case_000398 +case_000399 +case_000400 +case_000401 +case_000402 +case_000403 +case_000404 +case_000405 +case_000406 +case_000407 +case_000408 +case_000409 +case_000410 +case_000411 +case_000412 +case_000413 +case_000414 +case_000415 +case_000416 +case_000417 +case_000418 +case_000419 +case_000420 +case_000421 +case_000422 +case_000423 +case_000424 +case_000425 +case_000426 +case_000427 +case_000428 +case_000429 +case_000430 +case_000431 +case_000432 +case_000433 +case_000434 +case_000435 +case_000436 +case_000437 +case_000438 +case_000439 +case_000440 +case_000441 +case_000442 +case_000443 +case_000444 +case_000445 +case_000446 +case_000447 +case_000448 +case_000449 +case_000450 +case_000451 +case_000452 +case_000453 +case_000454 +case_000455 +case_000456 +case_000457 +case_000458 +case_000459 +case_000460 +case_000461 +case_000462 +case_000463 +case_000464 +case_000465 +case_000466 +case_000467 +case_000468 +case_000469 +case_000470 +case_000471 +case_000472 +case_000473 +case_000474 +case_000475 +case_000476 +case_000477 +case_000478 +case_000479 +case_000480 +case_000481 +case_000482 +case_000483 +case_000484 +case_000485 +case_000486 +case_000487 +case_000488 +case_000489 +case_000490 +case_000491 +case_000492 +case_000493 
+case_000494 +case_000495 +case_000496 +case_000497 +case_000498 +case_000499 +case_000500 +case_000501 +case_000502 +case_000503 +case_000504 +case_000505 +case_000506 +case_000507 +case_000508 +case_000509 +case_000510 +case_000511 +case_000512 +case_000513 +case_000514 +case_000515 +case_000516 +case_000517 +case_000518 +case_000519 +case_000520 +case_000521 +case_000522 +case_000523 +case_000524 +case_000525 +case_000526 +case_000527 +case_000528 +case_000529 +case_000530 +case_000531 +case_000532 +case_000533 +case_000534 +case_000535 +case_000536 +case_000537 +case_000538 +case_000539 +case_000540 +case_000541 +case_000542 +case_000543 +case_000544 +case_000545 +case_000546 +case_000547 +case_000548 +case_000549 +case_000550 +case_000551 +case_000552 +case_000553 +case_000554 +case_000555 +case_000556 +case_000557 +case_000558 +case_000559 +case_000560 +case_000561 +case_000562 +case_000563 +case_000564 +case_000565 +case_000566 +case_000567 +case_000569 +case_000573 +case_000574 +case_000575 +case_000576 +case_000577 +case_000579 +case_000580 +case_000582 +case_000583 +case_000584 +case_000586 +case_000587 +case_000588 +case_000589 +case_000591 +case_000592 +case_000593 +case_000594 +case_000595 +case_000596 +case_000597 +case_000598 +case_000599 +case_000600 +case_000601 +case_000602 +case_000603 +case_000604 +case_000605 +case_000606 +case_000607 +case_000608 +case_000609 +case_000610 +case_000611 +case_000612 +case_000613 +case_000614 +case_000615 +case_000616 +case_000617 +case_000618 +case_000619 +case_000620 +case_000621 +case_000622 +case_000623 +case_000624 +case_000625 +case_000626 +case_000627 +case_000628 +case_000629 +case_000630 +case_000631 +case_000632 +case_000633 +case_000634 +case_000635 +case_000636 +case_000637 +case_000638 +case_000639 +case_000640 +case_000641 +case_000642 +case_000643 +case_000644 +case_000645 +case_000646 +case_000647 +case_000648 +case_000649 +case_000650 +case_000651 +case_000652 +case_000653 +case_000654 
+case_000655 +case_000656 +case_000657 +case_000658 +case_000659 +case_000660 +case_000661 +case_000662 +case_000663 +case_000664 +case_000665 +case_000666 +case_000667 +case_000668 +case_000669 +case_000670 +case_000671 +case_000672 +case_000673 +case_000674 +case_000675 +case_000676 +case_000677 +case_000678 +case_000679 +case_000680 +case_000681 +case_000683 +case_000684 +case_000685 +case_000687 +case_000688 +case_000689 +case_000690 +case_000692 +case_000693 +case_000694 +case_000695 +case_000696 +case_000697 +case_000698 +case_000699 +case_000701 +case_000702 +case_000703 +case_000704 +case_000706 +case_000707 +case_000708 +case_000709 +case_000710 +case_000711 +case_000713 +case_000714 +case_000715 +case_000716 +case_000717 +case_000718 +case_000719 +case_000720 +case_000721 +case_000722 +case_000723 +case_000724 +case_000725 +case_000727 +case_000728 +case_000732 +case_000733 +case_000734 +case_000735 +case_000736 +case_000737 +case_000738 +case_000739 +case_000740 +case_000741 +case_000742 +case_000743 +case_000744 +case_000745 +case_000746 +case_000747 +case_000748 +case_000749 +case_000750 +case_000751 +case_000752 +case_000753 +case_000754 +case_000755 +case_000756 +case_000757 +case_000758 +case_000759 +case_000760 +case_000761 +case_000762 +case_000763 +case_000764 +case_000765 +case_000766 +case_000767 +case_000768 +case_000769 +case_000770 +case_000771 +case_000772 +case_000773 +case_000774 +case_000775 +case_000776 +case_000777 +case_000778 +case_000779 +case_000780 +case_000781 +case_000782 +case_000783 +case_000784 +case_000785 +case_000786 +case_000787 +case_000788 +case_000789 +case_000790 +case_000791 +case_000792 +case_000793 +case_000794 +case_000795 +case_000796 +case_000797 +case_000798 +case_000799 +case_000800 +case_000801 +case_000802 +case_000803 +case_000804 +case_000805 +case_000806 +case_000807 +case_000808 +case_000809 +case_000810 +case_000811 +case_000812 +case_000813 +case_000814 +case_000815 +case_000816 +case_000817 
+case_000818 +case_000819 +case_000820 +case_000821 +case_000822 +case_000823 +case_000824 +case_000825 +case_000826 +case_000827 +case_000828 +case_000829 +case_000830 +case_000831 +case_000832 +case_000833 +case_000834 +case_000835 +case_000836 +case_000837 +case_000838 +case_000839 +case_000840 +case_000841 +case_000842 +case_000843 +case_000844 +case_000845 +case_000846 +case_000847 +case_000848 +case_000849 +case_000850 +case_000851 +case_000852 +case_000853 +case_000854 +case_000855 +case_000856 +case_000857 +case_000858 +case_000859 +case_000860 +case_000861 +case_000862 +case_000863 +case_000864 +case_000865 +case_000866 +case_000867 +case_000868 +case_000869 +case_000870 +case_000871 +case_000872 +case_000873 +case_000874 +case_000875 +case_000876 +case_000877 +case_000878 +case_000879 +case_000880 +case_000881 +case_000882 +case_000883 +case_000884 +case_000885 +case_000886 +case_000887 +case_000888 +case_000889 +case_000890 +case_000891 +case_000892 +case_000893 +case_000894 +case_000895 +case_000896 +case_000897 +case_000898 +case_000899 +case_000900 +case_000901 +case_000902 +case_000903 +case_000904 +case_000905 +case_000906 +case_000907 +case_000908 +case_000909 +case_000910 +case_000911 +case_000912 +case_000913 +case_000914 +case_000915 +case_000916 +case_000917 +case_000918 +case_000919 +case_000920 +case_000921 +case_000922 +case_000923 +case_000924 +case_000925 +case_000926 +case_000927 +case_000928 +case_000929 +case_000930 +case_000931 +case_000932 +case_000933 +case_000934 +case_000935 +case_000936 +case_000937 +case_000938 +case_000939 +case_000940 +case_000941 +case_000942 +case_000943 +case_000944 +case_000945 +case_000946 +case_000947 +case_000948 +case_000949 +case_000950 +case_000951 +case_000952 +case_000953 +case_000954 +case_000955 +case_000956 +case_000957 +case_000958 +case_000959 +case_000961 +case_000963 +case_000964 +case_000965 +case_000966 +case_000967 +case_000968 +case_000969 +case_000971 +cfgs +copy.grid.toml +logs 
+manager.log +post_processing_grid +ref_config.toml +slurm_dispatch.sh diff --git a/input/demos/escape_comparison.toml b/input/demos/escape_comparison.toml index 5f75f5706..b6dc5e798 100644 --- a/input/demos/escape_comparison.toml +++ b/input/demos/escape_comparison.toml @@ -26,7 +26,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params] # output files [params.out] - path = "scratch/escape_comparison_on_off" + path = "scratch/escape_comparison_on_off_0_1_AU" logging = "DEBUG" plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended @@ -40,7 +40,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" minimum_rel = 1e-5 # relative minimum time-step [dimensionless] maximum = 3e7 # yr, maximum time-step initial = 1e4 # yr, inital step size - starspec = 1e9 # yr, interval to re-calculate the stellar spectrum + starspec = 1e8 # yr, interval to re-calculate the stellar spectrum starinst = 100 # yr, interval to re-calculate the instellation method = "adaptive" # proportional | adaptive | maximum @@ -68,7 +68,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params.stop.time] enabled = true minimum = 1.0e3 # yr, model will certainly run to t > minimum - maximum = 4.567e+9 # yr, model will terminate when t > maximum + maximum = 9e9 # yr, model will terminate when t > maximum # solidification [params.stop.solid] @@ -112,7 +112,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [orbit] instellation_method = 'sma' # whether to define orbit using semi major axis ('sma') or instellation flux ('inst') instellationflux = 1.0 # instellation flux received from the planet in [Earth units] - semimajoraxis = 1.0 # initial semi-major axis of planet's orbit [AU] + semimajoraxis = 0.1 # initial semi-major axis of planet's orbit [AU] eccentricity = 0.0 # initial eccentricity of planet's orbit [dimensionless] zenith_angle = 48.19 # 
characteristic zenith angle [degrees] s0_factor = 0.375 # instellation scale factor [dimensionless] @@ -257,7 +257,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # Outgassing - physics table [outgas] - fO2_shift_IW = 0 # log10(ΔIW), atmosphere/interior boundary oxidation state + fO2_shift_IW = 6 # log10(ΔIW), atmosphere/interior boundary oxidation state module = "calliope" # Which outgassing module to use @@ -272,7 +272,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" include_H2 = true # Include H2 compound include_CH4 = true # Include CH4 compound include_CO = true # Include CO compound - T_floor = 2300.0 # Temperature floor applied to outgassing calculation [K]. + T_floor = 700.0 # Temperature floor applied to outgassing calculation [K]. [outgas.atmodeller] some_parameter = "some_value" @@ -297,7 +297,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" use_metallicity = false # whether or not to specify the elemental abundances in terms of solar metallicity metallicity = 1000 # metallicity relative to solar metallicity - H_oceans = 1.0 # Hydrogen inventory in units of equivalent Earth oceans + H_oceans = 10.0 # Hydrogen inventory in units of equivalent Earth oceans # H_ppmw = 0.0 # Hydrogen inventory in ppmw relative to mantle mass CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system @@ -326,7 +326,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [atmos_chem] module = "vulcan" # Atmospheric chemistry module - when = "offline" # When to run chemistry (manually, offline, online) + when = "manually" # When to run chemistry (manually, offline, online) # Physics flags photo_on = true # Enable photochemistry diff --git a/input/demos/escape_grid_0_485Msun.toml b/input/demos/escape_grid_0_194Msun.toml similarity index 95% rename from input/demos/escape_grid_0_485Msun.toml rename to input/demos/escape_grid_0_194Msun.toml index 220a86507..f4bbb24f3 100644 --- a/input/demos/escape_grid_0_485Msun.toml 
+++ b/input/demos/escape_grid_0_194Msun.toml @@ -26,7 +26,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params] # output files [params.out] - path = "scratch/escape_grid_0_485Msun" + path = "scratch/escape_0_194Msun" logging = "INFO" plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended @@ -40,7 +40,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" minimum_rel = 1e-5 # relative minimum time-step [dimensionless] maximum = 3e7 # yr, maximum time-step initial = 1e4 # yr, inital step size - starspec = 1e9 # yr, interval to re-calculate the stellar spectrum + starspec = 1e8 # yr, interval to re-calculate the stellar spectrum starinst = 100 # yr, interval to re-calculate the instellation method = "adaptive" # proportional | adaptive | maximum @@ -68,7 +68,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params.stop.time] enabled = true minimum = 1.0e3 # yr, model will certainly run to t > minimum - maximum = 8.8e+9 # yr, model will terminate when t > maximum + maximum = 5e+9 # yr, model will terminate when t > maximum # solidification [params.stop.solid] @@ -92,21 +92,21 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [star] # Physical parameters - mass = 0.485 # M_sun + mass = 0.194 # M_sun age_ini = 0.100 # Gyr, model initialisation/start age module = "mors" [star.mors] - rot_pcntle = 10.0 # rotation percentile - # rot_period = 40 # rotation period [days] + rot_pcntle = 50.0 # rotation percentile + # rot_period = 122 # rotation period [days] tracks = "spada" # evolution tracks: spada | baraffe - age_now = 8.8 # Gyr, current age of star used for scaling - spec = "stellar_spectra/Named/gj176.txt" # stellar spectrum + age_now = 5 # Gyr, current age of star used for scaling + spec = "stellar_spectra/Named/gj1132.txt" # stellar spectrum [star.dummy] - radius = 0.474 # R_sun + radius = 0.2211 # R_sun 
calculate_radius = false # Calculate star radius using scaling from Teff? - Teff = 3632.0 # K + Teff = 3229.0 # K # Orbital system [orbit] @@ -272,7 +272,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" include_H2 = true # Include H2 compound include_CH4 = true # Include CH4 compound include_CO = true # Include CO compound - T_floor = 2300.0 # Temperature floor applied to outgassing calculation [K]. + T_floor = 700.0 # Temperature floor applied to outgassing calculation [K]. [outgas.atmodeller] some_parameter = "some_value" @@ -326,7 +326,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [atmos_chem] module = "vulcan" # Atmospheric chemistry module - when = "offline" # When to run chemistry (manually, offline, online) + when = "manually" # When to run chemistry (manually, offline, online) # Physics flags photo_on = true # Enable photochemistry diff --git a/input/demos/escape_grid_1Msun.toml b/input/demos/escape_grid_1Msun.toml index 47c007c2e..aeb4e25f5 100644 --- a/input/demos/escape_grid_1Msun.toml +++ b/input/demos/escape_grid_1Msun.toml @@ -40,7 +40,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" minimum_rel = 1e-5 # relative minimum time-step [dimensionless] maximum = 3e7 # yr, maximum time-step initial = 1e4 # yr, inital step size - starspec = 1e9 # yr, interval to re-calculate the stellar spectrum + starspec = 100e6 # yr, interval to re-calculate the stellar spectrum starinst = 100 # yr, interval to re-calculate the instellation method = "adaptive" # proportional | adaptive | maximum @@ -157,7 +157,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # Atmosphere - physics table [atmos_clim] - prevent_warming = false # do not allow the planet to heat up + prevent_warming = false # do not allow the planet to heat up surface_d = 0.01 # m, conductive skin thickness surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity cloud_enabled = false # enable water cloud radiative effects @@ 
-272,7 +272,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" include_H2 = true # Include H2 compound include_CH4 = true # Include CH4 compound include_CO = true # Include CO compound - T_floor = 2300.0 # Temperature floor applied to outgassing calculation [K]. + T_floor = 700.0 # Temperature floor applied to outgassing calculation [K]. [outgas.atmodeller] some_parameter = "some_value" @@ -326,7 +326,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [atmos_chem] module = "vulcan" # Atmospheric chemistry module - when = "offline" # When to run chemistry (manually, offline, online) + when = "manually" # When to run chemistry (manually, offline, online) # Physics flags photo_on = true # Enable photochemistry diff --git a/input/ensembles/escape_comparison_on_off.toml b/input/ensembles/escape_comparison_on_off.toml index 802430466..b31f8ff63 100644 --- a/input/ensembles/escape_comparison_on_off.toml +++ b/input/ensembles/escape_comparison_on_off.toml @@ -1,7 +1,7 @@ # Config file for running a grid of forward models # Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "scratch/escape_comparison_on_off/" +output = "scratch/escape_comparison_on_off_0_1_AU_fo2_6_H_oceans_10/" # Make `output` a symbolic link to this absolute location. To disable: set to empty string. 
symlink = "" diff --git a/input/ensembles/escape_grid_0_485Msun.toml b/input/ensembles/escape_grid_0_194Msun.toml similarity index 91% rename from input/ensembles/escape_grid_0_485Msun.toml rename to input/ensembles/escape_grid_0_194Msun.toml index ce4c281d7..c99f52433 100644 --- a/input/ensembles/escape_grid_0_485Msun.toml +++ b/input/ensembles/escape_grid_0_194Msun.toml @@ -1,13 +1,13 @@ # Config file for running a grid of forward models # Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "scratch/escape_grid_0_485Msun/" +output = "scratch/escape_grid_0_194Msun/" # Make `output` a symbolic link to this absolute location. To disable: set to empty string. symlink = "" # Path to base (reference) config file relative to PROTEUS root folder -ref_config = "input/demos/escape_grid_0_485Msun.toml" +ref_config = "input/demos/escape_grid_0_194Msun.toml" # Use SLURM? use_slurm = true @@ -31,7 +31,7 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 3) # Semi-major axis set by direct ["orbit.semimajoraxis"] method = "direct" - values = [0.0188, 0.0939, 0.1877] + values = [0.006618, 0.033091, 0.066182] # Escape efficiency set by direct ["escape.zephyrus.efficiency"] diff --git a/input/ensembles/escape_grid_1Msun.toml b/input/ensembles/escape_grid_1Msun.toml index d076272b7..445d385d2 100644 --- a/input/ensembles/escape_grid_1Msun.toml +++ b/input/ensembles/escape_grid_1Msun.toml @@ -13,7 +13,7 @@ ref_config = "input/demos/escape_grid_1Msun.toml" use_slurm = true # Execution limits -max_jobs = 500 # maximum number of concurrent tasks (e.g. 500 on Habrok) +max_jobs = 1000 # maximum number of concurrent tasks (e.g. 500 on Habrok) max_days = 5 # maximum number of days to run (e.g. 1) max_mem = 3 # maximum memory per CPU in GB (e.g. 
3) diff --git a/input/ensembles/grid_toi561b.toml b/input/ensembles/grid_toi561b.toml index cd139314f..925d64df3 100644 --- a/input/ensembles/grid_toi561b.toml +++ b/input/ensembles/grid_toi561b.toml @@ -44,3 +44,5 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 3) start = 1.0 stop = 1000.0 count = 4 + +# Add CMF diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml index bc00f3f43..94d5f2f0e 100644 --- a/input/planets/toi561b.toml +++ b/input/planets/toi561b.toml @@ -22,7 +22,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" minimum_rel = 1e-5 # relative minimum time-step [dimensionless] maximum = 1.1e10 # yr, maximum time-step initial = 1e3 # yr, inital step size - starspec = 1e9 # yr, interval to re-calculate the stellar spectrum + starspec = 1e8 # yr, interval to re-calculate the stellar spectrum starinst = 1e2 # yr, interval to re-calculate the instellation method = "adaptive" # proportional | adaptive | maximum @@ -42,9 +42,9 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # required number of iterations [params.stop.iters] - enabled = true + enabled = false minimum = 5 - maximum = 9000 + maximum = 5e6 # required time constraints [params.stop.time] @@ -79,7 +79,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" module = "mors" [star.mors] - rot_pcntle = 20.0 # rotation percentile -> slow rotator because the star is old ? + rot_pcntle = 50.0 # rotation percentile rot_period = 'none' # rotation period [days] tracks = "spada" # evolution tracks: spada | baraffe age_now = 11 # [Gyr] from Lacedelli et al., 2022 @@ -252,7 +252,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" include_H2 = true # Include H2 compound include_CH4 = true # Include CH4 compound include_CO = true # Include CO compound - T_floor = 2310.0 # Temperature floor applied to outgassing calculation [K]. + T_floor = 700.0 # Temperature floor applied to outgassing calculation [K]. 
[outgas.atmodeller] some_parameter = "some_value" @@ -306,7 +306,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [atmos_chem] module = "vulcan" # Atmospheric chemistry module - when = "offline" # When to run chemistry (manually, offline, online) + when = "manually" # When to run chemistry (manually, offline, online) # Physics flags photo_on = true # Enable photochemistry diff --git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py index 13dc6faa8..900fd4b8c 100644 --- a/src/proteus/grid/post_processing_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -651,7 +651,7 @@ def plot_grid_status(cases_data, plot_dir: Path, grid_name: str, status_colors: for formatted, original in zip(formatted_status_keys, status_counts.index)} else: palette = sns.color_palette("Accent", len(status_counts)) - formatted_status_keys = [s.replace("d (", "d \n (") for s in status_counts.index] + formatted_status_keys = [s.replace(" (", " \n (") for s in status_counts.index] palette = dict(zip(formatted_status_keys, palette)) # Prepare dataframe for plotting diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index c73ae2f5b..1cd05cb61 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -64,7 +64,7 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) # User choose the output to extract from 'runtime_helpfile.csv' of each case (always the [-1] element of the column). 
# For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) - output_to_extract = ['Time','esc_rate_total','Phi_global','P_surf','T_surf','M_planet','R_obs','p_xuv', 'R_xuv', 'atm_kg_per_mol', + output_to_extract = ['Time','esc_rate_total','Phi_global','P_surf','T_surf','T_eqm','M_planet','R_obs','p_xuv', 'R_xuv', 'F_xuv', 'atm_kg_per_mol', 'H_kg_atm','O_kg_atm','C_kg_atm','N_kg_atm','S_kg_atm', 'Si_kg_atm', 'Mg_kg_atm', 'Fe_kg_atm', 'Na_kg_atm', 'H2O_kg_atm','CO2_kg_atm', 'O2_kg_atm', 'H2_kg_atm', 'CH4_kg_atm', 'CO_kg_atm', 'N2_kg_atm', 'NH3_kg_atm', 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm', 'runtime'] @@ -126,14 +126,14 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) # Single ECDF Plots # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. param_settings_single = { - # "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, - # "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, - # "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, - # "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, 
"log_scale": False}, "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}, - "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": False}, + # "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": False}, # "escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} # "escape.module": {"label": "Escape module", "colormap": cm.RdYlGn, "log_scale": False} } @@ -145,33 +145,37 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}, 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, - 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} + 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + 'F_xuv': {"label": r"F$_{XUV}$ [W/m$^2$]", "log_scale": True, "scale": 1.0}, + #'T_eqm': {"label": r"T$_{eqm}$ [K]", "log_scale": False, "scale": 1.0} } ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) # ECDF Grid Plot # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. 
param_settings_grid = { - # "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, - # "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, - # "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - # "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": True}, - "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": True}, + "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}, + # "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": True}, # "escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} # "escape.module": {"label": "Escape module", "colormap": cm.RdYlGn, "log_scale": False} } output_settings_grid = { 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - 'P_surf': {"label": r"P$_{surf}$ [bar]", "log_scale": True, "scale": 1.0}, - 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, 'T_surf': {"label": r"T$_{surf}$ 
[$10^3$ K]", "log_scale": False, "scale": 1.0/1000.0}, + 'F_xuv': {"label": r"F$_{XUV}$ [W/m$^2$]", "log_scale": True, "scale": 1.0}, + #'T_eqm': {"label": r"T$_{eqm}$ [K]", "log_scale": False, "scale": 1.0}, + 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + 'P_surf': {"label": r"P$_{surf}$ [bar]", "log_scale": True, "scale": 1.0}, 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, - # 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + # 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, #'O_kg_atm': {"label": r"[O$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, #'C_kg_atm': {"label": r"[C$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, #'N_kg_atm': {"label": r"[N$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, From 581e55462ab9cde667eb6d5a4c2f47552049767b Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 20 Oct 2025 17:51:47 +0200 Subject: [PATCH 054/105] update to fix stellar spectrum error with janus --- src/proteus/atmos_clim/wrapper.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/proteus/atmos_clim/wrapper.py b/src/proteus/atmos_clim/wrapper.py index f0b3c06c0..34be7f74d 100644 --- a/src/proteus/atmos_clim/wrapper.py +++ b/src/proteus/atmos_clim/wrapper.py @@ -6,7 +6,7 @@ from typing import TYPE_CHECKING import pandas as pd -from numpy import pi +from numpy import pi, unique if TYPE_CHECKING: from proteus.config import Config @@ -86,7 +86,12 @@ def run_atmosphere(atmos_o:Atmos_t, config:Config, dirs:dict, loop_counter:dict, UpdateStatusfile(dirs, 20) raise FileNotFoundError( "Spectral file does not exist at '%s'" % spectral_file_nostar) - InitStellarSpectrum(dirs, wl, fl, spectral_file_nostar) + + idx = unique(wl, return_index=True)[1] + wl_un = wl[idx] + fl_un = fl[idx] + + 
InitStellarSpectrum(dirs, wl_un, fl_un, spectral_file_nostar) atmos_o._atm = InitAtm(dirs, config) atm_output = RunJANUS(atmos_o._atm, dirs, config, hf_row, hf_all) From 5b4fa57720b5ada1376820ba4104085857ee07a5 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Sun, 26 Oct 2025 18:34:25 +0100 Subject: [PATCH 055/105] create a new csv file with only completed cases and add number in the panels fro the ecdf gridplot. Update the toml file for comparison escape on off --- input/demos/escape_comparison.toml | 54 ++------ input/ensembles/escape_comparison_on_off.toml | 2 +- src/proteus/grid/post_processing_grid.py | 120 +++++++++++++++++- src/proteus/grid/run_grid_analysis.py | 28 ++-- 4 files changed, 147 insertions(+), 57 deletions(-) diff --git a/input/demos/escape_comparison.toml b/input/demos/escape_comparison.toml index b6dc5e798..66ef04d1a 100644 --- a/input/demos/escape_comparison.toml +++ b/input/demos/escape_comparison.toml @@ -26,12 +26,12 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params] # output files [params.out] - path = "scratch/escape_comparison_on_off_0_1_AU" - logging = "DEBUG" + path = "scratch/escape_on_off_janus_agni_1Msun/" # Path to output folder relative to PROTEUS output folder + logging = "INFO" plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations - archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive + archive_mod = 'none' # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive remove_sf = true # time-stepping @@ -40,7 +40,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" minimum_rel = 1e-5 # relative minimum time-step [dimensionless] maximum = 3e7 # yr, maximum time-step initial = 1e4 # yr, inital step size - starspec = 1e8 # 
yr, interval to re-calculate the stellar spectrum + starspec = 100e6 # yr, interval to re-calculate the stellar spectrum starinst = 100 # yr, interval to re-calculate the instellation method = "adaptive" # proportional | adaptive | maximum @@ -68,7 +68,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params.stop.time] enabled = true minimum = 1.0e3 # yr, model will certainly run to t > minimum - maximum = 9e9 # yr, model will terminate when t > maximum + maximum = 4.567e+9 # yr, model will terminate when t > maximum # solidification [params.stop.solid] @@ -112,7 +112,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [orbit] instellation_method = 'sma' # whether to define orbit using semi major axis ('sma') or instellation flux ('inst') instellationflux = 1.0 # instellation flux received from the planet in [Earth units] - semimajoraxis = 0.1 # initial semi-major axis of planet's orbit [AU] + semimajoraxis = 0.5 # initial semi-major axis of planet's orbit [AU] eccentricity = 0.0 # initial eccentricity of planet's orbit [dimensionless] zenith_angle = 48.19 # characteristic zenith angle [degrees] s0_factor = 0.375 # instellation scale factor [dimensionless] @@ -138,26 +138,9 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" module = "self" # self | zalmoxis - [struct.zalmoxis] - coremassfrac = 0.325 # core mass fraction [non-dim.] - inner_mantle_mass_fraction = 0 # inner mantle mass fraction [non-dim.] - weight_iron_frac = 0.325 # iron fraction in the planet [non-dim.] - num_levels = 100 # number of Zalmoxis radius layers - EOSchoice = "Tabulated:iron/silicate" # iron/silicate for super-Earths, water for water planets with Earth-like rocky cores - max_iterations_outer = 20 # max. iterations for the outer loop - tolerance_outer = 1e-3 # tolerance for the outer loop - max_iterations_inner = 100 # max. 
iterations for the inner loop - tolerance_inner = 1e-4 # tolerance for the inner loop - relative_tolerance = 1e-5 # relative tolerance for solve_ivp - absolute_tolerance = 1e-6 # absolute tolerance for solve_ivp - target_surface_pressure = 101325 # target surface pressure - pressure_tolerance = 1e11 # tolerance surface pressure - max_iterations_pressure = 200 # max. iterations for the innermost loop - pressure_adjustment_factor = 1.1 # factor for adjusting the pressure in the innermost loop - # Atmosphere - physics table [atmos_clim] - prevent_warming = false # do not allow the planet to heat up + prevent_warming = false # do not allow the planet to heat up surface_d = 0.01 # m, conductive skin thickness surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity cloud_enabled = false # enable water cloud radiative effects @@ -207,7 +190,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [escape.zephyrus] Pxuv = 1e-2 # Pressure at which XUV radiation become opaque in the planetary atmosphere [bar] - efficiency = 1.0 # Escape efficiency factor + efficiency = 0.5 # Escape efficiency factor tidal = false # Tidal contribution enabled [escape.dummy] @@ -236,28 +219,9 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" ini_entropy = 3300.0 # Surface entropy conditions [J K-1 kg-1] ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1] - [interior.aragog] - logging = "ERROR" - num_levels = 200 # Number of Aragog grid levels - tolerance = 1.0e-10 # solver tolerance - ini_tmagma = 3500.0 # Initial magma surface temperature [K] - inner_boundary_condition = 1 # 1 = core cooling model, 2 = prescribed heat flux, 3 = prescribed temperature - inner_boundary_value = 4000 # core temperature [K], if inner_boundary_condition = 3. 
CMB heat flux [W/m^2], if if inner_boundary_condition = 2 - conduction = true # enable conductive heat transfer - convection = true # enable convective heat transfer - gravitational_separation = false # enable gravitational separation - mixing = false # enable mixing - dilatation = false # enable dilatation source term - mass_coordinates = false # enable mass coordinates - tsurf_poststep_change = 30 # threshold of maximum change on surface temperature - event_triggering = true # enable events triggering to avoid abrupt jumps in surface temperature - - [interior.dummy] - ini_tmagma = 3500.0 # Initial magma surface temperature [K] - # Outgassing - physics table [outgas] - fO2_shift_IW = 6 # log10(ΔIW), atmosphere/interior boundary oxidation state + fO2_shift_IW = 4 # log10(ΔIW), atmosphere/interior boundary oxidation state module = "calliope" # Which outgassing module to use diff --git a/input/ensembles/escape_comparison_on_off.toml b/input/ensembles/escape_comparison_on_off.toml index b31f8ff63..fdfd38647 100644 --- a/input/ensembles/escape_comparison_on_off.toml +++ b/input/ensembles/escape_comparison_on_off.toml @@ -1,7 +1,7 @@ # Config file for running a grid of forward models # Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "scratch/escape_comparison_on_off_0_1_AU_fo2_6_H_oceans_10/" +output = "scratch/escape_on_off_janus_agni_1Msun_solid/" # Make `output` a symbolic link to this absolute location. To disable: set to empty string. 
symlink = "" diff --git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py index 900fd4b8c..bfeb7d567 100644 --- a/src/proteus/grid/post_processing_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -459,6 +459,103 @@ def save_error_running_cases(grid_name: str, cases_data: List[Dict[str, Any]], g print(f"→ Error/Running (and missing‐output) CSV saved to: {err_csv}") +def save_completed_cases( + grid_name: str, + cases_data: List[Dict[str, Any]], + grid_parameters: Dict[str, List[Any]], + case_params: Dict[int, Dict[str, Any]], + extracted_value: Dict[str, List[Any]], + output_to_extract: List[str], + output_dir: Path, +) -> None: + """ + Save all cases whose status starts with 'Completed' into a separate CSV file + named '{grid_name}_filtered.csv'. + + Parameters + ---------- + grid_name : str + Name of the grid. + + cases_data : list + List of dictionaries containing simulation data. + + grid_parameters : dict + A dictionary where each key is a parameter name, and its corresponding values + used for the entire grid is a list. + + case_params : dict + Dictionary containing each case number with the name and values of the + tested parameters in this grid. + + extracted_value : dict + A dictionary containing the extracted values of the specified parameter + for all cases of the grid. + + output_to_extract : list + List of output values extracted from each simulation in the grid. + + output_dir : Path + Directory where the generated CSV file will be saved. + Created if it does not exist. 
+ """ + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + # Identify indices where status starts with "Completed" (case-insensitive, trimmed) + completed_indices = [] + for idx, case_data in enumerate(cases_data): + status_raw = case_data.get("status", "") + # Normalize status string safely + if not isinstance(status_raw, str): + continue + status_clean = status_raw.strip().lower().replace('\xa0', ' ') # replace non-breaking spaces if any + if status_clean.startswith("completed"): + completed_indices.append(idx) + + if not completed_indices: + print("→ No cases with status starting with 'Completed' found; skipping filtered CSV.") + return + + # Build output path + filtered_csv = output_dir / f"{grid_name}_filtered.csv" + + with open(filtered_csv, mode="w", newline="") as csvfile: + writer = csv.writer(csvfile) + + # Header block + writer.writerow(["#############################################################################################################"]) + writer.writerow([f"Grid name: {grid_name}"]) + writer.writerow([f"Total number of 'Completed*' cases: {len(completed_indices)}"]) + writer.writerow(["----------------------------------------------------------"]) + writer.writerow( + ["Case number", "Status"] + + list(grid_parameters.keys()) + + list(extracted_value.keys()) + ) + writer.writerow(["#############################################################################################################"]) + writer.writerow([]) + + # Write each completed case row + for case_index in completed_indices: + status = cases_data[case_index].get("status", "Unknown") or "Unknown" + row = [case_index, f"'{status}'"] + + # Add grid parameter values + for param in grid_parameters.keys(): + row.append(case_params.get(case_index, {}).get(param, "NA")) + + # Add extracted outputs + for param in extracted_value.keys(): + vals = extracted_value[param] + if case_index < len(vals): + row.append(vals[case_index]) + else: + row.append("NA") + + 
writer.writerow(row) + + print(f"→ Completed* cases CSV saved to: {filtered_csv}") ##### Functions for plotting grid data results ##### @@ -902,6 +999,19 @@ def color_func(v): ax = axes[i][j] out_settings = output_settings[output_name] + # Add panel number in upper-left corner + panel_number = i * n_cols + j + 1 # number of panels left-to-right, top-to-bottom + ax.text( + 0.02, 0.98, # relative position in axes coordinates + str(panel_number), # text to display + transform=ax.transAxes, # use axis-relative coordinates + fontsize=18, + fontweight='bold', + va='top', # vertical alignment + ha='left', # horizontal alignment + color='black' + ) + # Plot one ECDF per tested parameter value for val in tested_param: data_key = f"{output_name}_per_{param_name}" @@ -945,8 +1055,13 @@ def color_func(v): rightmost_ax = axes[i, -1] cbar = fig.colorbar(sm,ax=rightmost_ax,pad=0.03,aspect=10) cbar.set_label(settings["label"], fontsize=24) - # (you can remove or tweak the label‐coords line if it ends up too far to the right) - cbar.ax.yaxis.set_label_coords(5.5, 0.5) + # This is for plot 0.194Msun + # if param_name == "orbit.semimajoraxis": + # cbar.ax.yaxis.set_label_coords(9.5, 0.5) + # else: + # cbar.ax.yaxis.set_label_coords(6, 0.5) + # This is for 1Msun + cbar.ax.yaxis.set_label_coords(6, 0.5) ticks = sorted(set(tested_param)) cbar.set_ticks(ticks) cbar.ax.tick_params(labelsize=22) @@ -962,6 +1077,7 @@ def color_func(v): filename = "ecdf_grid_plot.png" out_path = os.path.join(plots_path, filename) fig.savefig(out_path, dpi=300) + #fig.savefig('/home2/p315557/PROTEUS/nogit_files/nogit_code/paper1_plots/plots/1Msun_ecdf_grid_plot.png', dpi=300) plt.close(fig) print(f"Grid ECDF plot saved at {out_path}") diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index 1cd05cb61..548271f1e 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -23,6 +23,7 @@ load_grid_cases, plot_dir_exists, 
plot_grid_status, + save_completed_cases, save_error_running_cases, save_grid_data_to_csv, ) @@ -102,6 +103,15 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) extracted_value=extracted_value, output_to_extract=output_to_extract, output_dir=data_dir) + + save_completed_cases( + grid_name=grid_name, + cases_data=cases_data, + grid_parameters=grid_parameters, + case_params=case_init_param, + extracted_value=extracted_value, + output_to_extract=output_to_extract, + output_dir=data_dir) else: print('-----------------------------------------------------------') print(f'Step 1 : Skipped (CSV already exists at {csv_file})') @@ -154,11 +164,11 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) # ECDF Grid Plot # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. param_settings_grid = { - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Paired, "log_scale": False}, "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.efficiency": {"label": r"$\epsilon$", "colormap": cm.spring, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2 / IW)$", "colormap": cm.coolwarm, "log_scale": False}, + "escape.zephyrus.efficiency": {"label": r"$\rm \epsilon$", "colormap": cm.spring, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"P$_{\rm XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "outgas.fO2_shift_IW": {"label": r"$\rm \log_{10} fO_2 [IW]$", "colormap": cm.coolwarm, "log_scale": False}, "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, "delivery.elements.H_oceans": {"label": "[H] [oceans]", 
"colormap": cm.winter, "log_scale": False}, # "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": True}, @@ -168,13 +178,13 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) output_settings_grid = { 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - 'T_surf': {"label": r"T$_{surf}$ [$10^3$ K]", "log_scale": False, "scale": 1.0/1000.0}, - 'F_xuv': {"label": r"F$_{XUV}$ [W/m$^2$]", "log_scale": True, "scale": 1.0}, + 'T_surf': {"label": r"T$_{\rm surf}$ [$10^3$ K]", "log_scale": False, "scale": 1.0/1000.0}, + #'F_xuv': {"label": r"F$_{XUV}$ [W/m$^2$]", "log_scale": True, "scale": 1.0}, #'T_eqm': {"label": r"T$_{eqm}$ [K]", "log_scale": False, "scale": 1.0}, - 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - 'P_surf': {"label": r"P$_{surf}$ [bar]", "log_scale": True, "scale": 1.0}, + 'P_surf': {"label": r"P$_{\rm surf}$ [bar]", "log_scale": True, "scale": 1.0}, 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, - 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, + 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, + #'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, # 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, #'O_kg_atm': {"label": r"[O$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, #'C_kg_atm': {"label": r"[C$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, From ee39aa6b52953f8669731244658be105bb37a518 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 28 Oct 2025 11:57:57 +0100 Subject: [PATCH 056/105] changes for plots paper 1 add ecdf species grid plot --- src/proteus/grid/run_grid_analysis.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git 
a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index 548271f1e..a67436853 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -192,6 +192,17 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) #'S_kg_atm': {"label": r"[S$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} } ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid, plots_path=plots_path) + output_settings_grid_species = { + 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, + 'H_kg_atm': {"label": r"[H$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, + 'O_kg_atm': {"label": r"[O$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, + 'C_kg_atm': {"label": r"[C$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, + 'N_kg_atm': {"label": r"[N$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, + 'S_kg_atm': {"label": r"[S$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0} + } + ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid_species, plots_path=plots_path) + print("Values N in atm :", df['N_kg_atm'].values) + print("Values N in atm 1 < N < 1e12 :", 1 < df['N_kg_atm'].values[df['N_kg_atm'].values < 1e13]) print('-----------------------------------------------------------') print(f'Plots saved in {plots_path}') From c9ca7e4746b5fa5c6d64b22e68a419613961047b Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 28 Oct 2025 11:59:33 +0100 Subject: [PATCH 057/105] update ecdf grid function to plot only values > 0 for species plot --- src/proteus/grid/post_processing_grid.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py index bfeb7d567..a5755be3b 100644 --- 
a/src/proteus/grid/post_processing_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -1018,6 +1018,11 @@ def color_func(v): if val not in grouped_data.get(data_key, {}): continue raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0) + # Plot ECDf if output == df['H_kg_atm'] then plot only values > 1e10 + if output_name.endswith('_kg_atm'): + raw = raw[raw > 0] + else: + raw = raw sns.ecdfplot( data=raw, log_scale=out_settings.get("log_scale", False), @@ -1074,10 +1079,10 @@ def color_func(v): # Tweak layout and save plt.tight_layout(rect=[0.08, 0.02, 1, 0.97]) - filename = "ecdf_grid_plot.png" - out_path = os.path.join(plots_path, filename) - fig.savefig(out_path, dpi=300) - #fig.savefig('/home2/p315557/PROTEUS/nogit_files/nogit_code/paper1_plots/plots/1Msun_ecdf_grid_plot.png', dpi=300) + #filename = "ecdf_grid_plot.png" + #out_path = os.path.join(plots_path, filename) + #fig.savefig(out_path, dpi=300) + fig.savefig('/home2/p315557/PROTEUS/nogit_files/nogit_code/paper1_plots/plots/1Msun_ecdf_grid_plot_species.png', dpi=300) plt.close(fig) - print(f"Grid ECDF plot saved at {out_path}") + #print(f"Grid ECDF plot saved at {out_path}") From bb957c0b98871a02ad921b8e9b26614cf8bcb2ce Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 10 Dec 2025 11:49:41 +0100 Subject: [PATCH 058/105] mini updates before mergin main --- src/proteus/grid/post_processing_grid.py | 16 +++++++++------- src/proteus/grid/run_grid_analysis.py | 21 ++++++++++----------- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py index a5755be3b..e8b0a71c6 100644 --- a/src/proteus/grid/post_processing_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -779,7 +779,7 @@ def plot_grid_status(cases_data, plot_dir: Path, grid_name: str, status_colors: ax.text( i, count + 1, f"{count} ({percentage:.1f}%)", - ha='center', va='bottom', fontsize=10 + ha='center', va='bottom', 
fontsize=15 ) # Boxed total in upper right @@ -1018,11 +1018,14 @@ def color_func(v): if val not in grouped_data.get(data_key, {}): continue raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0) - # Plot ECDf if output == df['H_kg_atm'] then plot only values > 1e10 + # Plot ECDf if output == df['H_kg_atm'] then plot only values > 1e10 AND psurf > 1 bar if output_name.endswith('_kg_atm'): - raw = raw[raw > 0] + raw = np.clip(raw, 1e15, None) + elif output_name.endswith('P_surf'): + raw = np.clip(raw, 1, None) else: raw = raw + sns.ecdfplot( data=raw, log_scale=out_settings.get("log_scale", False), @@ -1079,10 +1082,9 @@ def color_func(v): # Tweak layout and save plt.tight_layout(rect=[0.08, 0.02, 1, 0.97]) - #filename = "ecdf_grid_plot.png" - #out_path = os.path.join(plots_path, filename) - #fig.savefig(out_path, dpi=300) - fig.savefig('/home2/p315557/PROTEUS/nogit_files/nogit_code/paper1_plots/plots/1Msun_ecdf_grid_plot_species.png', dpi=300) + filename = "ecdf_grid_plot.png" + out_path = os.path.join(plots_path, filename) + fig.savefig(out_path, dpi=300) plt.close(fig) #print(f"Grid ECDF plot saved at {out_path}") diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index a67436853..12043cbda 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ b/src/proteus/grid/run_grid_analysis.py @@ -192,17 +192,16 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) #'S_kg_atm': {"label": r"[S$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} } ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid, plots_path=plots_path) - output_settings_grid_species = { - 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, - 'H_kg_atm': {"label": r"[H$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, - 'O_kg_atm': {"label": r"[O$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, 
- 'C_kg_atm': {"label": r"[C$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, - 'N_kg_atm': {"label": r"[N$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, - 'S_kg_atm': {"label": r"[S$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0} - } - ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid_species, plots_path=plots_path) - print("Values N in atm :", df['N_kg_atm'].values) - print("Values N in atm 1 < N < 1e12 :", 1 < df['N_kg_atm'].values[df['N_kg_atm'].values < 1e13]) + + # output_settings_grid_species = { + # 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, + # 'H_kg_atm': {"label": r"[H$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, + # 'O_kg_atm': {"label": r"[O$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, + # 'C_kg_atm': {"label": r"[C$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, + # 'N_kg_atm': {"label": r"[N$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, + # 'S_kg_atm': {"label": r"[S$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0} + # } + # ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid_species, plots_path=plots_path) print('-----------------------------------------------------------') print(f'Plots saved in {plots_path}') From 2ecbd7c643e25424ad3a76738133514a51628776 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Thu, 11 Dec 2025 09:37:50 +0100 Subject: [PATCH 059/105] test to update the post-processing script to new proteus version. 
need to fix the phi_crit for solidification function --- input/ensembles/grid_toi561b.toml | 23 +- input/planets/toi561b.toml | 103 +++++-- src/proteus/grid/post_processing_grid.py | 7 +- src/proteus/grid/post_processing_updated.py | 301 ++++++++++++++++++++ src/proteus/grid/test_new_pp_grid.py | 42 +++ 5 files changed, 447 insertions(+), 29 deletions(-) create mode 100644 src/proteus/grid/post_processing_updated.py create mode 100644 src/proteus/grid/test_new_pp_grid.py diff --git a/input/ensembles/grid_toi561b.toml b/input/ensembles/grid_toi561b.toml index ac5b814c1..bfca8fbd4 100644 --- a/input/ensembles/grid_toi561b.toml +++ b/input/ensembles/grid_toi561b.toml @@ -1,7 +1,7 @@ # Config file for running a grid of forward models # Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "scratch/toi561b_grid" +output = "scratch/grid_toi561b" # Make `output` a symbolic link to this absolute location. To disable: set to empty string. symlink = "" @@ -13,7 +13,7 @@ ref_config = "input/planets/toi561b.toml" use_slurm = true # Execution limits -max_jobs = 75 # maximum number of concurrent tasks (e.g. 500 on Habrok) +max_jobs = 500 # maximum number of concurrent tasks (e.g. 500 on Habrok) max_days = 5 # maximum number of days to run (e.g. 1) max_mem = 3 # maximum memory per CPU in GB (e.g. 3) @@ -26,7 +26,7 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 3) # Escape efficiency factor set directly ["escape.zephyrus.efficiency"] method = "direct" - values = [0.1, 0.15, 0.2] + values = [0.1, 0.3] # Planet bulk C/H ratio set directly ["delivery.elements.CH_ratio"] @@ -45,6 +45,17 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 
3) stop = 1000.0 count = 4 -# Add CMF -# Period form 0.44 days to 2 days -# O2 rich star +# Core-Mass Fraction (CMF) set directly +["struct.corefrac"] + method = "direct" + values = [0.25, 0.40, 0.55] + +# Bond albedo set directly +["atmos_clim.albedo_pl"] + method = "direct" + values = [0.0, 0.2, 0.4, 0.6] + +# # Semi-major axis set directly to have period = 0.44 days (correct) and test at 2 days +# ["orbit.semimajoraxis"] +# method = "direct" +# values = [0.0106, 0.0287] diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml index 94d5f2f0e..8690348f8 100644 --- a/input/planets/toi561b.toml +++ b/input/planets/toi561b.toml @@ -9,18 +9,18 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # output files [params.out] path = "scratch/toi561b" - logging = "DEBUG" + logging = "INFO" plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "png" # Plotting image file format, "png" or "pdf" recommended write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations - archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive - remove_sf = true + archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive + remove_sf = false # Remove SOCRATES spectral file when simulation ends. 
# time-stepping [params.dt] minimum = 3e2 # yr, minimum time-step minimum_rel = 1e-5 # relative minimum time-step [dimensionless] - maximum = 1.1e10 # yr, maximum time-step + maximum = 1e7 # yr, maximum time-step initial = 1e3 # yr, inital step size starspec = 1e8 # yr, interval to re-calculate the stellar spectrum starinst = 1e2 # yr, interval to re-calculate the instellation @@ -64,8 +64,16 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" rtol = 1e-3 # relative tolerance [params.stop.escape] + enabled = false + p_stop = 1.0 # Stop surface pressure is less than this value + + # disintegration + [params.stop.disint] enabled = false - p_stop = 1.0 # bar, model will terminate with p_surf < p_stop + roche_enabled = false + offset_roche = 0 # correction to calculated Roche limit [m] + spin_enabled = false + offset_spin = 0 # correction to calculated Breakup period [s] # ---------------------------------------------------- @@ -100,7 +108,12 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" s0_factor = 0.375 # dimensionless evolve = false # whether to evolve the SMaxis and eccentricity - module = "none" + module = "lovepy" # module used to calculate tidal heating + + axial_period = "none" # planet's initial day length [hours]; will use orbital period if 'none' + satellite = false # include satellite (moon)? + mass_sat = 7.347e+22 # mass of satellite [kg] + semimajoraxis_sat = 3e8 # initial SMA of satellite's orbit [m] [orbit.dummy] H_tide = 1e-11 # Fixed tidal power density [W kg-1] @@ -121,6 +134,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" module = "self" # self | zalmoxis [struct.zalmoxis] + verbose = false # verbose printing? coremassfrac = 0.325 # core mass fraction [non-dim.] inner_mantle_mass_fraction = 0 # inner mantle mass fraction [non-dim.] weight_iron_frac = 0.325 # iron fraction in the planet [non-dim.] 
@@ -154,18 +168,41 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" module = "agni" # Which atmosphere module to use [atmos_clim.agni] + verbosity = 1 # output verbosity for agni (0:none, 1:info, 2:debug) p_top = 1.0e-5 # bar, top of atmosphere grid pressure + p_obs = 0.02 # bar, level probed in transmission spectral_group = "Honeyside" # which gas opacities to include - spectral_bands = "256" # how many spectral bands? + spectral_bands = "256" # how many spectral bands? num_levels = 50 # Number of atmospheric grid levels chemistry = "none" # "none" | "eq" surf_material = "greybody" # surface material file for scattering solve_energy = true # solve for energy-conserving atmosphere profile - solution_atol = 1e-2 # solver absolute tolerance - solution_rtol = 5e-2 # solver relative tolerance - overlap_method = "ee" # gas overlap method - condensation = true # volatile condensation - real_gas = true # use real-gas equations of state + solution_atol = 0.01 # solver absolute tolerance + solution_rtol = 0.05 # solver relative tolerance + overlap_method = "ee" # gas overlap method + surf_roughness = 1e-3 # characteristic surface roughness [m] + surf_windspeed = 2.0 # characteristic surface wind speed [m/s]. + rainout = true # include volatile condensation/evaporation aloft + latent_heat = false # include latent heat release when `rainout=true`? + oceans = true # form liquid oceans at planet surface? 
+ convection = true # include convective heat transport, with MLT + conduction = true # include conductive heat transport, with Fourier's law + sens_heat = true # include sensible heat flux near surface, with TKE scheme + real_gas = true # use real-gas equations of state + psurf_thresh = 0.1 # bar, surface pressure where we switch to 'transparent' mode + dx_max_ini = 300.0 # initial maximum temperature step [kelvin] allowed by solver + dx_max = 35.0 # maximum temperature step [kelvin] allowed by solver + max_steps = 70 # max steps allowed by solver during each iteration + perturb_all = true # updated entire jacobian each step? + mlt_criterion = "s" # MLT convection stability criterion; (l)edoux or (s)chwarzschild + fastchem_floor = 150.0 # Minimum temperature allowed to be sent to FC + fastchem_maxiter_chem = 60000 # Maximum FC iterations (chemistry) + fastchem_maxiter_solv = 20000 # Maximum FC iterations (internal solver) + fastchem_xtol_chem = 1e-4 # FC solver tolerance (chemistry) + fastchem_xtol_elem = 1e-4 # FC solver tolerance (elemental) + ini_profile = 'isothermal' # Initial guess for temperature profile shape + ls_default = 2 # Default linesearch method (0:none, 1:gs, 2:bt) + fdo = 2 # finite-difference order (options: 2, 4) [atmos_clim.janus] p_top = 1.0e-5 # bar, top of atmosphere grid pressure @@ -179,6 +216,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [atmos_clim.dummy] gamma = 0.7 # atmosphere opacity between 0 and 1 + height_factor = 3.0 # observed height is this times the scale height # Volatile escape - physics table [escape] @@ -199,15 +237,17 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" grain_size = 0.1 # crystal settling grain size [m] F_initial = 1e5 # Initial heat flux guess [W m-2] radiogenic_heat = true # enable radiogenic heat production - tidal_heat = false # enable tidal heat production + tidal_heat = true # enable tidal heat production rheo_phi_loc = 0.4 # Centre of rheological transition 
rheo_phi_wid = 0.15 # Width of rheological transition bulk_modulus = 260e9 # Bulk modulus [Pa] + melting_dir = "Monteux-600" # Name of folder constaining melting curves + lookup_dir = "1TPa-dK09-elec-free/MgSiO3_Wolf_Bower_2018_1TPa" # Name of folder with EOS tables, etc. module = "spider" # Which interior module to use [interior.spider] - num_levels = 100 # Number of SPIDER grid levels + num_levels = 60 # Number of SPIDER grid levels mixing_length = 2 # Mixing length parameterization tolerance = 1.0e-10 # solver tolerance tolerance_rel = 1.0e-8 # relative solver tolerance @@ -216,11 +256,19 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" tsurf_rtol = 0.01 # tsurf_poststep_change_frac ini_entropy = 4000.0 # Surface entropy conditions [J K-1 kg-1] ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1] + conduction = true # enable conductive heat transfer + convection = true # enable convective heat transfer + gravitational_separation = true # enable gravitational separation + mixing = true # enable mixing + matprop_smooth_width = 1e-2 # melt-fraction window width over which to smooth material properties [interior.aragog] + logging = "ERROR" # Aragog log verbosity num_levels = 220 # Number of Aragog grid levels tolerance = 1.0e-10 # solver tolerance + initial_condition = 3 # Initial T(p); 1: linear, 2: user defined, 3: adiabat ini_tmagma = 3200.0 # Initial magma surface temperature [K] + basal_temperature = 7000.0 # CMB temperature when initial boundary = 1 inner_boundary_condition = 1 # 1 = core cooling model, 2 = prescribed heat flux, 3 = prescribed temperature inner_boundary_value = 4000 # core temperature [K], if inner_boundary_condition = 3. 
CMB heat flux [W/m^2], if if inner_boundary_condition = 2 conduction = true # enable conductive heat transfer @@ -231,13 +279,21 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" mass_coordinates = false # enable mass coordinates tsurf_poststep_change = 30 # threshold of maximum change on surface temperature event_triggering = true # enable events triggering to avoid abrupt jumps in surface temperature + bulk_modulus = 260e9 # Adiabatic bulk modulus AW-EOS parameter [Pa]. [interior.dummy] - ini_tmagma = 3500.0 # Initial magma surface temperature [K] + ini_tmagma = 3300.0 # Initial magma surface temperature [K] + tmagma_atol = 30.0 # Max absolute Tsurf change in each step + tmagma_rtol = 0.05 # Max relative Tsurf change in each step + mantle_tliq = 2700.0 # Liquidus temperature + mantle_tsol = 1700.0 # Solidus temperature + mantle_rho = 4550.0 # Mantle density [kg m-3] + mantle_cp = 1792.0 # Mantle heat capacity [J K-1 kg-1] + H_ratio = 0.0 # Radiogenic heating [W/kg] # Outgassing - physics table [outgas] - fO2_shift_IW = 0 # log10(ΔIW), atmosphere/interior boundary oxidation state + fO2_shift_IW = 4 # log10(ΔIW), atmosphere/interior boundary oxidation state module = "calliope" # Which outgassing module to use @@ -252,7 +308,10 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" include_H2 = true # Include H2 compound include_CH4 = true # Include CH4 compound include_CO = true # Include CO compound - T_floor = 700 .0 # Temperature floor applied to outgassing calculation [K]. + T_floor = 700.0 # Temperature floor applied to outgassing calculation [K]. + rtol = 0.0001 # Relative mass tolerance + xtol = 1e-06 # Absolute mass tolerance + solubility = true # Enable solubility? 
[outgas.atmodeller] some_parameter = "some_value" @@ -261,7 +320,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [delivery] # Radionuclide parameters - radio_tref = 4.55 # Reference age for concentrations [Gyr] 4.55 before need to discuss w Tim + radio_tref = 4.55 # Reference age for concentrations [Gyr] radio_K = 310.0 # ppmw of potassium (all isotopes) radio_U = 0.031 # ppmw of uranium (all isotopes) radio_Th = 0.124 # ppmw of thorium (all isotopes) @@ -279,15 +338,19 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" H_oceans = 1.0 # Hydrogen inventory in units of equivalent Earth oceans #H_ppmw = 109.0 # Hydrogen inventory in ppmw relative to mantle mass + # H_kg = 1e20 # Hydrogen inventory in kg CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system #C_ppmw = 109.0 # Carbon inventory in ppmw relative to mantle mass + # C_kg = 1e20 # Carbon inventory in kg # NH_ratio = 0.018 # N/H mass ratio in mantle/atmosphere system N_ppmw = 20.1 # Nitrogen inventory in ppmw relative to mantle mass + # N_kg = 1e20 # Nitrogen inventory in kg SH_ratio = 2.16 # S/H mass ratio in mantle/atmosphere system #S_ppmw = 235.0 # Sulfur inventory in ppmw relative to mantle mass + # S_kg = 1e20 # Sulfur inventory in kg # Set initial volatile inventory by partial pressures in atmosphere [delivery.volatiles] @@ -332,3 +395,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # Module with which to calculate the synthetic observables synthesis = "none" + + [observe.platon] + downsample = 8 # Factor to downsample opacities + clip_vmr = 1e-8 # Minimum VMR for a species to be included diff --git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py index e8b0a71c6..9bfbbf219 100644 --- a/src/proteus/grid/post_processing_grid.py +++ b/src/proteus/grid/post_processing_grid.py @@ -81,17 +81,14 @@ def load_grid_cases(grid_dir: Path): else: print(f"WARNING : Missing status file in {case.name}") - # THIS IS ONLY FOR MY 
CURRENT GRID ON HABROK - # if status in ('Unknown', 'Empty'): - # status = 'Disk quota exceeded' - + # Combine all info about simulations into a list of dictionaries combined_data.append({ 'init_parameters': init_params, 'output_values' : df, 'status' : status }) - # --- summary printout --- + # statuses = [c['status'] for c in combined_data] status_counts = pd.Series(statuses).value_counts().sort_values(ascending=False) diff --git a/src/proteus/grid/post_processing_updated.py b/src/proteus/grid/post_processing_updated.py new file mode 100644 index 000000000..37ab82919 --- /dev/null +++ b/src/proteus/grid/post_processing_updated.py @@ -0,0 +1,301 @@ +from __future__ import annotations + +import ast +import os +import re +from pathlib import Path + +import numpy as np +import pandas as pd +import toml + + +def load_grid_cases(grid_dir: Path): + """ + Load information for each simulation of a PROTEUS grid. + Read 'runtime_helpfile.csv', 'init_coupler.toml' and status + files for each simulation of the grid. + + Parameters + ---------- + grid_dir : Path or str + Path to the grid directory containing the 'case_*' folders + + Returns + ---------- + combined_data : list + List of dictionaries, each containing: + - 'init_parameters' (dict): Parameters loaded from `init_coupler.toml`. + - 'output_values' (pandas.DataFrame): Data from `runtime_helpfile.csv`. + - 'status' (str): Status string from the `status` file, or 'Unknown' if unavailable. 
+ """ + + combined_data = [] + grid_dir = Path(grid_dir) + + # Collect and sort the case directories + case_dirs = list(grid_dir.glob('case_*')) + case_dirs.sort(key=lambda p: int(p.name.split('_')[1])) + + for case in case_dirs: + runtime_file = case / 'runtime_helpfile.csv' + init_file = case / 'init_coupler.toml' + status_file = case / 'status' + + # Load init parameters + init_params = {} + if init_file.exists(): + try: + init_params = toml.load(open(init_file)) + except Exception as e: + print(f"Error reading init file in {case.name}: {e}") + + # Read runtime_helpfile.csv if available + df = None + if runtime_file.exists(): + try: + df = pd.read_csv(runtime_file, sep='\t') + except Exception as e: + print(f"WARNING : Error reading runtime_helpfile.csv for {case.name}: {e}") + + # Read status file if available + status = 'Unknown' + if status_file.exists(): + try: + raw_lines = [ln.strip() for ln in status_file.read_text(encoding='utf-8').splitlines() if ln.strip()] + if len(raw_lines) >= 2: + status = raw_lines[1] + elif raw_lines: + status = raw_lines[0] + else: + status = 'Empty' + except Exception as e: + print(f"WARNING : Error reading status file in {case.name}: {e}") + else: + print(f"WARNING : Missing status file in {case.name}") + + # Combine all info about simulations into a list of dictionaries + combined_data.append({ + 'init_parameters': init_params, + 'output_values' : df, + 'status' : status + }) + + # + statuses = [c['status'] for c in combined_data] + status_counts = pd.Series(statuses).value_counts().sort_values(ascending=False) + + print('-----------------------------------------------------------') + print(f"Total number of simulations: {len(statuses)}") + print('-----------------------------------------------------------') + print("Number of simulations per status:") + for st, count in status_counts.items(): + print(f" - {st:<45} : {count}") + print('-----------------------------------------------------------') + + return combined_data + 
+def get_grid_parameters_from_toml(toml_path: str): + """ + Extract grid parameter names and values from a PROTEUS ensemble TOML file. + + Parameters + ---------- + toml_path : str + Path to the ensemble TOML file (e.g. input/ensembles/blabla.toml) + + Returns + ------- + param_grid : dict + Dictionary where each key is a parameter name and the value is a list + of all grid values for that parameter. + """ + + if not os.path.exists(toml_path): + print(f"Error: TOML file not found at {toml_path}") + return {} + + with open(toml_path, "r") as f: + lines = f.readlines() + + param_grid = {} + + # Regex patterns + section_pattern = re.compile(r'^\s*\["(.+?)"\]\s*$') + method_pattern = re.compile(r'^\s*method\s*=\s*"(.+?)"\s*$') + values_pattern = re.compile(r'^\s*values\s*=\s*(\[.*\])\s*$') + start_pattern = re.compile(r'^\s*start\s*=\s*([0-9.eE+-]+)\s*$') + stop_pattern = re.compile(r'^\s*stop\s*=\s*([0-9.eE+-]+)\s*$') + count_pattern = re.compile(r'^\s*count\s*=\s*(\d+)\s*$') + + current_param = None + current_method = None + start = stop = count = None + + for line in lines: + line = line.strip() + + # Skip empty lines and comments + if not line or line.startswith("#"): + continue + + # Detect new parameter block + section_match = section_pattern.match(line) + if section_match: + current_param = section_match.group(1) + current_method = None + start = stop = count = None + continue + + if current_param is None: + continue + + # Detect method + method_match = method_pattern.match(line) + if method_match: + current_method = method_match.group(1) + continue + + # Direct method values + values_match = values_pattern.match(line) + if values_match and current_method == "direct": + try: + values = ast.literal_eval(values_match.group(1)) + param_grid[current_param] = values + except Exception as e: + print(f"Error parsing values for {current_param}: {e}") + continue + + # Logspace parameters + start_match = start_pattern.match(line) + if start_match: + start = 
float(start_match.group(1)) + continue + + stop_match = stop_pattern.match(line) + if stop_match: + stop = float(stop_match.group(1)) + continue + + count_match = count_pattern.match(line) + if count_match: + count = int(count_match.group(1)) + + # Once all three exist, generate logspace + if current_method == "logspace" and start is not None and stop is not None: + values = np.logspace(np.log10(start), np.log10(stop), count).tolist() + param_grid[current_param] = values + + return param_grid + +def extract_grid_output(cases_data: list, parameter_name: str): + """ + Extract a specific parameter from the 'output_values' of each simulation case. + + Parameters + ---------- + cases_data : list + List of dictionaries containing simulation data. + + parameter_name : str + The name of the parameter to extract from 'output_values'. + + Returns + ------- + parameter_values : list + A list containing the extracted values of the specified parameter for all cases of the grid. + """ + + parameter_values = [] + columns_printed = False # Flag to print columns only once + + for case_index, case in enumerate(cases_data): + df = case['output_values'] + if df is None: + print(f"Warning: No output values found for case number '{case_index}'") + parameter_values.append(np.nan) # Append NaN if no output values + continue # Skip cases with no output + if parameter_name in df.columns: + parameter_value = df[parameter_name].iloc[-1] + parameter_values.append(parameter_value) + else: + if not columns_printed: + print(f"Warning: Parameter '{parameter_name}' does not exist in case '{case['init_parameters'].get('name', 'Unknown')}'") + print(f"Available columns in this case: {', '.join(df.columns)}") + columns_printed = True + + # Print the extracted output values for the specified parameter + print(f"Extracted output (at last time step) : {parameter_name} ") + + return parameter_values + +def extract_solidification_time(cases_data: list): + """ + Extract the solidification time at the time 
step where the condition + 'Phi_global' < phi_crit is first satisfied for each planet. + + Parameters + ---------- + cases_data : list + List of dictionaries containing simulation data. + + phi_crit : float + The critical melt fraction value below which a planet is considered solidified. + A typical value is 0.005. + + Returns + ------- + solidification_times : list + A list containing the solidification times for all solidified planets of the grid. + If a planet never solidifies, it will have a NaN in the list. + """ + + solidification_times = [] + columns_printed = False + + for i, case in enumerate(cases_data): + df = case['output_values'] + # Check if the required columns exist in the dataframe + if df is None: + solidification_times.append(np.nan) # Append NaN if no output values + continue + + if 'Phi_global' in df.columns and 'Time' in df.columns: + solidification_params = case['init_parameters'].get('params.stop.solid.phi_crit') + if solidification_params is None or 'phi_crit' not in solidification_params: + raise ValueError(f"Error: 'phi_crit' not found in init_parameters of case {i}. 
") + phi_crit = solidification_params['phi_crit'] + + condition = df['Phi_global'] < phi_crit + if condition.any(): + first_index = condition.idxmax() + solid_time = df.loc[first_index, 'Time'] # Get the index of the time at which the condition is first satisfied + solidification_times.append(solid_time) + else: + solidification_times.append(np.nan) # Append NaN if condition is not satisfied + else: + if not columns_printed: + print("Warning: 'Phi_global' and/or 'Time' columns not found in some cases.") + print(f"Available columns: {', '.join(df.columns)}") + columns_printed = True + solidification_times.append(np.nan) # Append NaN if columns are missing + + # Count the number of cases with a status = '10 Completed (solidified)' + status_10_cases = [case for case in cases_data if (case.get('status') or '').strip() == 'Completed (solidified)'] + completed_count = len(status_10_cases) + # Count only valid solidification times (non-NaN) + valid_solidification_times = [time for time in solidification_times if not np.isnan(time) and time > 0.0] + valid_solidified_count = len(valid_solidification_times) + + print('-----------------------------------------------------------') + print(f"Extracted solidification times (Phi_global < {phi_crit})") + print(f"→ Found {valid_solidified_count} valid solidified cases based on Phi_global") + print(f"→ Found {completed_count} cases with status 'Completed (solidified)' ") + # Check if the number of valid solidified cases matches the number of cases with status '10 Completed (solidified)' in the grid, to be sure the extraction is correct + if valid_solidified_count != completed_count: + print("WARNING: The number of valid solidified planets does not match the number of planets with status: 'Completed (solidified)'") + else: + print("Solidified planets count matches the number of planets with status: 'Completed (solidified)'.") + print('-----------------------------------------------------------') + + return solidification_times diff 
--git a/src/proteus/grid/test_new_pp_grid.py b/src/proteus/grid/test_new_pp_grid.py new file mode 100644 index 000000000..0ab3c2420 --- /dev/null +++ b/src/proteus/grid/test_new_pp_grid.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from pathlib import Path + +import post_processing_updated as pp + +# Point this to your actual grid folder +grid_path = Path("//projects/p315557/Paper_1/DATA/Grids/escape_grid_1Msun") + +# Test function load_grid_cases +data = pp.load_grid_cases(grid_path) +print("\nFirst case keys:") +print(data[0].keys()) +print("\nFirst case status:") +print(data[0]["status"]) +print("\nFirst case init parameters (top level keys):") +print(data[0]["init_parameters"].keys()) +print("\nFirst case output dataframe head:") +print(data[0]["output_values"].head()) + +# Test function get_grid_parameters_from_toml +param_grid = pp.get_grid_parameters_from_toml('/home2/p315557/PROTEUS/input/ensembles/escape_grid_1Msun.toml') +print("\nExtracted grid parameters:\n") +for k, v in param_grid.items(): + print(f"{k} -> {v}") +print("\nTotal parameters found:", len(param_grid)) + +# Test function extract_grid_output +test_parameter = "P_surf" # <-- change this to a real column +values = pp.extract_grid_output(data, test_parameter) +print("\nExtracted Values:") +print(values) +print("\nTotal extracted values:", len(values)) + +# Test function extract_solidification_time + +try: + solid_times = pp.extract_solidification_time(data) + print("\nExtracted Solidification Times:") + print(solid_times) +except ValueError as e: + print(f"\nError caught: {e}") From 84930fc6b1ee208fed26942d9f608bc4c699a7a0 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 15 Dec 2025 14:35:05 +0100 Subject: [PATCH 060/105] update toi561b file and new try to update the post processing steps --- ...561b.toml => toi561b_grid_a_0_0106AU.toml} | 48 ++++--- input/ensembles/toi561b_grid_a_0_0287AU.toml | 69 +++++++++ input/planets/toi561b.toml | 26 ++-- 
src/proteus/grid/post_processing_updated.py | 136 +++++++++++------- src/proteus/grid/test_new_pp_grid.py | 44 +++--- 5 files changed, 216 insertions(+), 107 deletions(-) rename input/ensembles/{grid_toi561b.toml => toi561b_grid_a_0_0106AU.toml} (73%) create mode 100644 input/ensembles/toi561b_grid_a_0_0287AU.toml diff --git a/input/ensembles/grid_toi561b.toml b/input/ensembles/toi561b_grid_a_0_0106AU.toml similarity index 73% rename from input/ensembles/grid_toi561b.toml rename to input/ensembles/toi561b_grid_a_0_0106AU.toml index bfca8fbd4..9f4f4c3d7 100644 --- a/input/ensembles/grid_toi561b.toml +++ b/input/ensembles/toi561b_grid_a_0_0106AU.toml @@ -1,7 +1,7 @@ # Config file for running a grid of forward models # Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "scratch/grid_toi561b" +output = "scratch/toi561b_grid_a_0_0106AU" # Make `output` a symbolic link to this absolute location. To disable: set to empty string. symlink = "" @@ -23,20 +23,31 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 3) # Each table name must be written in double quotes. # See examples below -# Escape efficiency factor set directly -["escape.zephyrus.efficiency"] +# Semi-major axis set directly +# We investigate 2 scenarios: period = 0.44 days (a=0.0106 AU, correct one) and test at 2 days (a=0.0287 AU) to reduce escape ? 
+["orbit.semimajoraxis"] method = "direct" - values = [0.1, 0.3] + values = [0.0106] -# Planet bulk C/H ratio set directly -["delivery.elements.CH_ratio"] +# Core-Radius Fraction (CRF) set directly +["struct.corefrac"] method = "direct" - values = [0.1, 1.0, 2.0] + values = [0.40, 0.55] -# Planet bulk S/H ratio set directly -["delivery.elements.SH_ratio"] +# Bond albedo set directly +["atmos_clim.albedo_pl"] method = "direct" - values = [0.216, 2.16, 21.6] + values = [0.0, 0.6] + +# Escape efficiency factor set directly +["escape.zephyrus.efficiency"] + method = "direct" + values = [0.01, 0.1] + +# Oxygen fugacity set directly +["outgas.fO2_shift_IW"] + method = "direct" + values = [0, 4] # Hydrogen inventory set by arange ["delivery.elements.H_oceans"] @@ -45,17 +56,14 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 3) stop = 1000.0 count = 4 -# Core-Mass Fraction (CMF) set directly -["struct.corefrac"] +# Planet bulk C/H ratio set directly +["delivery.elements.CH_ratio"] method = "direct" - values = [0.25, 0.40, 0.55] + values = [0.1, 1.0, 2.0] -# Bond albedo set directly -["atmos_clim.albedo_pl"] +# Planet bulk S/H ratio set directly +["delivery.elements.SH_ratio"] method = "direct" - values = [0.0, 0.2, 0.4, 0.6] + values = [0.216, 2.16, 21.6] -# # Semi-major axis set directly to have period = 0.44 days (correct) and test at 2 days -# ["orbit.semimajoraxis"] -# method = "direct" -# values = [0.0106, 0.0287] +# Stellar rotation period to test ? diff --git a/input/ensembles/toi561b_grid_a_0_0287AU.toml b/input/ensembles/toi561b_grid_a_0_0287AU.toml new file mode 100644 index 000000000..0326e12a7 --- /dev/null +++ b/input/ensembles/toi561b_grid_a_0_0287AU.toml @@ -0,0 +1,69 @@ +# Config file for running a grid of forward models + +# Path to output folder where grid will be saved (relative to PROTEUS output folder) +output = "scratch/toi561b_grid_a_0_0287AU" + +# Make `output` a symbolic link to this absolute location. To disable: set to empty string. 
+symlink = "" + +# Path to base (reference) config file relative to PROTEUS root folder +ref_config = "input/planets/toi561b.toml" + +# Use SLURM? +use_slurm = true + +# Execution limits +max_jobs = 500 # maximum number of concurrent tasks (e.g. 500 on Habrok) +max_days = 5 # maximum number of days to run (e.g. 1) +max_mem = 3 # maximum memory per CPU in GB (e.g. 3) + +# Now define grid axes... +# Each axis must be a new section (table) in this file. +# Each table corresponds to the name of the parameter to be varied. +# Each table name must be written in double quotes. +# See examples below + +# Semi-major axis set directly +# We investigate 2 scenarios: period = 0.44 days (a=0.0106 AU, correct one) and test at 2 days (a=0.0287 AU) to reduce escape ? +["orbit.semimajoraxis"] + method = "direct" + values = [0.0287] + +# Core-Radius Fraction (CRF) set directly +["struct.corefrac"] + method = "direct" + values = [0.40, 0.55] + +# Bond albedo set directly +["atmos_clim.albedo_pl"] + method = "direct" + values = [0.0, 0.6] + +# Escape efficiency factor set directly +["escape.zephyrus.efficiency"] + method = "direct" + values = [0.01, 0.1] + +# Oxygen fugacity set directly +["outgas.fO2_shift_IW"] + method = "direct" + values = [0, 4] + +# Hydrogen inventory set by arange +["delivery.elements.H_oceans"] + method = "logspace" + start = 1.0 + stop = 1000.0 + count = 4 + +# Planet bulk C/H ratio set directly +["delivery.elements.CH_ratio"] + method = "direct" + values = [0.1, 1.0, 2.0] + +# Planet bulk S/H ratio set directly +["delivery.elements.SH_ratio"] + method = "direct" + values = [0.216, 2.16, 21.6] + +# Stellar rotation period to test ? 
diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml index 8690348f8..0330a42d9 100644 --- a/input/planets/toi561b.toml +++ b/input/planets/toi561b.toml @@ -10,17 +10,17 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params.out] path = "scratch/toi561b" logging = "INFO" - plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations - plot_fmt = "png" # Plotting image file format, "png" or "pdf" recommended - write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations - archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive - remove_sf = false # Remove SOCRATES spectral file when simulation ends. + plot_mod = 50 # Plotting frequency, 0: wait until completion | n: every n iterations + plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended + write_mod = 50 # Write CSV frequency, 0: wait until completion | n: every n iterations + archive_mod = 60 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive + remove_sf = true # Remove SOCRATES spectral file when simulation ends. 
# time-stepping [params.dt] minimum = 3e2 # yr, minimum time-step minimum_rel = 1e-5 # relative minimum time-step [dimensionless] - maximum = 1e7 # yr, maximum time-step + maximum = 5e8 # yr, maximum time-step # if higher like 1e9, will produce too few snapshots initial = 1e3 # yr, inital step size starspec = 1e8 # yr, interval to re-calculate the stellar spectrum starinst = 1e2 # yr, interval to re-calculate the instellation @@ -153,7 +153,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # Atmosphere - physics table [atmos_clim] - prevent_warming = false # do not allow the planet to heat up + prevent_warming = true # do not allow the planet to heat up surface_d = 0.01 # m, conductive skin thickness surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity cloud_enabled = false # enable water cloud radiative effects @@ -161,7 +161,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" surf_state = "fixed" # surface scheme: "mixed_layer" | "fixed" | "skin" surf_greyalbedo = 0.1 # surface grey albedo albedo_pl = 0.0 # Enforced Bond albedo (do not use with `rayleigh = true`) from Lacedelli et al. 2022 - rayleigh = true # Enable rayleigh scattering + rayleigh = false # Enable rayleigh scattering tmp_minimum = 0.5 # temperature floor on solver tmp_maximum = 5000.0 # temperature ceiling on solver @@ -172,7 +172,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" p_top = 1.0e-5 # bar, top of atmosphere grid pressure p_obs = 0.02 # bar, level probed in transmission spectral_group = "Honeyside" # which gas opacities to include - spectral_bands = "256" # how many spectral bands? + spectral_bands = "48" # how many spectral bands? 
num_levels = 50 # Number of atmospheric grid levels chemistry = "none" # "none" | "eq" surf_material = "greybody" # surface material file for scattering @@ -183,12 +183,12 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" surf_roughness = 1e-3 # characteristic surface roughness [m] surf_windspeed = 2.0 # characteristic surface wind speed [m/s]. rainout = true # include volatile condensation/evaporation aloft - latent_heat = false # include latent heat release when `rainout=true`? + latent_heat = true # include latent heat release when `rainout=true`? oceans = true # form liquid oceans at planet surface? convection = true # include convective heat transport, with MLT conduction = true # include conductive heat transport, with Fourier's law sens_heat = true # include sensible heat flux near surface, with TKE scheme - real_gas = true # use real-gas equations of state + real_gas = true # use real-gas equations of state psurf_thresh = 0.1 # bar, surface pressure where we switch to 'transparent' mode dx_max_ini = 300.0 # initial maximum temperature step [kelvin] allowed by solver dx_max = 35.0 # maximum temperature step [kelvin] allowed by solver @@ -208,7 +208,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" p_top = 1.0e-5 # bar, top of atmosphere grid pressure p_obs = 1.0e-3 # bar, observed pressure level spectral_group = "Honeyside" # which gas opacities to include - spectral_bands = "256" # how many spectral bands? + spectral_bands = "48" # how many spectral bands? 
F_atm_bc = 0 # measure outgoing flux at: (0) TOA | (1) Surface num_levels = 60 # Number of atmospheric grid levels tropopause = "none" # none | skin | dynamic @@ -237,7 +237,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" grain_size = 0.1 # crystal settling grain size [m] F_initial = 1e5 # Initial heat flux guess [W m-2] radiogenic_heat = true # enable radiogenic heat production - tidal_heat = true # enable tidal heat production + tidal_heat = false # enable tidal heat production rheo_phi_loc = 0.4 # Centre of rheological transition rheo_phi_wid = 0.15 # Width of rheological transition bulk_modulus = 260e9 # Bulk modulus [Pa] diff --git a/src/proteus/grid/post_processing_updated.py b/src/proteus/grid/post_processing_updated.py index 37ab82919..80c6146d4 100644 --- a/src/proteus/grid/post_processing_updated.py +++ b/src/proteus/grid/post_processing_updated.py @@ -95,7 +95,7 @@ def load_grid_cases(grid_dir: Path): return combined_data -def get_grid_parameters_from_toml(toml_path: str): +def get_grid_parameters_from_toml(grid_dir: str): """ Extract grid parameter names and values from a PROTEUS ensemble TOML file. @@ -111,6 +111,9 @@ def get_grid_parameters_from_toml(toml_path: str): of all grid values for that parameter. """ + grid_dir = Path(grid_dir) + toml_path = grid_dir / "copy.grid.toml" + if not os.path.exists(toml_path): print(f"Error: TOML file not found at {toml_path}") return {} @@ -229,73 +232,108 @@ def extract_grid_output(cases_data: list, parameter_name: str): return parameter_values -def extract_solidification_time(cases_data: list): - """ - Extract the solidification time at the time step where the condition - 'Phi_global' < phi_crit is first satisfied for each planet. +def load_phi_crit(grid_dir: str | Path): + grid_dir = Path(grid_dir) + ref_file = grid_dir / "ref_config.toml" - Parameters - ---------- - cases_data : list - List of dictionaries containing simulation data. 
+ if not ref_file.exists(): + raise FileNotFoundError(f"ref_config.toml not found in {grid_dir}") - phi_crit : float - The critical melt fraction value below which a planet is considered solidified. - A typical value is 0.005. + ref = toml.load(open(ref_file)) - Returns - ------- - solidification_times : list - A list containing the solidification times for all solidified planets of the grid. - If a planet never solidifies, it will have a NaN in the list. - """ + # Navigate structure safely + try: + phi_crit = ref["params"]["stop"]["solid"]["phi_crit"] + except KeyError: + raise KeyError("phi_crit not found in ref_config.toml") + + return phi_crit + +def extract_solidification_time(cases_data: list, grid_dir: str | Path): + # Load phi_crit once + phi_crit = load_phi_crit(grid_dir) solidification_times = [] columns_printed = False for i, case in enumerate(cases_data): df = case['output_values'] - # Check if the required columns exist in the dataframe + if df is None: - solidification_times.append(np.nan) # Append NaN if no output values + solidification_times.append(np.nan) continue if 'Phi_global' in df.columns and 'Time' in df.columns: - solidification_params = case['init_parameters'].get('params.stop.solid.phi_crit') - if solidification_params is None or 'phi_crit' not in solidification_params: - raise ValueError(f"Error: 'phi_crit' not found in init_parameters of case {i}. 
") - phi_crit = solidification_params['phi_crit'] - condition = df['Phi_global'] < phi_crit + if condition.any(): - first_index = condition.idxmax() - solid_time = df.loc[first_index, 'Time'] # Get the index of the time at which the condition is first satisfied - solidification_times.append(solid_time) + idx = condition.idxmax() + solidification_times.append(df.loc[idx, 'Time']) else: - solidification_times.append(np.nan) # Append NaN if condition is not satisfied + solidification_times.append(np.nan) + else: if not columns_printed: - print("Warning: 'Phi_global' and/or 'Time' columns not found in some cases.") - print(f"Available columns: {', '.join(df.columns)}") + print("Warning: Missing Phi_global or Time column.") + print("Columns available:", df.columns.tolist()) columns_printed = True - solidification_times.append(np.nan) # Append NaN if columns are missing + solidification_times.append(np.nan) - # Count the number of cases with a status = '10 Completed (solidified)' - status_10_cases = [case for case in cases_data if (case.get('status') or '').strip() == 'Completed (solidified)'] - completed_count = len(status_10_cases) - # Count only valid solidification times (non-NaN) - valid_solidification_times = [time for time in solidification_times if not np.isnan(time) and time > 0.0] - valid_solidified_count = len(valid_solidification_times) + return solidification_times - print('-----------------------------------------------------------') - print(f"Extracted solidification times (Phi_global < {phi_crit})") - print(f"→ Found {valid_solidified_count} valid solidified cases based on Phi_global") - print(f"→ Found {completed_count} cases with status 'Completed (solidified)' ") - # Check if the number of valid solidified cases matches the number of cases with status '10 Completed (solidified)' in the grid, to be sure the extraction is correct - if valid_solidified_count != completed_count: - print("WARNING: The number of valid solidified planets does not match the 
number of planets with status: 'Completed (solidified)'") - else: - print("Solidified planets count matches the number of planets with status: 'Completed (solidified)'.") - print('-----------------------------------------------------------') - return solidification_times +def export_simulation_summary(cases_data: list, grid_dir: str | Path, param_grid: dict, output_file: str | Path): + """ + Export a summary of simulation cases to a CSV file with tested grid parameters only. + + Parameters + ---------- + cases_data : list + List of dictionaries containing simulation data. + grid_dir : str or Path + Path to the grid folder containing `cfgs/case_XXXXXX.toml` files. + param_grid : dict + Dictionary of tested grid parameters (from get_grid_parameters_from_toml). + Only these parameters will be included from each case. + output_file : str or Path + Path to the output CSV file. + """ + grid_dir = Path(grid_dir) + output_file = Path(output_file) + + summary_rows = [] + tested_param_names = list(param_grid.keys()) + + for case_index, case in enumerate(cases_data): + row = {} + + # Case number and status + row['case_number'] = case_index + row['status'] = case['status'] + + # Load tested parameters from the case TOML + case_toml = grid_dir / f"cfgs/case_{case_index:06d}.toml" + if case_toml.exists(): + try: + case_params = toml.load(open(case_toml)) + for param in tested_param_names: + row[param] = case_params.get(param, None) + except Exception as e: + print(f"Warning: Could not read {case_toml}: {e}") + else: + print(f"Warning: TOML file not found for case {case_index}: {case_toml}") + + # Output values (last time step) + df = case['output_values'] + if df is not None: + for col in df.columns: + row[col] = df[col].iloc[-1] + + summary_rows.append(row) + + # Create DataFrame + summary_df = pd.DataFrame(summary_rows) + + # Save as tab-separated CSV + summary_df.to_csv(output_file, sep='\t', index=False) + print(f"Simulation summary exported to {output_file}") diff --git 
a/src/proteus/grid/test_new_pp_grid.py b/src/proteus/grid/test_new_pp_grid.py index 0ab3c2420..e6944ceeb 100644 --- a/src/proteus/grid/test_new_pp_grid.py +++ b/src/proteus/grid/test_new_pp_grid.py @@ -5,38 +5,32 @@ import post_processing_updated as pp # Point this to your actual grid folder -grid_path = Path("//projects/p315557/Paper_1/DATA/Grids/escape_grid_1Msun") +grid_name = "escape_grid_1Msun" +grid_path = Path(f"/projects/p315557/Paper_1/DATA/Grids/{grid_name}") # Test function load_grid_cases data = pp.load_grid_cases(grid_path) -print("\nFirst case keys:") -print(data[0].keys()) -print("\nFirst case status:") -print(data[0]["status"]) -print("\nFirst case init parameters (top level keys):") -print(data[0]["init_parameters"].keys()) -print("\nFirst case output dataframe head:") -print(data[0]["output_values"].head()) # Test function get_grid_parameters_from_toml -param_grid = pp.get_grid_parameters_from_toml('/home2/p315557/PROTEUS/input/ensembles/escape_grid_1Msun.toml') -print("\nExtracted grid parameters:\n") -for k, v in param_grid.items(): - print(f"{k} -> {v}") -print("\nTotal parameters found:", len(param_grid)) +param_grid = pp.get_grid_parameters_from_toml(grid_path) +print("\nExtracted grid parameters:", param_grid) # Test function extract_grid_output -test_parameter = "P_surf" # <-- change this to a real column -values = pp.extract_grid_output(data, test_parameter) -print("\nExtracted Values:") -print(values) -print("\nTotal extracted values:", len(values)) +output_get = "P_surf", "T_surf" +for output in output_get: + values = pp.extract_grid_output(data, output) + #print("\nExtracted output values for parameter", output) + #print(values) + +# Test function to extract Phi_crit +phi_crit_values = pp.load_phi_crit(grid_path) +#print("\nExtracted Phi_crit Values:") +#print(phi_crit_values) # Test function extract_solidification_time +solid_times = pp.extract_solidification_time(data, grid_path) +#print("\nExtracted Solidification Times:") 
+#print(solid_times) -try: - solid_times = pp.extract_solidification_time(data) - print("\nExtracted Solidification Times:") - print(solid_times) -except ValueError as e: - print(f"\nError caught: {e}") +# Test function extract_grid_output_at_solidificationcreate csv +pp.export_simulation_summary(data, grid_path, param_grid, "nogit_csv_output/simulation_summary.csv") From 32183d456cf78926cb0f28e1a6cb26fef592468a Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 15 Dec 2025 16:26:42 +0100 Subject: [PATCH 061/105] generate csv file following harrison guidelines --- src/proteus/grid/post_processing_updated.py | 336 +++++++++++++------- src/proteus/grid/test_new_pp_grid.py | 29 +- 2 files changed, 225 insertions(+), 140 deletions(-) diff --git a/src/proteus/grid/post_processing_updated.py b/src/proteus/grid/post_processing_updated.py index 80c6146d4..25589160a 100644 --- a/src/proteus/grid/post_processing_updated.py +++ b/src/proteus/grid/post_processing_updated.py @@ -1,8 +1,5 @@ from __future__ import annotations -import ast -import os -import re from pathlib import Path import numpy as np @@ -95,101 +92,66 @@ def load_grid_cases(grid_dir: Path): return combined_data -def get_grid_parameters_from_toml(grid_dir: str): +def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): """ - Extract grid parameter names and values from a PROTEUS ensemble TOML file. + Extract tested grid parameters per case using: + - copy.grid.toml to determine which parameters were varied + - init_parameters already loaded by load_grid_cases Parameters ---------- - toml_path : str - Path to the ensemble TOML file (e.g. input/ensembles/blabla.toml) + cases_data : list + Output of load_grid_cases. + grid_dir : str or Path + Path to the grid directory containing copy.grid.toml. Returns ------- - param_grid : dict - Dictionary where each key is a parameter name and the value is a list - of all grid values for that parameter. 
+ case_params : dict + Dictionary mapping case index -> {parameter_name: value} + tested_params : dict + Dictionary of tested grid parameters and their grid values + (directly from copy.grid.toml) """ grid_dir = Path(grid_dir) - toml_path = grid_dir / "copy.grid.toml" - - if not os.path.exists(toml_path): - print(f"Error: TOML file not found at {toml_path}") - return {} - - with open(toml_path, "r") as f: - lines = f.readlines() - param_grid = {} + # --------------------------------------------------------- + # 1) Load tested grid parameter definitions + # --------------------------------------------------------- + tested_params = toml.load(grid_dir / "copy.grid.toml") - # Regex patterns - section_pattern = re.compile(r'^\s*\["(.+?)"\]\s*$') - method_pattern = re.compile(r'^\s*method\s*=\s*"(.+?)"\s*$') - values_pattern = re.compile(r'^\s*values\s*=\s*(\[.*\])\s*$') - start_pattern = re.compile(r'^\s*start\s*=\s*([0-9.eE+-]+)\s*$') - stop_pattern = re.compile(r'^\s*stop\s*=\s*([0-9.eE+-]+)\s*$') - count_pattern = re.compile(r'^\s*count\s*=\s*(\d+)\s*$') + # Keep only actual grid parameters (ignore control keys) + tested_params = { + k: v for k, v in tested_params.items() + if "." 
in k + } - current_param = None - current_method = None - start = stop = count = None + grid_param_paths = list(tested_params.keys()) - for line in lines: - line = line.strip() + # --------------------------------------------------------- + # 2) Extract those parameters from loaded cases + # --------------------------------------------------------- + case_params = {} - # Skip empty lines and comments - if not line or line.startswith("#"): - continue - - # Detect new parameter block - section_match = section_pattern.match(line) - if section_match: - current_param = section_match.group(1) - current_method = None - start = stop = count = None - continue + for idx, case in enumerate(cases_data): + params_for_case = {} + init_params = case["init_parameters"] - if current_param is None: - continue + for path in grid_param_paths: + keys = path.split(".") + val = init_params - # Detect method - method_match = method_pattern.match(line) - if method_match: - current_method = method_match.group(1) - continue - - # Direct method values - values_match = values_pattern.match(line) - if values_match and current_method == "direct": try: - values = ast.literal_eval(values_match.group(1)) - param_grid[current_param] = values - except Exception as e: - print(f"Error parsing values for {current_param}: {e}") - continue - - # Logspace parameters - start_match = start_pattern.match(line) - if start_match: - start = float(start_match.group(1)) - continue - - stop_match = stop_pattern.match(line) - if stop_match: - stop = float(stop_match.group(1)) - continue - - count_match = count_pattern.match(line) - if count_match: - count = int(count_match.group(1)) + for k in keys: + val = val[k] + params_for_case[path] = val + except (KeyError, TypeError): + params_for_case[path] = None - # Once all three exist, generate logspace - if current_method == "logspace" and start is not None and stop is not None: - values = np.logspace(np.log10(start), np.log10(stop), count).tolist() - 
param_grid[current_param] = values + case_params[idx] = params_for_case - return param_grid + return case_params def extract_grid_output(cases_data: list, parameter_name: str): """ @@ -281,59 +243,203 @@ def extract_solidification_time(cases_data: list, grid_dir: str | Path): return solidification_times - -def export_simulation_summary(cases_data: list, grid_dir: str | Path, param_grid: dict, output_file: str | Path): +def generate_summary_csv( + cases_data: list, + case_params: dict, + grid_dir: str | Path, + grid_name: str +): + """ + Export a summary of simulation cases to a TSV file using + already-loaded data only. + + Column order: + - case metadata + - tested grid parameters + - runtime_helpfile.csv (last timestep) + - solidification_time """ - Export a summary of simulation cases to a CSV file with tested grid parameters only. + # --------------------------------------------------------- + # Compute solidification times ONCE + # --------------------------------------------------------- + solidification_times = extract_solidification_time(cases_data, grid_dir) - Parameters - ---------- - cases_data : list - List of dictionaries containing simulation data. - grid_dir : str or Path - Path to the grid folder containing `cfgs/case_XXXXXX.toml` files. - param_grid : dict - Dictionary of tested grid parameters (from get_grid_parameters_from_toml). - Only these parameters will be included from each case. - output_file : str or Path - Path to the output CSV file. 
+ summary_rows = [] + + for case_index, case in enumerate(cases_data): + row = {} + + # ----------------------------------------------------- + # Case metadata + # ----------------------------------------------------- + row["case_number"] = case_index + row["status"] = case["status"] + + # ----------------------------------------------------- + # Tested grid parameters + # ----------------------------------------------------- + params = case_params.get(case_index, {}) + for k, v in params.items(): + row[k] = v + + # ----------------------------------------------------- + # Output values (last timestep) + # ----------------------------------------------------- + df = case["output_values"] + if df is not None: + for col in df.columns: + row[col] = df[col].iloc[-1] + + # ----------------------------------------------------- + # Solidification time (LAST column) + # ----------------------------------------------------- + row["solidification_time"] = solidification_times[case_index] + + summary_rows.append(row) + + # --------------------------------------------------------- + # Create DataFrame and save + # --------------------------------------------------------- + summary_df = pd.DataFrame(summary_rows) + output_dir = grid_dir / "post_processing" / "extracted_data" + output_dir.mkdir(parents=True, exist_ok=True) + output_file = output_dir / f"{grid_name}_final_extracted_data_all.csv" + summary_df.to_csv(output_file, sep="\t", index=False) + +def generate_completed_summary_csv( + cases_data: list, + case_params: dict, + grid_dir: str | Path, + grid_name: str +): """ - grid_dir = Path(grid_dir) - output_file = Path(output_file) + Export a summary of simulation cases to a TSV file, but only + include cases whose status starts with 'completed'. 
+ + Column order: + - case metadata + - tested grid parameters + - runtime_helpfile.csv (last timestep) + - solidification_time + """ + # --------------------------------------------------------- + # Compute solidification times ONCE + # --------------------------------------------------------- + solidification_times = extract_solidification_time(cases_data, grid_dir) summary_rows = [] - tested_param_names = list(param_grid.keys()) for case_index, case in enumerate(cases_data): + status = case.get("status", "").lower() + if not status.startswith("completed"): + continue # skip non-completed cases + row = {} - # Case number and status - row['case_number'] = case_index - row['status'] = case['status'] + # ----------------------------------------------------- + # Case metadata + # ----------------------------------------------------- + row["case_number"] = case_index + row["status"] = case["status"] + + # ----------------------------------------------------- + # Tested grid parameters + # ----------------------------------------------------- + params = case_params.get(case_index, {}) + for k, v in params.items(): + row[k] = v + + # ----------------------------------------------------- + # Output values (last timestep) + # ----------------------------------------------------- + df = case["output_values"] + if df is not None: + for col in df.columns: + row[col] = df[col].iloc[-1] - # Load tested parameters from the case TOML - case_toml = grid_dir / f"cfgs/case_{case_index:06d}.toml" - if case_toml.exists(): - try: - case_params = toml.load(open(case_toml)) - for param in tested_param_names: - row[param] = case_params.get(param, None) - except Exception as e: - print(f"Warning: Could not read {case_toml}: {e}") - else: - print(f"Warning: TOML file not found for case {case_index}: {case_toml}") + # ----------------------------------------------------- + # Solidification time (LAST column) + # ----------------------------------------------------- + 
row["solidification_time"] = solidification_times[case_index] - # Output values (last time step) - df = case['output_values'] + summary_rows.append(row) + + # --------------------------------------------------------- + # Create DataFrame and save + # --------------------------------------------------------- + summary_df = pd.DataFrame(summary_rows) + output_dir = grid_dir / "post_processing" / "extracted_data" + output_dir.mkdir(parents=True, exist_ok=True) + + # Updated CSV name to indicate filtered results + output_file = output_dir / f"{grid_name}_final_extracted_data_completed.csv" + summary_df.to_csv(output_file, sep="\t", index=False) + +def generate_running_error_summary_csv( + cases_data: list, + case_params: dict, + grid_dir: str | Path, + grid_name: str +): + """ + Export a summary of simulation cases to a TSV file, but only + include cases whose status starts with 'Running' or 'Error'. + + Column order: + - case metadata + - tested grid parameters + - runtime_helpfile.csv (last timestep) + - solidification_time + """ + # --------------------------------------------------------- + # Compute solidification times ONCE + # --------------------------------------------------------- + solidification_times = extract_solidification_time(cases_data, grid_dir) + + summary_rows = [] + + for case_index, case in enumerate(cases_data): + status = case.get("status", "").lower() + if not (status.startswith("running") or status.startswith("error")): + continue # skip cases that are neither running nor error + + row = {} + + # ----------------------------------------------------- + # Case metadata + # ----------------------------------------------------- + row["case_number"] = case_index + row["status"] = case["status"] + + # ----------------------------------------------------- + # Tested grid parameters + # ----------------------------------------------------- + params = case_params.get(case_index, {}) + for k, v in params.items(): + row[k] = v + + # 
----------------------------------------------------- + # Output values (last timestep) + # ----------------------------------------------------- + df = case["output_values"] if df is not None: for col in df.columns: row[col] = df[col].iloc[-1] + # ----------------------------------------------------- + # Solidification time (LAST column) + # ----------------------------------------------------- + row["solidification_time"] = solidification_times[case_index] + summary_rows.append(row) - # Create DataFrame + # --------------------------------------------------------- + # Create DataFrame and save + # --------------------------------------------------------- summary_df = pd.DataFrame(summary_rows) + output_dir = grid_dir / "post_processing" / "extracted_data" + output_dir.mkdir(parents=True, exist_ok=True) - # Save as tab-separated CSV - summary_df.to_csv(output_file, sep='\t', index=False) - print(f"Simulation summary exported to {output_file}") + # Updated CSV name to indicate filtered results + output_file = output_dir / f"{grid_name}_final_extracted_data_running_error.csv" + summary_df.to_csv(output_file, sep="\t", index=False) diff --git a/src/proteus/grid/test_new_pp_grid.py b/src/proteus/grid/test_new_pp_grid.py index e6944ceeb..ca1aac2c8 100644 --- a/src/proteus/grid/test_new_pp_grid.py +++ b/src/proteus/grid/test_new_pp_grid.py @@ -8,29 +8,8 @@ grid_name = "escape_grid_1Msun" grid_path = Path(f"/projects/p315557/Paper_1/DATA/Grids/{grid_name}") -# Test function load_grid_cases data = pp.load_grid_cases(grid_path) - -# Test function get_grid_parameters_from_toml -param_grid = pp.get_grid_parameters_from_toml(grid_path) -print("\nExtracted grid parameters:", param_grid) - -# Test function extract_grid_output -output_get = "P_surf", "T_surf" -for output in output_get: - values = pp.extract_grid_output(data, output) - #print("\nExtracted output values for parameter", output) - #print(values) - -# Test function to extract Phi_crit -phi_crit_values = 
pp.load_phi_crit(grid_path) -#print("\nExtracted Phi_crit Values:") -#print(phi_crit_values) - -# Test function extract_solidification_time -solid_times = pp.extract_solidification_time(data, grid_path) -#print("\nExtracted Solidification Times:") -#print(solid_times) - -# Test function extract_grid_output_at_solidificationcreate csv -pp.export_simulation_summary(data, grid_path, param_grid, "nogit_csv_output/simulation_summary.csv") +case_params = pp.get_tested_grid_parameters(data,grid_path) +pp.generate_summary_csv(data,case_params,grid_path,grid_name) +pp.generate_completed_summary_csv(data,case_params,grid_path,grid_name) +pp.generate_running_error_summary_csv(data,case_params,grid_path,grid_name) From 145ce1c9b0fcfa616f2aa860f3ba2687cca77658 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 15 Dec 2025 17:06:40 +0100 Subject: [PATCH 062/105] plot status grid ok --- src/proteus/grid/post_processing_updated.py | 95 ++++++++++++++++++++- src/proteus/grid/test_new_pp_grid.py | 10 ++- 2 files changed, 100 insertions(+), 5 deletions(-) diff --git a/src/proteus/grid/post_processing_updated.py b/src/proteus/grid/post_processing_updated.py index 25589160a..ddb752355 100644 --- a/src/proteus/grid/post_processing_updated.py +++ b/src/proteus/grid/post_processing_updated.py @@ -2,10 +2,15 @@ from pathlib import Path +import matplotlib.pyplot as plt import numpy as np import pandas as pd +import seaborn as sns import toml +# --------------------------------------------------------- +# Data loading and processing functions +# --------------------------------------------------------- def load_grid_cases(grid_dir: Path): """ @@ -151,7 +156,7 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): case_params[idx] = params_for_case - return case_params + return case_params, tested_params def extract_grid_output(cases_data: list, parameter_name: str): """ @@ -443,3 +448,91 @@ def generate_running_error_summary_csv( # Updated CSV name to indicate 
filtered results output_file = output_dir / f"{grid_name}_final_extracted_data_running_error.csv" summary_df.to_csv(output_file, sep="\t", index=False) + +# --------------------------------------------------------- +# Plotting functions +# --------------------------------------------------------- + +def plot_grid_status(cases_data: list, grid_dir: str | Path, grid_name: str): + """ + Plot the status of simulations from the PROTEUS grid with improved x-axis readability. + + Parameters + ---------- + cases_data : list + List of dictionaries returned by `load_grid_cases`. + + plot_dir : Path + Path to the plots directory. + + grid_name : str + Name of the grid, used for the plot title. + + status_colors : dict, optional + A dictionary mapping statuses to specific colors. If None, a default palette is used. + """ + + # Extract and clean statuses + statuses = [case.get('status', 'Unknown') for case in cases_data] + statuses = pd.Series(statuses, name='Status') + status_counts = statuses.value_counts().sort_values(ascending=False) + + # Set colors for the bars + palette = sns.color_palette("Accent", len(status_counts)) + formatted_status_keys = [s.replace(" (", " \n (") for s in status_counts.index] + palette = dict(zip(formatted_status_keys, palette)) + + # Prepare dataframe for plotting + plot_df = pd.DataFrame({ + 'Status': formatted_status_keys, + 'Count': status_counts.values + }) + + plt.figure(figsize=(11, 7)) + ax = sns.barplot( + data=plot_df, + x='Status', + y='Count', + hue='Status', + palette=palette, + dodge=False, + edgecolor='black' + ) + + # Remove legend if it was created + if ax.legend_: + ax.legend_.remove() + + # Add value labels above bars + total_simulations = len(cases_data) + for i, count in enumerate(status_counts.values): + percentage = (count / total_simulations) * 100 + ax.text( + i, count + 1, + f"{count} ({percentage:.1f}%)", + ha='center', va='bottom', fontsize=14 + ) + + # Boxed total in upper right + plt.gca().text( + 0.97, 0.94, + 
f"Total number of simulations : {total_simulations}", + transform=plt.gca().transAxes, + ha='right', va='top', + fontsize=16, + bbox=dict(boxstyle="round,pad=0.3", fc="white", ec="black", lw=1) + ) + + plt.grid(alpha=0.2, axis='y') + plt.title(f"Simulation status summary for grid {grid_name}", fontsize=16) + plt.xlabel("Simulation status", fontsize=16) + plt.ylabel("Number of simulations", fontsize=16) + plt.yticks(fontsize=14) + plt.xticks(fontsize=14) + plt.tight_layout() + + output_dir = grid_dir / "post_processing" / "grid_plots" + output_dir.mkdir(parents=True, exist_ok=True) + output_file = output_dir / f"{grid_name}_summary_grid_statuses.png" + plt.savefig(output_file, dpi=300) + plt.close() diff --git a/src/proteus/grid/test_new_pp_grid.py b/src/proteus/grid/test_new_pp_grid.py index ca1aac2c8..ad0f2fc13 100644 --- a/src/proteus/grid/test_new_pp_grid.py +++ b/src/proteus/grid/test_new_pp_grid.py @@ -9,7 +9,9 @@ grid_path = Path(f"/projects/p315557/Paper_1/DATA/Grids/{grid_name}") data = pp.load_grid_cases(grid_path) -case_params = pp.get_tested_grid_parameters(data,grid_path) -pp.generate_summary_csv(data,case_params,grid_path,grid_name) -pp.generate_completed_summary_csv(data,case_params,grid_path,grid_name) -pp.generate_running_error_summary_csv(data,case_params,grid_path,grid_name) +# case_params, tested_params = pp.get_tested_grid_parameters(data,grid_path) +# pp.generate_summary_csv(data,case_params,grid_path,grid_name) +# pp.generate_completed_summary_csv(data,case_params,grid_path,grid_name) +# pp.generate_running_error_summary_csv(data,case_params,grid_path,grid_name) + +pp.plot_grid_status(data,grid_path,grid_name) From 6d12a5397c825238bd155fd8a451a96a9c825ead Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Mon, 15 Dec 2025 19:04:16 +0100 Subject: [PATCH 063/105] almost there for ecdf --- src/proteus/grid/post_processing_updated.py | 222 +++++++++++++++++++- src/proteus/grid/test_new_pp_grid.py | 73 ++++++- 2 files changed, 279 insertions(+), 16 
deletions(-) diff --git a/src/proteus/grid/post_processing_updated.py b/src/proteus/grid/post_processing_updated.py index ddb752355..942f9074c 100644 --- a/src/proteus/grid/post_processing_updated.py +++ b/src/proteus/grid/post_processing_updated.py @@ -2,6 +2,7 @@ from pathlib import Path +import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd @@ -124,13 +125,14 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): # --------------------------------------------------------- # 1) Load tested grid parameter definitions # --------------------------------------------------------- - tested_params = toml.load(grid_dir / "copy.grid.toml") + raw_params = toml.load(grid_dir / "copy.grid.toml") - # Keep only actual grid parameters (ignore control keys) - tested_params = { - k: v for k, v in tested_params.items() - if "." in k - } + # Keep only the parameters and their values (ignore 'method' keys) + tested_params = {} + for key, value in raw_params.items(): + if isinstance(value, dict) and "values" in value: + # Only store the 'values' list + tested_params[key] = value["values"] grid_param_paths = list(tested_params.keys()) @@ -507,8 +509,9 @@ def plot_grid_status(cases_data: list, grid_dir: str | Path, grid_name: str): total_simulations = len(cases_data) for i, count in enumerate(status_counts.values): percentage = (count / total_simulations) * 100 + offset = 0.005 * status_counts.max() # 1% of the max count ax.text( - i, count + 1, + i, count + offset, f"{count} ({percentage:.1f}%)", ha='center', va='bottom', fontsize=14 ) @@ -519,8 +522,7 @@ def plot_grid_status(cases_data: list, grid_dir: str | Path, grid_name: str): f"Total number of simulations : {total_simulations}", transform=plt.gca().transAxes, ha='right', va='top', - fontsize=16, - bbox=dict(boxstyle="round,pad=0.3", fc="white", ec="black", lw=1) + fontsize=16 ) plt.grid(alpha=0.2, axis='y') @@ -533,6 +535,206 @@ def plot_grid_status(cases_data: list, 
grid_dir: str | Path, grid_name: str): output_dir = grid_dir / "post_processing" / "grid_plots" output_dir.mkdir(parents=True, exist_ok=True) - output_file = output_dir / f"{grid_name}_summary_grid_statuses.png" + output_file = output_dir / f"summary_grid_statuses_{grid_name}.png" plt.savefig(output_file, dpi=300) plt.close() + +def group_output_by_parameter(df,grid_parameters,outputs): + """ + Groups output values (like solidification times) by a specific grid parameter. + + Parameters + ---------- + df : pd.DataFrame + DataFrame containing simulation results including value of the grid parameter and the corresponding extracted output. + + grid_parameters : str + Column name of the grid parameter to group by (like 'escape.zephyrus.Pxuv'). + + outputs : str + Column name of the output to extract (like 'solidification_time'). + + Returns + ------- + dict + Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. + """ + grouped = {} + + for param in grid_parameters: + for output in outputs: + key_name = f"{output}_per_{param}" + value_dict = {} + for param_value in df[param].dropna().unique(): + subset = df[df[param] == param_value] + output_values = subset[output].replace([np.inf, -np.inf], np.nan) + output_values = output_values.dropna() + output_values = output_values[output_values > 0] # Remove zeros and negatives + + value_dict[param_value] = output_values + + grouped[key_name] = value_dict + + return grouped + +def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, output_settings: dict, grid_dir: str | Path, grid_name: str): + """ + Creates a grid of ECDF plots where each row corresponds to one input parameter + and each column corresponds to one output. Saves the resulting figure as a PNG. + + Parameters + ---------- + + grid_params : dict + A mapping from parameter names (e.g. "orbit.semimajoraxis") to arrays/lists of tested values. 
+ + grouped_data : dict + Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. + + param_settings : dict + For each input-parameter key, a dict containing: + - "label": label of the colormap for the corresponding input parameter + - "colormap": a matplotlib colormap (e.g. mpl.cm.plasma) + - "log_scale": bool, whether to color-normalize on a log scale + + output_settings : dict + For each output key, a dict containing: + - "label": label of the x-axis for the corresponding output quantity + - "log_scale": bool, whether to plot the x-axis on log scale + - "scale": float, a factor to multiply raw values by before plotting + + plots_path : str + Path to the grid where to create "single_plots_ecdf" and save all .png plots + """ + # List of parameter names (rows) and output names (columns) + param_names = list(param_settings.keys()) + out_names = list(output_settings.keys()) + + # Create subplot grid: rows = parameters, columns = outputs + n_rows = len(param_names) + n_cols = len(out_names) + fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 2.5 * n_rows), squeeze=False) + + # Loop through parameters (rows) and outputs (columns) + for i, param_name in enumerate(param_names): + tested_param = grid_params.get(param_name, []) + if not tested_param: + print(f"⚠️ Skipping {param_name} — no tested values found in grid_params") + continue + + settings = param_settings[param_name] + + # Determine coloring + is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) + if is_numeric: + vmin, vmax = min(tested_param), max(tested_param) + if vmin == vmax: + # avoid log/normalize errors with constant values + vmin, vmax = vmin - 1e-9, vmax + 1e-9 + if settings.get("log_scale", False): + norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax) + else: + norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) + def color_func(v): + return settings["colormap"](norm(v)) + colorbar_needed = True + else: + 
unique_vals = sorted(set(tested_param))
+            cmap = mpl.colormaps.get_cmap(settings["colormap"]).resampled(len(unique_vals))
+            color_map = {val: cmap(j) for j, val in enumerate(unique_vals)}
+            def color_func(v):
+                return color_map[v]
+            colorbar_needed = False
+
+        for j, output_name in enumerate(out_names):
+            ax = axes[i][j]
+            out_settings = output_settings[output_name]
+
+            # Add panel number in upper-left corner
+            panel_number = i * n_cols + j + 1 # number of panels left-to-right, top-to-bottom
+            ax.text(
+                0.02, 0.98, # relative position in axes coordinates
+                str(panel_number), # text to display
+                transform=ax.transAxes, # use axis-relative coordinates
+                fontsize=18,
+                fontweight='bold',
+                va='top', # vertical alignment
+                ha='left', # horizontal alignment
+                color='black'
+            )
+
+            # Plot one ECDF per tested parameter value
+            for val in tested_param:
+                data_key = f"{output_name}_per_{param_name}"
+                if val not in grouped_data.get(data_key, {}):
+                    continue
+                raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0)
+                # Clip ECDF inputs: for '*_kg_atm' outputs keep only values >= 1e15 kg; for 'P_surf' keep only values >= 1 bar
+                if output_name.endswith('_kg_atm'):
+                    raw = np.clip(raw, 1e15, None)
+                elif output_name.endswith('P_surf'):
+                    raw = np.clip(raw, 1, None)
+                else:
+                    raw = raw
+
+                sns.ecdfplot(
+                    data=raw,
+                    log_scale=out_settings.get("log_scale", False),
+                    color=color_func(val),
+                    linewidth=4,
+                    linestyle='-',
+                    ax=ax
+                )
+
+            # Configure x-axis labels, ticks, grids
+            if i == n_rows - 1:
+                ax.set_xlabel(out_settings["label"], fontsize=22)
+                ax.xaxis.set_label_coords(0.5, -0.3)
+                ax.tick_params(axis='x', labelsize=22)
+            else:
+                ax.tick_params(axis='x', labelbottom=False)
+
+            # Configure y-axis (shared label added later)
+            if j == 0:
+                ax.set_ylabel("")
+                ticks = [0.0, 0.5, 1.0]
+                ax.set_yticks(ticks)
+                ax.tick_params(axis='y', labelsize=22)
+            else:
+                ax.set_ylabel("")
+                ax.set_yticks(ticks)
+                ax.tick_params(axis='y', labelleft=False)
+
+            ax.grid(alpha=0.4)
+
+        # After 
plotting all outputs for this parameter (row), add colorbar or legend + if colorbar_needed: + sm = mpl.cm.ScalarMappable(cmap=settings["colormap"], norm=norm) + # attach the colorbar to the right‐most subplot in row i: + rightmost_ax = axes[i, -1] + cbar = fig.colorbar(sm,ax=rightmost_ax,pad=0.03,aspect=10) + cbar.set_label(settings["label"], fontsize=24) + # This is for plot 0.194Msun + # if param_name == "orbit.semimajoraxis": + # cbar.ax.yaxis.set_label_coords(9.5, 0.5) + # else: + # cbar.ax.yaxis.set_label_coords(6, 0.5) + # This is for 1Msun + cbar.ax.yaxis.set_label_coords(6, 0.5) + ticks = sorted(set(tested_param)) + cbar.set_ticks(ticks) + cbar.ax.tick_params(labelsize=22) + else: + handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=4, label=str(val)) for val in unique_vals] + ax.legend(handles=handles, fontsize=24,bbox_to_anchor=(1.01, 1), loc='upper left') + + # Add a single, shared y-axis label + fig.text(0.04, 0.5, 'Normalized cumulative fraction of simulations', va='center', rotation='vertical', fontsize=40) + + # Tweak layout and save + plt.tight_layout(rect=[0.08, 0.02, 1, 0.97]) + output_dir = grid_dir / "post_processing" / "grid_plots" + output_dir.mkdir(parents=True, exist_ok=True) + output_file = output_dir / f"ecdf_grid_plot_{grid_name}.png" + fig.savefig(output_file, dpi=300) + plt.close(fig) diff --git a/src/proteus/grid/test_new_pp_grid.py b/src/proteus/grid/test_new_pp_grid.py index ad0f2fc13..f265a894d 100644 --- a/src/proteus/grid/test_new_pp_grid.py +++ b/src/proteus/grid/test_new_pp_grid.py @@ -2,16 +2,77 @@ from pathlib import Path +import matplotlib.cm as cm +import pandas as pd import post_processing_updated as pp -# Point this to your actual grid folder grid_name = "escape_grid_1Msun" grid_path = Path(f"/projects/p315557/Paper_1/DATA/Grids/{grid_name}") +# grid_name= "toi561b_grid" +# grid_path = Path(f"/projects/p315557/TOI-561b/{grid_name}") + +# grid_name= "escape_on_off_janus_agni_1Msun" +# grid_path = 
Path(f"/projects/p315557/Paper_1/DATA/Comparison_escape_on_off/{grid_name}") + data = pp.load_grid_cases(grid_path) -# case_params, tested_params = pp.get_tested_grid_parameters(data,grid_path) -# pp.generate_summary_csv(data,case_params,grid_path,grid_name) -# pp.generate_completed_summary_csv(data,case_params,grid_path,grid_name) -# pp.generate_running_error_summary_csv(data,case_params,grid_path,grid_name) +input_param_grid_per_case, tested_params_grid = pp.get_tested_grid_parameters(data,grid_path) +#pp.generate_summary_csv(data,case_params,grid_path,grid_name) +#pp.generate_completed_summary_csv(data,case_params,grid_path,grid_name) +#pp.generate_running_error_summary_csv(data,case_params,grid_path,grid_name) + +#pp.plot_grid_status(data,grid_path,grid_name) +rows = [] +for case_index, case in enumerate(data): + row = {} + + # Add input parameters + init_params = case['init_parameters'] + for k, v in init_params.items(): + row[k] = v + + # Add outputs (last timestep) + df_out = case['output_values'] + if df_out is not None: + last_row = df_out.iloc[-1].to_dict() + row.update(last_row) + + # Add status + row['status'] = case['status'] + + rows.append(row) + +df = pd.DataFrame(rows) +def flatten_nested_columns(df): + flat_data = {} + for col in df.columns: + if isinstance(df[col].iloc[0], dict): + # Expand each dictionary into separate columns + nested_df = pd.json_normalize(df[col]) + nested_df.index = df.index + for nested_col in nested_df.columns: + flat_data[f"{col}.{nested_col}"] = nested_df[nested_col] + else: + flat_data[col] = df[col] + return pd.DataFrame(flat_data) +df_flat = flatten_nested_columns(df) +grouped_data = pp.group_output_by_parameter(df_flat, tested_params_grid, ['T_surf']) + + +param_settings_grid = { + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Paired, "log_scale": False}, + "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, + "escape.zephyrus.efficiency": {"label": r"$\rm 
\epsilon$", "colormap": cm.spring, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"P$_{\rm XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, + "outgas.fO2_shift_IW": {"label": r"$\rm \log_{10} fO_2 [IW]$", "colormap": cm.coolwarm, "log_scale": False}, + "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} +output_settings_grid = { + 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, + 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, + 'T_surf': {"label": r"T$_{\rm surf}$ [$10^3$ K]", "log_scale": False, "scale": 1.0/1000.0}, + 'P_surf': {"label": r"P$_{\rm surf}$ [bar]", "log_scale": True, "scale": 1.0}, + 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, + 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}} -pp.plot_grid_status(data,grid_path,grid_name) +pp.ecdf_grid_plot(tested_params_grid, grouped_data, param_settings_grid, output_settings_grid, grid_path, grid_name) From 3f3d641bcbaab023271ecf6926d00c9a37d091c6 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 17 Dec 2025 19:36:34 +0100 Subject: [PATCH 064/105] create toml grid analyze and use it for ecdf plot, clean and update functions comments --- input/ensembles/escape_grid_1Msun.toml | 70 +++ input/ensembles/example.grid_analyse.toml | 85 +++ input/ensembles/toi561b_grid_a_0_0106AU.toml | 4 +- src/proteus/grid/post_processing_updated.py | 583 +++++++++++-------- src/proteus/grid/run_grid_analysis.py | 16 +- src/proteus/grid/test_grid_analyze.py | 61 ++ src/proteus/grid/test_new_pp_grid.py | 80 ++- 7 files changed, 601 insertions(+), 298 deletions(-) create mode 100644 input/ensembles/example.grid_analyse.toml create mode 100644 src/proteus/grid/test_grid_analyze.py diff --git 
a/input/ensembles/escape_grid_1Msun.toml b/input/ensembles/escape_grid_1Msun.toml index 445d385d2..fd8bd207f 100644 --- a/input/ensembles/escape_grid_1Msun.toml +++ b/input/ensembles/escape_grid_1Msun.toml @@ -57,3 +57,73 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 3) ["delivery.elements.H_oceans"] method = "direct" values = [1.0, 5.0, 10.0] + +# Plotting configuration for ECDF +[plot.input_parameters] + + [plot.parameters.atmos_clim.module] + label = "Atmosphere module" + colormap = "viridis" + log_scale = false + + [plot.parameters.orbit.semimajoraxis] + label = "a [AU]" + colormap = "viridis" + log_scale = false + + [plot.parameters.escape.zephyrus.efficiency] + label = "\\rm \\epsilon" + colormap = "viridis" + log_scale = false + + [plot.parameters.escape.zephyrus.Pxuv] + label = "P_{\\rm XUV} [bar]" + colormap = "viridis" + log_scale = true + + [plot.parameters.outgas.fO2_shift_IW] + label = "\\rm \\log_{10} fO_2 [IW]" + colormap = "viridis" + log_scale = false + + [plot.parameters.delivery.elements.CH_ratio] + label = "C/H ratio" + colormap = "viridis" + log_scale = false + + [plot.parameters.delivery.elements.H_oceans] + label = "[H] [oceans]" + colormap = "viridis" + log_scale = false + +[plot.outputs] + + [plot.outputs.solidification_time] + label = "Solidification [yr]" + log_scale = true + scale = 1.0 + + [plot.outputs.Phi_global] + label = "Melt fraction [%]" + log_scale = false + scale = 100.0 + + [plot.outputs.T_surf] + label = "T_{\\rm surf} [10^3 K]" + log_scale = false + scale = 0.001 + + [plot.outputs.P_surf] + label = "P_{\\rm surf} [bar]" + log_scale = true + scale = 1.0 + + [plot.outputs.atm_kg_per_mol] + label = "MMW [g/mol]" + log_scale = false + scale = 1000.0 + + [plot.outputs.esc_rate_total] + label = "Escape rate [kg/s]" + log_scale = true + scale = 1.0 diff --git a/input/ensembles/example.grid_analyse.toml b/input/ensembles/example.grid_analyse.toml new file mode 100644 index 000000000..0f1f64069 --- /dev/null +++ 
b/input/ensembles/example.grid_analyse.toml @@ -0,0 +1,85 @@ +# Config file for grid post-processing analysis and plotting + +# Path to grid folder +grid_path = "/projects/p315557/Paper_1/DATA/Grids/escape_grid_1Msun/" + +# Post-processing options +update_csv = true # Whether to update the summary CSV file before plotting +plot_status = true # Generate status summary plots of the grid +plot_ecdf = true # Generate ECDF grid plot for input parameters and output + +# Input parameters configuration for ECDF plot (1 row per input parameter) +[input_parameters] + + colormap = "viridis" # Default colormap for all input parameters + + [input_parameters.atmos_clim.module] + label = "Atmosphere module" # label of the input parameter + log_scale = false # whether to use log scale or not for colorbar depending on parameters range + + [input_parameters.orbit.semimajoraxis] + label = "a [AU]" + log_scale = false + + [input_parameters.escape.zephyrus.efficiency] + label = "\\rm \\epsilon" # TOML file cannot read Latex command, so use double backslash + log_scale = false + + [input_parameters.escape.zephyrus.Pxuv] + label = "P_{\\rm XUV}\\,[\\mathrm{bar}]" + log_scale = true + + [input_parameters.outgas.fO2_shift_IW] + label = "\\log_{10} fO_2\\,[\\Delta IW]" + log_scale = false + + [input_parameters.delivery.elements.CH_ratio] + label = "C/H ratio" + log_scale = false + + [input_parameters.delivery.elements.H_oceans] + label = "[H] [oceans]" + log_scale = false + +# Output variables configuration for ECDF plot (1 column per output variable) +[output_variables] + + [output_variables.solidification_time] + label = "Solidification [yr]" # label of the output variable on x-axis + log_scale = true # whether to use log scale or not on x-axis + scale = 1.0 # scaling factor to apply to the output variable values + + [output_variables.Phi_global] + label = "Melt fraction [%]" + log_scale = false + scale = 100.0 # convert melt fraction to percentage + + [output_variables.T_surf] + label = 
"T_{\\rm surf}\\,[10^{3}\\,\\mathrm{K}]"
+    log_scale = false
+    scale = 0.001 # convert K to 10^3 K for better readability on plot
+
+    [output_variables.P_surf]
+    label = "P_{\\rm surf}\\,[\\mathrm{bar}]"
+    log_scale = true
+    scale = 1.0
+
+    [output_variables.atm_kg_per_mol]
+    label = "MMW [g/mol]"
+    log_scale = false
+    scale = 1000.0 # convert kg/mol to g/mol
+
+    [output_variables.esc_rate_total]
+    label = "Escape rate [kg/s]"
+    log_scale = true
+    scale = 1.0
+
+    [output_variables.H2O_kg_atm]
+    label = "\\ H_{2}O_\\mathrm{atm} [kg]"
+    log_scale = true
+    scale = 1.0
+
+    [output_variables.C_kg_atm]
+    label = "C_{\\rm atm} [kg]"
+    log_scale = true
+    scale = 1.0
diff --git a/input/ensembles/toi561b_grid_a_0_0106AU.toml b/input/ensembles/toi561b_grid_a_0_0106AU.toml
index 9f4f4c3d7..3767956fd 100644
--- a/input/ensembles/toi561b_grid_a_0_0106AU.toml
+++ b/input/ensembles/toi561b_grid_a_0_0106AU.toml
@@ -59,11 +59,11 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 3)
 # Planet bulk C/H ratio set directly
 ["delivery.elements.CH_ratio"]
     method = "direct"
-    values = [0.1, 1.0, 2.0]
+    values = [0.1, 2.0] # 1.0 out

 # Planet bulk S/H ratio set directly
 ["delivery.elements.SH_ratio"]
     method = "direct"
-    values = [0.216, 2.16, 21.6]
+    values = [0.216, 21.6] # 2.16 out

 # Stellar rotation period to test ?
diff --git a/src/proteus/grid/post_processing_updated.py b/src/proteus/grid/post_processing_updated.py index 942f9074c..cedf5a6b3 100644 --- a/src/proteus/grid/post_processing_updated.py +++ b/src/proteus/grid/post_processing_updated.py @@ -3,6 +3,7 @@ from pathlib import Path import matplotlib as mpl +import matplotlib.cm as cm import matplotlib.pyplot as plt import numpy as np import pandas as pd @@ -10,9 +11,28 @@ import toml # --------------------------------------------------------- -# Data loading and processing functions +# Data loading, extraction, and CSV generation functions # --------------------------------------------------------- +def get_grid_name(grid_path: str | Path) -> str: + """ + Returns the grid name (last part of the path) from the given grid path. + + Parameters + ---------- + grid_path : str or Path + Full path to the grid directory. + + Returns + ------- + grid_name : str + Name of the grid directory. + """ + grid_path = Path(grid_path) + if not grid_path.is_dir(): + raise ValueError(f"{grid_path} is not a valid directory") + return grid_path.name + def load_grid_cases(grid_dir: Path): """ Load information for each simulation of a PROTEUS grid. @@ -28,7 +48,7 @@ def load_grid_cases(grid_dir: Path): ---------- combined_data : list List of dictionaries, each containing: - - 'init_parameters' (dict): Parameters loaded from `init_coupler.toml`. + - 'init_parameters' (dict): All input parameters loaded from `init_coupler.toml`. - 'output_values' (pandas.DataFrame): Data from `runtime_helpfile.csv`. - 'status' (str): Status string from the `status` file, or 'Unknown' if unavailable. 
""" @@ -53,7 +73,7 @@ def load_grid_cases(grid_dir: Path): except Exception as e: print(f"Error reading init file in {case.name}: {e}") - # Read runtime_helpfile.csv if available + # Read runtime_helpfile.csv df = None if runtime_file.exists(): try: @@ -61,7 +81,7 @@ def load_grid_cases(grid_dir: Path): except Exception as e: print(f"WARNING : Error reading runtime_helpfile.csv for {case.name}: {e}") - # Read status file if available + # Read status file status = 'Unknown' if status_file.exists(): try: @@ -84,10 +104,9 @@ def load_grid_cases(grid_dir: Path): 'status' : status }) - # + # Print summary of statuses statuses = [c['status'] for c in combined_data] status_counts = pd.Series(statuses).value_counts().sort_values(ascending=False) - print('-----------------------------------------------------------') print(f"Total number of simulations: {len(statuses)}") print('-----------------------------------------------------------') @@ -101,8 +120,8 @@ def load_grid_cases(grid_dir: Path): def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): """ Extract tested grid parameters per case using: - - copy.grid.toml to determine which parameters were varied - - init_parameters already loaded by load_grid_cases + - copy.grid.toml to determine which parameters were varied in the grid + - init_parameters already loaded by load_grid_cases for each simulation of the grid Parameters ---------- @@ -116,15 +135,12 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): case_params : dict Dictionary mapping case index -> {parameter_name: value} tested_params : dict - Dictionary of tested grid parameters and their grid values - (directly from copy.grid.toml) + Dictionary of tested grid parameters and their grid values (directly from copy.grid.toml) """ grid_dir = Path(grid_dir) - # --------------------------------------------------------- - # 1) Load tested grid parameter definitions - # --------------------------------------------------------- + # 
1. Load tested input parameters in the grid raw_params = toml.load(grid_dir / "copy.grid.toml") # Keep only the parameters and their values (ignore 'method' keys) @@ -136,9 +152,7 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): grid_param_paths = list(tested_params.keys()) - # --------------------------------------------------------- - # 2) Extract those parameters from loaded cases - # --------------------------------------------------------- + # 2.Extract those parameters from loaded cases for each case of the grid case_params = {} for idx, case in enumerate(cases_data): @@ -160,48 +174,59 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): return case_params, tested_params -def extract_grid_output(cases_data: list, parameter_name: str): - """ - Extract a specific parameter from the 'output_values' of each simulation case. +# def extract_grid_output(cases_data: list, parameter_name: str): +# """ +# Extract a specific output for each simulation of the grid at the last time step. + +# Parameters +# ---------- +# cases_data : list +# List of dictionaries containing simulation data. + +# parameter_name : str +# The name of the parameter to extract from 'output_values'. + +# Returns +# ------- +# output_values : list +# A list containing the extracted values of the specified parameter for all cases of the grid. 
+# """ + +# output_last_step = [] +# columns_printed = False # Flag to print columns only once + +# for case_index, case in enumerate(cases_data): +# df = case['output_values'] +# if df is None: +# print(f"Warning: No output values found for case number '{case_index}'") +# output_last_step.append(np.nan) # Append NaN if no output values +# continue # Skip cases with no output +# if parameter_name in df.columns: +# parameter_value = df[parameter_name].iloc[-1] +# output_last_step.append(parameter_value) +# else: +# if not columns_printed: +# print(f"Warning: Parameter '{parameter_name}' does not exist in case '{case['init_parameters'].get('name', 'Unknown')}'") +# print(f"Available columns in this case: {', '.join(df.columns)}") +# columns_printed = True + +# return output_last_step + +def load_phi_crit(grid_dir: str | Path): + """" + Load the critical melt fraction (phi_crit) from the reference configuration file of the grid. Parameters ---------- - cases_data : list - List of dictionaries containing simulation data. - - parameter_name : str - The name of the parameter to extract from 'output_values'. + grid_dir : str or Path + Path to the grid directory containing ref_config.toml. Returns ------- - parameter_values : list - A list containing the extracted values of the specified parameter for all cases of the grid. 
- """ - - parameter_values = [] - columns_printed = False # Flag to print columns only once - - for case_index, case in enumerate(cases_data): - df = case['output_values'] - if df is None: - print(f"Warning: No output values found for case number '{case_index}'") - parameter_values.append(np.nan) # Append NaN if no output values - continue # Skip cases with no output - if parameter_name in df.columns: - parameter_value = df[parameter_name].iloc[-1] - parameter_values.append(parameter_value) - else: - if not columns_printed: - print(f"Warning: Parameter '{parameter_name}' does not exist in case '{case['init_parameters'].get('name', 'Unknown')}'") - print(f"Available columns in this case: {', '.join(df.columns)}") - columns_printed = True - - # Print the extracted output values for the specified parameter - print(f"Extracted output (at last time step) : {parameter_name} ") + phi_crit : float + The critical melt fraction value loaded from the reference configuration. - return parameter_values - -def load_phi_crit(grid_dir: str | Path): + """ grid_dir = Path(grid_dir) ref_file = grid_dir / "ref_config.toml" @@ -210,7 +235,7 @@ def load_phi_crit(grid_dir: str | Path): ref = toml.load(open(ref_file)) - # Navigate structure safely + # Find phi_crit value try: phi_crit = ref["params"]["stop"]["solid"]["phi_crit"] except KeyError: @@ -219,6 +244,24 @@ def load_phi_crit(grid_dir: str | Path): return phi_crit def extract_solidification_time(cases_data: list, grid_dir: str | Path): + """" + Extract solidification time for each simulation of the grid for + the condition Phi_global < phi_crit at last time step. + + Parameters + ---------- + cases_data : list + List of dictionaries containing simulation data. + + grid_dir : str or Path + Path to the grid directory containing ref_config.toml. + + Returns + ------- + solidification_times : list + A list containing the solidification times for all cases of the grid. 
+ """ + # Load phi_crit once phi_crit = load_phi_crit(grid_dir) @@ -232,6 +275,7 @@ def extract_solidification_time(cases_data: list, grid_dir: str | Path): solidification_times.append(np.nan) continue + # Condition for complete solidification if 'Phi_global' in df.columns and 'Time' in df.columns: condition = df['Phi_global'] < phi_crit @@ -239,7 +283,7 @@ def extract_solidification_time(cases_data: list, grid_dir: str | Path): idx = condition.idxmax() solidification_times.append(df.loc[idx, 'Time']) else: - solidification_times.append(np.nan) + solidification_times.append(np.nan) # if planet is not solidified, append NaN else: if not columns_printed: @@ -250,92 +294,70 @@ def extract_solidification_time(cases_data: list, grid_dir: str | Path): return solidification_times -def generate_summary_csv( - cases_data: list, - case_params: dict, - grid_dir: str | Path, - grid_name: str -): +def generate_summary_csv(cases_data: list, case_params: dict, grid_dir: str | Path, grid_name: str): """ - Export a summary of simulation cases to a TSV file using - already-loaded data only. - - Column order: - - case metadata - - tested grid parameters - - runtime_helpfile.csv (last timestep) - - solidification_time + Generate CSV file summarizing all simulation cases in the grid, including: + - Case status + - Values of tested grid parameters + - All extracted values from runtime_helpfile.csv (at last timestep) + - Solidification time + + Parameters + ---------- + cases_data : list + List of dictionaries containing simulation data. + case_params : dict + Dictionary mapping case index -> {parameter_name: value} + grid_dir : str or Path + Path to the grid directory containing ref_config.toml. + grid_name : str + Name of the grid. 
""" - # --------------------------------------------------------- - # Compute solidification times ONCE - # --------------------------------------------------------- + + # Compute solidification times solidification_times = extract_solidification_time(cases_data, grid_dir) + # Extract data for each case summary_rows = [] - for case_index, case in enumerate(cases_data): row = {} - # ----------------------------------------------------- - # Case metadata - # ----------------------------------------------------- + # Case status row["case_number"] = case_index row["status"] = case["status"] - # ----------------------------------------------------- - # Tested grid parameters - # ----------------------------------------------------- + # Values of tested grid parameters for each case params = case_params.get(case_index, {}) for k, v in params.items(): row[k] = v - # ----------------------------------------------------- - # Output values (last timestep) - # ----------------------------------------------------- + # Output values (at last timestep) df = case["output_values"] if df is not None: for col in df.columns: row[col] = df[col].iloc[-1] - # ----------------------------------------------------- - # Solidification time (LAST column) - # ----------------------------------------------------- + # Solidification time row["solidification_time"] = solidification_times[case_index] summary_rows.append(row) - # --------------------------------------------------------- - # Create DataFrame and save - # --------------------------------------------------------- + # Create DataFrame and save it in the grid directory in post_processing/extracted_data/ summary_df = pd.DataFrame(summary_rows) output_dir = grid_dir / "post_processing" / "extracted_data" output_dir.mkdir(parents=True, exist_ok=True) output_file = output_dir / f"{grid_name}_final_extracted_data_all.csv" summary_df.to_csv(output_file, sep="\t", index=False) -def generate_completed_summary_csv( - cases_data: list, - 
case_params: dict, - grid_dir: str | Path, - grid_name: str -): +def generate_completed_summary_csv(cases_data: list, case_params: dict, grid_dir: str | Path, grid_name: str): """ - Export a summary of simulation cases to a TSV file, but only - include cases whose status starts with 'completed'. - - Column order: - - case metadata - - tested grid parameters - - runtime_helpfile.csv (last timestep) - - solidification_time + Same function as generate_summary_csv, but only include fully 'Completed' cases. """ - # --------------------------------------------------------- - # Compute solidification times ONCE - # --------------------------------------------------------- + # Compute solidification times solidification_times = extract_solidification_time(cases_data, grid_dir) + # Extract data for each fully completed case summary_rows = [] - for case_index, case in enumerate(cases_data): status = case.get("status", "").lower() if not status.startswith("completed"): @@ -343,68 +365,44 @@ def generate_completed_summary_csv( row = {} - # ----------------------------------------------------- - # Case metadata - # ----------------------------------------------------- + # Case status row["case_number"] = case_index row["status"] = case["status"] - # ----------------------------------------------------- - # Tested grid parameters - # ----------------------------------------------------- + # Values of tested grid parameters for each case params = case_params.get(case_index, {}) for k, v in params.items(): row[k] = v - # ----------------------------------------------------- - # Output values (last timestep) - # ----------------------------------------------------- + # Output values (at last timestep) df = case["output_values"] if df is not None: for col in df.columns: row[col] = df[col].iloc[-1] - # ----------------------------------------------------- - # Solidification time (LAST column) - # ----------------------------------------------------- + # Solidification time 
row["solidification_time"] = solidification_times[case_index] summary_rows.append(row) - # --------------------------------------------------------- # Create DataFrame and save - # --------------------------------------------------------- summary_df = pd.DataFrame(summary_rows) output_dir = grid_dir / "post_processing" / "extracted_data" output_dir.mkdir(parents=True, exist_ok=True) - # Updated CSV name to indicate filtered results + # Updated CSV name to indicate only completed cases output_file = output_dir / f"{grid_name}_final_extracted_data_completed.csv" summary_df.to_csv(output_file, sep="\t", index=False) -def generate_running_error_summary_csv( - cases_data: list, - case_params: dict, - grid_dir: str | Path, - grid_name: str -): +def generate_running_error_summary_csv(cases_data: list, case_params: dict, grid_dir: str | Path, grid_name: str): """ - Export a summary of simulation cases to a TSV file, but only - include cases whose status starts with 'Running' or 'Error'. - - Column order: - - case metadata - - tested grid parameters - - runtime_helpfile.csv (last timestep) - - solidification_time + Same function as generate_summary_csv, but only include 'Running' and 'Error' cases. 
""" - # --------------------------------------------------------- - # Compute solidification times ONCE - # --------------------------------------------------------- + # Compute solidification times solidification_times = extract_solidification_time(cases_data, grid_dir) + # Extract data for each running or error case summary_rows = [] - for case_index, case in enumerate(cases_data): status = case.get("status", "").lower() if not (status.startswith("running") or status.startswith("error")): @@ -412,42 +410,32 @@ def generate_running_error_summary_csv( row = {} - # ----------------------------------------------------- - # Case metadata - # ----------------------------------------------------- + # Case status row["case_number"] = case_index row["status"] = case["status"] - # ----------------------------------------------------- - # Tested grid parameters - # ----------------------------------------------------- + # Values of tested grid parameters for each case params = case_params.get(case_index, {}) for k, v in params.items(): row[k] = v - # ----------------------------------------------------- - # Output values (last timestep) - # ----------------------------------------------------- + # Output values (at last timestep) df = case["output_values"] if df is not None: for col in df.columns: row[col] = df[col].iloc[-1] - # ----------------------------------------------------- - # Solidification time (LAST column) - # ----------------------------------------------------- + # Solidification time row["solidification_time"] = solidification_times[case_index] summary_rows.append(row) - # --------------------------------------------------------- # Create DataFrame and save - # --------------------------------------------------------- summary_df = pd.DataFrame(summary_rows) output_dir = grid_dir / "post_processing" / "extracted_data" output_dir.mkdir(parents=True, exist_ok=True) - # Updated CSV name to indicate filtered results + # Updated CSV name to indicate only 
running/error cases output_file = output_dir / f"{grid_name}_final_extracted_data_running_error.csv" summary_df.to_csv(output_file, sep="\t", index=False) @@ -455,93 +443,195 @@ def generate_running_error_summary_csv( # Plotting functions # --------------------------------------------------------- -def plot_grid_status(cases_data: list, grid_dir: str | Path, grid_name: str): +def plot_grid_status(df: pd.DataFrame, grid_dir: str | Path, grid_name: str): """ - Plot the status of simulations from the PROTEUS grid with improved x-axis readability. + Plot histogram summary of number of simulation statuses in + the grid using the generated CSV file for all cases. Parameters ---------- - cases_data : list - List of dictionaries returned by `load_grid_cases`. + df : pandas.DataFrame + DataFrame loaded from grid_name_final_extracted_data_all.csv. - plot_dir : Path - Path to the plots directory. + grid_dir : Path + Path to the grid directory. grid_name : str - Name of the grid, used for the plot title. - - status_colors : dict, optional - A dictionary mapping statuses to specific colors. If None, a default palette is used. + Name of the grid. 
""" - # Extract and clean statuses - statuses = [case.get('status', 'Unknown') for case in cases_data] - statuses = pd.Series(statuses, name='Status') + if "status" not in df.columns: + raise ValueError("CSV must contain a 'status' column") + + # Clean and count statuses + statuses = df["status"].astype(str) status_counts = statuses.value_counts().sort_values(ascending=False) + total_simulations = len(df) - # Set colors for the bars - palette = sns.color_palette("Accent", len(status_counts)) + # Format status labels for better readability formatted_status_keys = [s.replace(" (", " \n (") for s in status_counts.index] + palette = sns.color_palette("Accent", len(status_counts)) palette = dict(zip(formatted_status_keys, palette)) - # Prepare dataframe for plotting + # Prepare DataFrame for plotting plot_df = pd.DataFrame({ - 'Status': formatted_status_keys, - 'Count': status_counts.values + "Status": formatted_status_keys, + "Count": status_counts.values }) + # Plot histogram plt.figure(figsize=(11, 7)) ax = sns.barplot( data=plot_df, - x='Status', - y='Count', - hue='Status', + x="Status", + y="Count", + hue="Status", palette=palette, dodge=False, - edgecolor='black' + edgecolor="black" ) - # Remove legend if it was created + # Remove legend if ax.legend_: ax.legend_.remove() - # Add value labels above bars - total_simulations = len(cases_data) + # Add counts and percentages above bars per status for i, count in enumerate(status_counts.values): - percentage = (count / total_simulations) * 100 - offset = 0.005 * status_counts.max() # 1% of the max count + percentage = 100 * count / total_simulations + offset = 0.01 * status_counts.max() ax.text( - i, count + offset, + i, + count + offset, f"{count} ({percentage:.1f}%)", - ha='center', va='bottom', fontsize=14 + ha="center", + va="bottom", + fontsize=14 ) - # Boxed total in upper right - plt.gca().text( - 0.97, 0.94, + # Add total number of simulations text + ax.text( + 0.97, + 0.94, f"Total number of simulations : 
{total_simulations}", - transform=plt.gca().transAxes, - ha='right', va='top', + transform=ax.transAxes, + ha="right", + va="top", fontsize=16 ) - plt.grid(alpha=0.2, axis='y') - plt.title(f"Simulation status summary for grid {grid_name}", fontsize=16) - plt.xlabel("Simulation status", fontsize=16) - plt.ylabel("Number of simulations", fontsize=16) - plt.yticks(fontsize=14) - plt.xticks(fontsize=14) - plt.tight_layout() + # Formatting + ax.grid(alpha=0.2, axis="y") + ax.set_title(f"Simulation status summary for grid {grid_name}", fontsize=16) + ax.set_xlabel("Simulation status", fontsize=16) + ax.set_ylabel("Number of simulations", fontsize=16) + ax.tick_params(axis="x", labelsize=14) + ax.tick_params(axis="y", labelsize=14) - output_dir = grid_dir / "post_processing" / "grid_plots" + # Save + output_dir = Path(grid_dir) / "post_processing" / "grid_plots" output_dir.mkdir(parents=True, exist_ok=True) output_file = output_dir / f"summary_grid_statuses_{grid_name}.png" - plt.savefig(output_file, dpi=300) + plt.savefig(output_file, dpi=300, bbox_inches='tight') plt.close() -def group_output_by_parameter(df,grid_parameters,outputs): +def flatten_input_parameters(d: dict, parent_key: str = "") -> dict: + """ + Flattens a nested input-parameter dictionary from a TOML configuration + into a flat mapping of dot-separated parameter paths to their plotting + configuration. + + Parameters + ---------- + d : dict + Nested dictionary describing input parameters (from TOML). + parent_key : str, optional + Accumulated parent key for recursive calls. + + Returns + ------- + flat : dict + Dictionary mapping parameter paths (e.g. ``"escape.zephyrus.Pxuv"``) + to their corresponding configuration dictionaries. 
+ """ + + flat = {} + + for k, v in d.items(): + if k == "colormap": + continue + + new_key = f"{parent_key}.{k}" if parent_key else k + + if isinstance(v, dict) and "label" in v: + # Leaf parameter block + flat[new_key] = v + elif isinstance(v, dict): + # Recurse deeper + flat.update(flatten_input_parameters(v, new_key)) + + return flat + +def load_ecdf_plot_settings(cfg): """ - Groups output values (like solidification times) by a specific grid parameter. + Load ECDF plotting settings for both input parameters and output variables + from a configuration dictionary loaded from TOML. + + Parameters + ---------- + cfg : dict + Configuration dictionary loaded from a TOML file. + + Returns + ------- + param_settings : dict + Mapping of input-parameter paths to plotting settings. Each value + is a dict containing: + - "label" : str + Label for the parameter (used in colorbar). + - "colormap" : matplotlib colormap + Colormap used to color ECDF curves. + - "log_scale" : bool + Whether to normalize colors on a logarithmic scale. + + output_settings : dict + Mapping of output variable names to plotting settings. Each value + is a dict containing: + - "label" : str + X-axis label for the ECDF plot. + - "log_scale" : bool + Whether to use a logarithmic x-axis. + - "scale" : float + Factor applied to raw output values before plotting. 
+ """ + + # Load input parameter settings + raw_params = cfg["input_parameters"] + default_cmap = getattr(cm, raw_params.get("colormap", "viridis")) + + # Flatten input parameters dictionary + flat_params = flatten_input_parameters(raw_params) + + param_settings = {} + for key, val in flat_params.items(): + param_settings[key] = { + "label": val.get("label", key), + "colormap": default_cmap, + "log_scale": val.get("log_scale", False), + } + + output_settings = {} + for key, val in cfg.get("output_variables", {}).items(): + output_settings[key] = { + "label": val.get("label", key), + "log_scale": val.get("log_scale", False), + "scale": val.get("scale", 1.0), + } + + return param_settings, output_settings + +def group_output_by_parameter(df, grid_parameters, outputs): + """ + Groups output values (like P_surf) by a specific grid parameter. Parameters ---------- @@ -549,10 +639,10 @@ def group_output_by_parameter(df,grid_parameters,outputs): DataFrame containing simulation results including value of the grid parameter and the corresponding extracted output. grid_parameters : str - Column name of the grid parameter to group by (like 'escape.zephyrus.Pxuv'). + Column name of the grid parameter to group by (like 'escape.zephyrus.efficiency'). outputs : str - Column name of the output to extract (like 'solidification_time'). + Column name of the output to extract (like 'P_surf'). 
Returns ------- @@ -567,9 +657,9 @@ def group_output_by_parameter(df,grid_parameters,outputs): value_dict = {} for param_value in df[param].dropna().unique(): subset = df[df[param] == param_value] - output_values = subset[output].replace([np.inf, -np.inf], np.nan) - output_values = output_values.dropna() - output_values = output_values[output_values > 0] # Remove zeros and negatives + output_values = subset[output].replace([np.inf, -np.inf], np.nan) # Replace inf with NaN + output_values = output_values.dropna() # Remove NaN values + output_values = output_values[output_values > 0] # Keep only positive values value_dict[param_value] = output_values @@ -577,16 +667,22 @@ def group_output_by_parameter(df,grid_parameters,outputs): return grouped -def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, output_settings: dict, grid_dir: str | Path, grid_name: str): +def latex(label: str) -> str: + """ + Wraps a label in dollar signs for LaTeX formatting if it contains 2 backslashes. """ - Creates a grid of ECDF plots where each row corresponds to one input parameter + return f"${label}$" if "\\" in label else label + +def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: dict, grid_dir: str | Path, grid_name: str): + """ + Creates ECDF grid plots where each row corresponds to one input parameter and each column corresponds to one output. Saves the resulting figure as a PNG. Parameters ---------- grid_params : dict - A mapping from parameter names (e.g. "orbit.semimajoraxis") to arrays/lists of tested values. + Dictionary of tested grid parameters and their grid values (directly from copy.grid.toml) grouped_data : dict Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. 
@@ -599,21 +695,31 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, output_settings : dict For each output key, a dict containing: - - "label": label of the x-axis for the corresponding output quantity + - "label": label of the x-axis for the corresponding output column - "log_scale": bool, whether to plot the x-axis on log scale - - "scale": float, a factor to multiply raw values by before plotting + - "scale": float, a factor to multiply raw values by before plotting plots_path : str Path to the grid where to create "single_plots_ecdf" and save all .png plots """ + + # Load tested grid parameters + raw_params = toml.load(grid_dir / "copy.grid.toml") + tested_params = {} + for key, value in raw_params.items(): + if isinstance(value, dict) and "values" in value: + # Only store the 'values' list + tested_params[key] = value["values"] + grid_params = tested_params + # List of parameter names (rows) and output names (columns) param_names = list(param_settings.keys()) out_names = list(output_settings.keys()) - # Create subplot grid: rows = parameters, columns = outputs + # Create subplot grid: rows = input parameters, columns = outputs variables n_rows = len(param_names) n_cols = len(out_names) - fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 2.5 * n_rows), squeeze=False) + fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 2.75 * n_rows), squeeze=False, gridspec_kw={'wspace': 0.1, 'hspace': 0.2}) # Loop through parameters (rows) and outputs (columns) for i, param_name in enumerate(param_names): @@ -621,15 +727,13 @@ def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, if not tested_param: print(f"⚠️ Skipping {param_name} — no tested values found in grid_params") continue - settings = param_settings[param_name] - # Determine coloring + # Determine if parameter is numeric or string for coloring is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) if is_numeric: vmin, 
vmax = min(tested_param), max(tested_param) if vmin == vmax: - # avoid log/normalize errors with constant values vmin, vmax = vmin - 1e-9, vmax + 1e-9 if settings.get("log_scale", False): norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax) @@ -651,16 +755,17 @@ def color_func(v): out_settings = output_settings[output_name] # Add panel number in upper-left corner - panel_number = i * n_cols + j + 1 # number of panels left-to-right, top-to-bottom + panel_number = i * n_cols + j + 1 ax.text( - 0.02, 0.98, # relative position in axes coordinates - str(panel_number), # text to display - transform=ax.transAxes, # use axis-relative coordinates + 0.03, 0.95, + str(panel_number), + transform=ax.transAxes, fontsize=18, fontweight='bold', - va='top', # vertical alignment - ha='left', # horizontal alignment - color='black' + va='top', + ha='left', + color='black', + bbox=dict(facecolor='white', edgecolor='silver', boxstyle='round,pad=0.2', alpha=0.8) ) # Plot one ECDF per tested parameter value @@ -669,14 +774,16 @@ def color_func(v): if val not in grouped_data.get(data_key, {}): continue raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0) - # Plot ECDf if output == df['H_kg_atm'] then plot only values > 1e10 AND psurf > 1 bar - if output_name.endswith('_kg_atm'): - raw = np.clip(raw, 1e15, None) - elif output_name.endswith('P_surf'): - raw = np.clip(raw, 1, None) - else: - raw = raw + # # Handle special cases for clipping + # if output_name.endswith('_kg_atm'): + # raw = np.clip(raw, 1e15, None) + # elif output_name.endswith('P_surf'): + # raw = np.clip(raw, 1, None) + # else: + # raw = raw + + # Plot ECDF sns.ecdfplot( data=raw, log_scale=out_settings.get("log_scale", False), @@ -688,7 +795,7 @@ def color_func(v): # Configure x-axis labels, ticks, grids if i == n_rows - 1: - ax.set_xlabel(out_settings["label"], fontsize=22) + ax.set_xlabel(latex(out_settings["label"]), fontsize=22) ax.xaxis.set_label_coords(0.5, -0.3) ax.tick_params(axis='x', 
labelsize=22) else: @@ -708,33 +815,25 @@ def color_func(v): ax.grid(alpha=0.4) # After plotting all outputs for this parameter (row), add colorbar or legend - if colorbar_needed: + if colorbar_needed: # colorbar for numeric parameters sm = mpl.cm.ScalarMappable(cmap=settings["colormap"], norm=norm) - # attach the colorbar to the right‐most subplot in row i: - rightmost_ax = axes[i, -1] + rightmost_ax = axes[i, -1] # Get the rightmost axis in the current row cbar = fig.colorbar(sm,ax=rightmost_ax,pad=0.03,aspect=10) - cbar.set_label(settings["label"], fontsize=24) - # This is for plot 0.194Msun - # if param_name == "orbit.semimajoraxis": - # cbar.ax.yaxis.set_label_coords(9.5, 0.5) - # else: - # cbar.ax.yaxis.set_label_coords(6, 0.5) - # This is for 1Msun + cbar.set_label(latex(settings["label"]), fontsize=24) cbar.ax.yaxis.set_label_coords(6, 0.5) ticks = sorted(set(tested_param)) cbar.set_ticks(ticks) cbar.ax.tick_params(labelsize=22) - else: + else: # legend for string parameters handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=4, label=str(val)) for val in unique_vals] ax.legend(handles=handles, fontsize=24,bbox_to_anchor=(1.01, 1), loc='upper left') # Add a single, shared y-axis label - fig.text(0.04, 0.5, 'Normalized cumulative fraction of simulations', va='center', rotation='vertical', fontsize=40) + fig.text(0.07, 0.5, 'Empirical cumulative fraction of grid simulations', va='center', rotation='vertical', fontsize=40) - # Tweak layout and save - plt.tight_layout(rect=[0.08, 0.02, 1, 0.97]) + # Save figure output_dir = grid_dir / "post_processing" / "grid_plots" output_dir.mkdir(parents=True, exist_ok=True) output_file = output_dir / f"ecdf_grid_plot_{grid_name}.png" - fig.savefig(output_file, dpi=300) + fig.savefig(output_file, dpi=300, bbox_inches='tight') plt.close(fig) diff --git a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py index 12043cbda..e63d64eea 100644 --- a/src/proteus/grid/run_grid_analysis.py +++ 
b/src/proteus/grid/run_grid_analysis.py @@ -164,13 +164,15 @@ def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True) # ECDF Grid Plot # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. param_settings_grid = { - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Paired, "log_scale": False}, - "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.efficiency": {"label": r"$\rm \epsilon$", "colormap": cm.spring, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"P$_{\rm XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "outgas.fO2_shift_IW": {"label": r"$\rm \log_{10} fO_2 [IW]$", "colormap": cm.coolwarm, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}, + #"atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Paired, "log_scale": False}, + "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.Spectral, "log_scale": False}, + "struct.corefrac": {"label": "CRF", "colormap": cm.Spectral, "log_scale": False}, + "atmos_clim.albedo_pl": {"label": r"$A_b$", "colormap": cm.Spectral, "log_scale": False}, + "escape.zephyrus.efficiency": {"label": r"$\rm \epsilon$", "colormap": cm.Spectral, "log_scale": False}, + "outgas.fO2_shift_IW": {"label": r"$\rm \log_{10} fO_2 [IW]$", "colormap": cm.Spectral, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.Spectral, "log_scale": True}, + "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.Spectral, "log_scale": False}, + "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.Spectral, "log_scale": True}, # "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": 
True}, # "escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} # "escape.module": {"label": "Escape module", "colormap": cm.RdYlGn, "log_scale": False} diff --git a/src/proteus/grid/test_grid_analyze.py b/src/proteus/grid/test_grid_analyze.py new file mode 100644 index 000000000..bafe4b92f --- /dev/null +++ b/src/proteus/grid/test_grid_analyze.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +import tomllib +from pathlib import Path + +import pandas as pd +import post_processing_updated as pp + +# Load configuration from TOML file +toml_file = Path("/home2/p315557/PROTEUS/input/ensembles/example.grid_analyse.toml") # Update with the correct path +with open(toml_file, "rb") as f: + cfg = tomllib.load(f) + +# Get grid path and name +grid_path = Path(cfg["grid_path"]) +grid_name = Path(pp.get_grid_name(grid_path)) +print(grid_path) +print(f"Analyzing grid: {grid_name}") + + +# Load grid data +data = pp.load_grid_cases(grid_path) +input_param_grid_per_case, tested_params_grid = pp.get_tested_grid_parameters(data, grid_path) + +# Generate summary CSV files +if cfg.get("update_csv", True): + pp.generate_summary_csv(data, input_param_grid_per_case, grid_path, grid_name) + pp.generate_completed_summary_csv(data, input_param_grid_per_case, grid_path, grid_name) + pp.generate_running_error_summary_csv(data, input_param_grid_per_case, grid_path, grid_name) + +# Plot grid status +if cfg.get("plot_status", True): + path_csv_all_simulations = grid_path / "post_processing/extracted_data" / f"{grid_name}_final_extracted_data_all.csv" + all_simulations_data_csv = pd.read_csv(path_csv_all_simulations, sep="\t") + pp.plot_grid_status(all_simulations_data_csv, grid_path, grid_name) + print("Plot grid status summary is available.") + +# Formatting for ECDF plot: group output data by parameters and prepare settings for plotting only for Completed simulations +path_csv_completed_simulations = grid_path / "post_processing/extracted_data" / 
f"{grid_name}_final_extracted_data_completed.csv" +completed_simulations_data_csv = pd.read_csv(path_csv_completed_simulations, sep="\t") +columns_output = list(cfg["output_variables"].keys()) +grouped_data = {} +for col in columns_output: + group = pp.group_output_by_parameter( + completed_simulations_data_csv, + tested_params_grid, + [col] + ) + grouped_data.update(group) +param_settings_grid, output_settings_grid = pp.load_ecdf_plot_settings(cfg) + +# Generate ECDF plot +if cfg.get("plot_ecdf", True): + pp.ecdf_grid_plot( + grouped_data, + param_settings_grid, + output_settings_grid, + grid_path, + grid_name + ) + print("ECDF grid plot is available.") diff --git a/src/proteus/grid/test_new_pp_grid.py b/src/proteus/grid/test_new_pp_grid.py index f265a894d..bc6a82ae1 100644 --- a/src/proteus/grid/test_new_pp_grid.py +++ b/src/proteus/grid/test_new_pp_grid.py @@ -8,65 +8,51 @@ grid_name = "escape_grid_1Msun" grid_path = Path(f"/projects/p315557/Paper_1/DATA/Grids/{grid_name}") - # grid_name= "toi561b_grid" # grid_path = Path(f"/projects/p315557/TOI-561b/{grid_name}") - # grid_name= "escape_on_off_janus_agni_1Msun" # grid_path = Path(f"/projects/p315557/Paper_1/DATA/Comparison_escape_on_off/{grid_name}") +# Load grid data data = pp.load_grid_cases(grid_path) +# Store the input parameters for each case based on the tested parameters in the grid and store the tested parameters in the grid input_param_grid_per_case, tested_params_grid = pp.get_tested_grid_parameters(data,grid_path) -#pp.generate_summary_csv(data,case_params,grid_path,grid_name) -#pp.generate_completed_summary_csv(data,case_params,grid_path,grid_name) -#pp.generate_running_error_summary_csv(data,case_params,grid_path,grid_name) - -#pp.plot_grid_status(data,grid_path,grid_name) -rows = [] -for case_index, case in enumerate(data): - row = {} - - # Add input parameters - init_params = case['init_parameters'] - for k, v in init_params.items(): - row[k] = v - - # Add outputs (last timestep) - df_out = 
case['output_values'] - if df_out is not None: - last_row = df_out.iloc[-1].to_dict() - row.update(last_row) - # Add status - row['status'] = case['status'] +# Generate CSV file for all grid, only completed simulations, and running error simulations +#pp.generate_summary_csv(data,input_param_grid_per_case,grid_path,grid_name) +#pp.generate_completed_summary_csv(data,input_param_grid_per_case,grid_path,grid_name) +#pp.generate_running_error_summary_csv(data,input_param_grid_per_case,grid_path,grid_name) - rows.append(row) +# Load the final extracted data CSV for all simulations +path_csv_all_simulations = grid_path / "post_processing/extracted_data" / f"{grid_name}_final_extracted_data_all.csv" +all_simulations_data_csv = pd.read_csv(path_csv_all_simulations, sep="\t") +# Plot grid status +pp.plot_grid_status(all_simulations_data_csv,grid_path,grid_name) -df = pd.DataFrame(rows) -def flatten_nested_columns(df): - flat_data = {} - for col in df.columns: - if isinstance(df[col].iloc[0], dict): - # Expand each dictionary into separate columns - nested_df = pd.json_normalize(df[col]) - nested_df.index = df.index - for nested_col in nested_df.columns: - flat_data[f"{col}.{nested_col}"] = nested_df[nested_col] - else: - flat_data[col] = df[col] - return pd.DataFrame(flat_data) -df_flat = flatten_nested_columns(df) -grouped_data = pp.group_output_by_parameter(df_flat, tested_params_grid, ['T_surf']) +# Load the final extracted data CSV for completed simulations +path_csv_completed_simulations = grid_path / "post_processing/extracted_data" / f"{grid_name}_final_extracted_data_completed.csv" +completed_simulations_data_csv = pd.read_csv(path_csv_completed_simulations, sep="\t") +# Group output data by tested parameters in the grid +grouped_data = {} +columns_output = ['solidification_time', 'Phi_global', 'T_surf', 'P_surf', 'atm_kg_per_mol', 'esc_rate_total'] +for col in columns_output: + group = pp.group_output_by_parameter( + completed_simulations_data_csv, + 
tested_params_grid, + [col] + ) + grouped_data.update(group) +# Define parameter and output settings for the ECDF grid plots param_settings_grid = { - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Paired, "log_scale": False}, - "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.efficiency": {"label": r"$\rm \epsilon$", "colormap": cm.spring, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"P$_{\rm XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "outgas.fO2_shift_IW": {"label": r"$\rm \log_{10} fO_2 [IW]$", "colormap": cm.coolwarm, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.winter, "log_scale": False}} + "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.viridis, "log_scale": False}, + "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.viridis, "log_scale": False}, + "escape.zephyrus.efficiency": {"label": r"$\rm \epsilon$", "colormap": cm.viridis, "log_scale": False}, + "escape.zephyrus.Pxuv": {"label": r"P$_{\rm XUV}$ [bar]", "colormap": cm.viridis, "log_scale": True}, + "outgas.fO2_shift_IW": {"label": r"$\rm \log_{10} fO_2 [IW]$", "colormap": cm.viridis, "log_scale": False}, + "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.viridis, "log_scale": False}, + "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.viridis, "log_scale": False}} output_settings_grid = { 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, @@ -75,4 +61,4 @@ def flatten_nested_columns(df): 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}} 
-pp.ecdf_grid_plot(tested_params_grid, grouped_data, param_settings_grid, output_settings_grid, grid_path, grid_name) +pp.ecdf_grid_plot(grouped_data, param_settings_grid, output_settings_grid, grid_path, grid_name) From 4be722acba363ef895f3e71bcb0fd63dca06432c Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 17 Dec 2025 21:15:24 +0100 Subject: [PATCH 065/105] update the command and doc --- docs/usage.md | 51 +- input/ensembles/toi561b_grid_a_0_0106AU.toml | 16 +- src/proteus/cli.py | 25 +- ...ocessing_updated.py => post_processing.py} | 123 +- src/proteus/grid/post_processing_grid.py | 1087 ----------------- src/proteus/grid/run_grid_analysis.py | 213 ---- src/proteus/grid/test_grid_analyze.py | 61 - src/proteus/grid/test_new_pp_grid.py | 64 - 8 files changed, 119 insertions(+), 1521 deletions(-) rename src/proteus/grid/{post_processing_updated.py => post_processing.py} (90%) delete mode 100644 src/proteus/grid/post_processing_grid.py delete mode 100644 src/proteus/grid/run_grid_analysis.py delete mode 100644 src/proteus/grid/test_grid_analyze.py delete mode 100644 src/proteus/grid/test_new_pp_grid.py diff --git a/docs/usage.md b/docs/usage.md index 9d34cab27..53f833dff 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -184,57 +184,36 @@ PROTEUS will perform this step automatically if enabled in the configuration fil ## Postprocessing of PROTEUS simulation grids -Results from a PROTEUS grid can be post-processed using the `proteus grid-analyze` command. +Results from a PROTEUS grid can be post-processed using the `proteus grid-analyse` command. -This will generate a CSV file with extracted data (`your_grid_name_extracted_data.csv`) from the grid results and ECDF plots +This will generate 3 CSV files, depending on simulations statuses, from the grid results and ECDF plots (see [seaborn.ecdfplot doc](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html)). 
-Here is the structure of the generated `post_processing_grid` folder inside the grid directory : +Here is the structure of the generated `post_processing` folder inside the grid directory : ```console your_grid_name/ ├─case_00000 <---- case of your grid (for the structure refer to the tree from the [## Output and results] section) ├─case_00001 ├─... - ├─cfgs <---- folder with all the `input.toml` files for all cases - ├─logs <---- folder with all the `proteus_case_number.log` files for all cases - ├─manager.log <---- the log file of the grid - ├─slurm_dispatch.sh <---- if use_slurm=True in `grid_proteus.py`, this is the slurm file to submit with `sbatch` command - ├─post_processing_grid <---- this folder contains all the output from this script - │ └─extracted_data <---- folder with the generated CSV file - │ └─your_grid_name_extracted_data.csv <---- CSV file containing the tested input parameters and extracted output from the grid - │ └─plots_grid <---- folder with the generated plots - │ ├─ecdf_grid_plot.png <---- Grid plot to visualize all tested input parameters vs extracted outputs using ECDF distribution - │ ├─grid_statuses_summary.png <---- Summary plot of statuses for all cases of the grid - │ └─single_plots_ecdf <---- folder with all the single ECDF plots corresponding to all the panels from the grid plot - │ ├─ecdf_[extracted_output]_per_[input_param].png <---- Single plot using ECDF distribution to visualize one tested input parameter vs one extracted output for all cases + ├─cfgs <---- folder with all the `input.toml` files for all cases + ├─logs <---- folder with all the `proteus_case_number.log` files for all cases + ├─manager.log <---- the log file of the grid + ├─slurm_dispatch.sh <---- if use_slurm=True in `grid_proteus.py`, this is the slurm file to submit with `sbatch` command + ├─post_processing <---- this folder contains all the output from this script + │ └─extracted_data <---- folder with the generated CSV file + │ 
└─your_grid_name_final_extracted_data.csv <---- CSV file containing the tested input parameters and extracted output from the grid + │ └─plots_grid <---- folder with the generated plots + │ ├─ecdf_grid_plot_your_grid_name.png <---- Grid plot to visualize all tested input parameters vs extracted outputs using ECDF distribution + │ └─summary_grid_statuses_your_grid_name.png <---- Summary plot of statuses for all cases of the grid │ └─... ``` - - To post-processed the grid and generate ECDF plots for further analysis, use the proteus command line interface: ```console -proteus grid-analyze /path/to/grid/ [grid_name] -``` - -The user can also specify to update the CSV file with new output to extract for instance by adding the `--update-csv` flag, using : - -```console -proteus grid-analyze /path/to/grid/ [grid_name] --update-csv +proteus grid-analyse /path/to/example.grid_analyse.toml ``` -To get more information about this command, run : - -```console -proteus grid-analyze --help -``` - -*Note to the user : update `output_to_extract` and other plotting parameters for your grid* - -1. The user can choose the output to extract for each simulations at the last time-step (from the `runtime_helpfile.csv` file of each cases) like 'esc_rate_total','Phi_global','P_surf','T_surf','M_planet'... -To do so, the user should go to `PROTEUS/src/proteus/grid/run_grid_analysis.py` and modify the variable `output_to_extract` within the `run_grid_analyze` function. - -2. In the Step 2 of the same function, the user should also modify accordingly the `param_settings_single` and `output_settings_single` object for generating single plots (same for the grid plot with `param_settings_grid` and `output_settings_grid`).For this, the user should add the input parameters and output extracted from the grid, if this is not already present in the script and comment the one useless for the grid. 
+`/path/to/example.grid_analyse.toml` is the path to the TOML file containing the grid analysis configuration.
(Do not include the grid name here) - - GRID_NAME : Name of the grid to analyze. +def grid_analyse(path_grid_analyse: str): + """Generate grid analysis plots and CSV summary files from a grid + path_grid_analyse : Path to the toml file containing grid analysis configuration + """ + from proteus.grid.post_processing import main - Example of usage : + main(path_grid_analyse) - proteus grid_analyze /path/to/grid/ grid_name --update-csv - """ - from proteus.grid.run_grid_analysis import run_grid_analyze - run_grid_analyze(path_to_grid=grid_path, grid_name=grid_name, update_csv=update_csv) +cli.add_command(grid_analyse) -cli.add_command(grid_analyze) # ---------------- # GridPROTEUS and BO inference scheme, runners # ---------------- diff --git a/src/proteus/grid/post_processing_updated.py b/src/proteus/grid/post_processing.py similarity index 90% rename from src/proteus/grid/post_processing_updated.py rename to src/proteus/grid/post_processing.py index cedf5a6b3..e5d28f9be 100644 --- a/src/proteus/grid/post_processing_updated.py +++ b/src/proteus/grid/post_processing.py @@ -1,14 +1,15 @@ from __future__ import annotations +import tomllib from pathlib import Path import matplotlib as mpl -import matplotlib.cm as cm import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import toml +from matplotlib import cm # --------------------------------------------------------- # Data loading, extraction, and CSV generation functions @@ -174,44 +175,6 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): return case_params, tested_params -# def extract_grid_output(cases_data: list, parameter_name: str): -# """ -# Extract a specific output for each simulation of the grid at the last time step. - -# Parameters -# ---------- -# cases_data : list -# List of dictionaries containing simulation data. - -# parameter_name : str -# The name of the parameter to extract from 'output_values'. 
- -# Returns -# ------- -# output_values : list -# A list containing the extracted values of the specified parameter for all cases of the grid. -# """ - -# output_last_step = [] -# columns_printed = False # Flag to print columns only once - -# for case_index, case in enumerate(cases_data): -# df = case['output_values'] -# if df is None: -# print(f"Warning: No output values found for case number '{case_index}'") -# output_last_step.append(np.nan) # Append NaN if no output values -# continue # Skip cases with no output -# if parameter_name in df.columns: -# parameter_value = df[parameter_name].iloc[-1] -# output_last_step.append(parameter_value) -# else: -# if not columns_printed: -# print(f"Warning: Parameter '{parameter_name}' does not exist in case '{case['init_parameters'].get('name', 'Unknown')}'") -# print(f"Available columns in this case: {', '.join(df.columns)}") -# columns_printed = True - -# return output_last_step - def load_phi_crit(grid_dir: str | Path): """" Load the critical melt fraction (phi_crit) from the reference configuration file of the grid. 
@@ -837,3 +800,85 @@ def color_func(v): output_file = output_dir / f"ecdf_grid_plot_{grid_name}.png" fig.savefig(output_file, dpi=300, bbox_inches='tight') plt.close(fig) + +# --------------------------------------------------------- +# main +# --------------------------------------------------------- +def main(grid_analyse_toml_file: str | Path = None): + + # Load configuration from grid_analyse.toml + with open(grid_analyse_toml_file, "rb") as f: + cfg = tomllib.load(f) + + # Get grid path and name + grid_path = Path(cfg["grid_path"]) + grid_name = get_grid_name(grid_path) + + print(grid_path) + print(f"Analyzing grid: {grid_name}") + + # Load grid data + data = load_grid_cases(grid_path) + input_param_grid_per_case, tested_params_grid = get_tested_grid_parameters( + data, grid_path + ) + + # --- Summary CSVs --- + update_csv = cfg.get("update_csv", True) + + summary_dir = grid_path / "post_processing" / "extracted_data" + summary_csv_all = summary_dir / f"{grid_name}_final_extracted_data_all.csv" + summary_csv_completed = summary_dir / f"{grid_name}_final_extracted_data_completed.csv" + summary_csv_running_error = summary_dir / f"{grid_name}_final_extracted_data_running_error.csv" + + if update_csv: + generate_summary_csv( + data, input_param_grid_per_case, grid_path, grid_name + ) + generate_completed_summary_csv( + data, input_param_grid_per_case, grid_path, grid_name + ) + generate_running_error_summary_csv( + data, input_param_grid_per_case, grid_path, grid_name + ) + else: + # Check that CSVs exist + for f in [summary_csv_all, summary_csv_completed, summary_csv_running_error]: + if not f.exists(): + raise FileNotFoundError( + f"{f.name} not found in {summary_dir}, " + "but update_csv is set to False. Please set update_csv to True to generate it." 
+ ) + + # --- Plot grid status --- + if cfg.get("plot_status", True): + all_simulations_data_csv = pd.read_csv(summary_csv_all, sep="\t") + plot_grid_status(all_simulations_data_csv, grid_path, grid_name) + print("Plot grid status summary is available.") + + # --- ECDF plots --- + if cfg.get("plot_ecdf", True): + completed_simulations_data_csv = pd.read_csv(summary_csv_completed, sep="\t") + columns_output = list(cfg["output_variables"].keys()) + grouped_data = {} + for col in columns_output: + group = group_output_by_parameter( + completed_simulations_data_csv, + tested_params_grid, + [col], + ) + grouped_data.update(group) + + param_settings_grid, output_settings_grid = load_ecdf_plot_settings(cfg) + ecdf_grid_plot( + grouped_data, + param_settings_grid, + output_settings_grid, + grid_path, + grid_name, + ) + print("ECDF grid plot is available.") + + +if __name__ == "__main__": + main( "/home2/p315557/PROTEUS/input/ensembles/example.grid_analyse.toml") diff --git a/src/proteus/grid/post_processing_grid.py b/src/proteus/grid/post_processing_grid.py deleted file mode 100644 index 9bfbbf219..000000000 --- a/src/proteus/grid/post_processing_grid.py +++ /dev/null @@ -1,1087 +0,0 @@ -from __future__ import annotations - -import ast -import csv -import os -import re -from io import StringIO -from pathlib import Path -from typing import Any, Dict, List - -import matplotlib as mpl -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sns -import toml - -##### Functions for extracting grid data ##### - -def load_grid_cases(grid_dir: Path): - """ - Load information for each simulation of a PROTEUS grid. - Read 'runtime_helpfile.csv', 'init_coupler.toml' and status - files for each simulation of the grid. 
- - Parameters - ---------- - grid_dir : Path or str - Path to the grid directory containing the 'case_*' folders - - Returns - ---------- - combined_data : list - List of dictionaries, each containing: - - 'init_parameters' (dict): Parameters loaded from `init_coupler.toml`. - - 'output_values' (pandas.DataFrame): Data from `runtime_helpfile.csv`. - - 'status' (str): Status string from the `status` file, or 'Unknown' if unavailable. - """ - - combined_data = [] - grid_dir = Path(grid_dir) - - # Collect and sort the case directories - case_dirs = list(grid_dir.glob('case_*')) - case_dirs.sort(key=lambda p: int(p.name.split('_')[1])) - - for case in case_dirs: - runtime_file = case / 'runtime_helpfile.csv' - init_file = case / 'init_coupler.toml' - status_file = case / 'status' - - # Load init parameters - init_params = {} - if init_file.exists(): - try: - init_params = toml.load(open(init_file)) - except Exception as e: - print(f"Error reading init file in {case.name}: {e}") - - # Read runtime_helpfile.csv if available - df = None - if runtime_file.exists(): - try: - df = pd.read_csv(runtime_file, sep='\t') - except Exception as e: - print(f"WARNING : Error reading runtime_helpfile.csv for {case.name}: {e}") - - # Read status file if available - status = 'Unknown' - if status_file.exists(): - try: - raw_lines = [ln.strip() for ln in status_file.read_text(encoding='utf-8').splitlines() if ln.strip()] - if len(raw_lines) >= 2: - status = raw_lines[1] - elif raw_lines: - status = raw_lines[0] - else: - status = 'Empty' - except Exception as e: - print(f"WARNING : Error reading status file in {case.name}: {e}") - else: - print(f"WARNING : Missing status file in {case.name}") - - # Combine all info about simulations into a list of dictionaries - combined_data.append({ - 'init_parameters': init_params, - 'output_values' : df, - 'status' : status - }) - - # - statuses = [c['status'] for c in combined_data] - status_counts = 
pd.Series(statuses).value_counts().sort_values(ascending=False) - - print('-----------------------------------------------------------') - print(f"Total number of simulations: {len(statuses)}") - print('-----------------------------------------------------------') - print("Number of simulations per status:") - for st, count in status_counts.items(): - print(f" - {st:<45} : {count}") - print('-----------------------------------------------------------') - - return combined_data - -def get_grid_parameters(grid_dir: str): - """ - Extract grid parameter names and values from the 'manager.log' file. - - Parameters - ---------- - grid_dir : str - Path to the directory of the PROTEUS grid - - Returns - ------- - param_grid : dict - A dictionary where each key is a parameter name, and its corresponding values used for the entire grid is a list. - - case_params : dict - Dictionary containing each case number with the name and values of the tested parameters in this grid. - """ - - log_file = os.path.join(grid_dir, 'manager.log') - - # Check if the 'manager.log' file exists - if not os.path.exists(log_file): - print(f"Error: manager.log not found at {log_file}") - return {}, {} - - # Read all lines from the 'manager.log' file - with open(log_file, 'r') as file: - lines = file.readlines() - - param_grid = {} - case_params = {} - - # Expressions to match the relevant lines - dimension_pattern = re.compile(r"parameter:\s*(\S+)") - values_pattern = re.compile(r"values\s*:\s*\[(.*?)\]") - case_line_pattern = re.compile(r"\]\s+(\d+):\s+(\{.*\})") - - current_param = None - for line in lines: - line = line.strip() - # Check if the line defines a new parameter - dim_match = dimension_pattern.search(line) - if dim_match: - current_param = dim_match.group(1) - continue - # Check if the line defines values for the current parameter - val_match = values_pattern.search(line) - if val_match and current_param: - try: - val_list = ast.literal_eval(f"[{val_match.group(1)}]") - 
param_grid[current_param] = val_list - current_param = None - except Exception as e: - print(f"Error parsing values for {current_param}: {e}") - # Check if the line contains case-specific data - case_match = case_line_pattern.search(line) - if case_match: - case_num = int(case_match.group(1)) - case_dict_str = case_match.group(2) - try: - case_params[case_num] = ast.literal_eval(case_dict_str) - except Exception as e: - print(f"Error parsing case {case_num}: {e}") - - return param_grid, case_params - -def extract_grid_output(cases_data: list, parameter_name: str): - """ - Extract a specific parameter from the 'output_values' of each simulation case. - - Parameters - ---------- - cases_data : list - List of dictionaries containing simulation data. - - parameter_name : str - The name of the parameter to extract from 'output_values'. - - Returns - ------- - parameter_values : list - A list containing the extracted values of the specified parameter for all cases of the grid. - """ - - parameter_values = [] - columns_printed = False # Flag to print columns only once - - for case_index, case in enumerate(cases_data): - df = case['output_values'] - if df is None: - print(f"Warning: No output values found for case number '{case_index}'") - parameter_values.append(np.nan) # Append NaN if no output values - continue # Skip cases with no output - if parameter_name in df.columns: - parameter_value = df[parameter_name].iloc[-1] - parameter_values.append(parameter_value) - else: - if not columns_printed: - print(f"Warning: Parameter '{parameter_name}' does not exist in case '{case['init_parameters'].get('name', 'Unknown')}'") - print(f"Available columns in this case: {', '.join(df.columns)}") - columns_printed = True - - # Print the extracted output values for the specified parameter - print(f"Extracted output (at last time step) : {parameter_name} ") - - return parameter_values - -def extract_solidification_time(cases_data: list, phi_crit: float = 0.005): - """ - Extract the 
solidification time at the time step where the condition - 'Phi_global' < phi_crit is first satisfied for each planet. - - Parameters - ---------- - cases_data : list - List of dictionaries containing simulation data. - - phi_crit : float - The critical melt fraction value below which a planet is considered solidified. - A typical value is 0.005. - - Returns - ------- - solidification_times : list - A list containing the solidification times for all solidified planets of the grid. - If a planet never solidifies, it will have a NaN in the list. - """ - - solidification_times = [] - columns_printed = False - - for i, case in enumerate(cases_data): - df = case['output_values'] - # Check if the required columns exist in the dataframe - if df is None: - solidification_times.append(np.nan) # Append NaN if no output values - continue - - if 'Phi_global' in df.columns and 'Time' in df.columns: - condition = df['Phi_global'] < phi_crit - if condition.any(): - first_index = condition.idxmax() - solid_time = df.loc[first_index, 'Time'] # Get the index of the time at which the condition is first satisfied - solidification_times.append(solid_time) - else: - solidification_times.append(np.nan) # Append NaN if condition is not satisfied - else: - if not columns_printed: - print("Warning: 'Phi_global' and/or 'Time' columns not found in some cases.") - print(f"Available columns: {', '.join(df.columns)}") - columns_printed = True - solidification_times.append(np.nan) # Append NaN if columns are missing - - # Count the number of cases with a status = '10 Completed (solidified)' - status_10_cases = [case for case in cases_data if (case.get('status') or '').strip() == 'Completed (solidified)'] - completed_count = len(status_10_cases) - # Count only valid solidification times (non-NaN) - valid_solidification_times = [time for time in solidification_times if not np.isnan(time) and time > 0.0] - valid_solidified_count = len(valid_solidification_times) - - 
print('-----------------------------------------------------------') - print(f"Extracted solidification times (Phi_global < {phi_crit})") - print(f"→ Found {valid_solidified_count} valid solidified cases based on Phi_global") - print(f"→ Found {completed_count} cases with status 'Completed (solidified)' ") - # Check if the number of valid solidified cases matches the number of cases with status '10 Completed (solidified)' in the grid, to be sure the extraction is correct - if valid_solidified_count != completed_count: - print("WARNING: The number of valid solidified planets does not match the number of planets with status: '10 Completed (solidified)'") - # To debug, the user can uncomment the following lines to print the solidification times for all plaent with '10 Completed (solidified)' - # print("\nChecking final Phi_global values for all status '10 Completed (solidified)' cases:") - # for i, case in enumerate(status_10_cases): - # df = case['output_values'] - # if 'Phi_global' in df.columns: - # final_phi = df['Phi_global'].iloc[-1] - # print(f"[Status Case {i}] Final Phi_global = {final_phi}") - # else: - # print(f"[Status Case {i}] Phi_global column missing.") - else: - print("Solidified planets count matches the number of planets with status: 'Completed (solidified)'.") - print('-----------------------------------------------------------') - - return solidification_times - -def save_grid_data_to_csv(grid_name: str, cases_data: list, grid_parameters: dict, case_params: Dict[int, Dict[str, Any]], - extracted_value: dict, output_to_extract: list, output_dir: Path, phi_crit: float = 0.005): - """ - Save all simulation information (status, grid parameters, output values) into a CSV file - for later analysis (using plot_grid.py to make plots for instance). - - Parameters - ---------- - grid_name : str - Name of the grid. - - cases_data : list - List of dictionaries containing simulation data. 
- - grid_parameters : dict - A dictionary where each key is a parameter name, and its corresponding values used for the entire grid is a list. - - case_params : dict - Dictionary containing each case number with the name and values of the tested parameters in this grid. - - extracted_value : dict - A list containing the extracted values of the specified parameter for all cases of the grid. - - output_to_extract : list - List of output values extracted from each simulation in the grid. - - output_dir : Path - The directory where the generated CSV file will be saved. If the directory does not exist, - it will be created. - - phi_crit : float - The critical melt fraction value used to determine if a planet is considered solidified. - A typical value is 0.005. - """ - # Check if the output directory exist, if not create it - output_dir = Path(output_dir) - output_dir.mkdir(parents=True, exist_ok=True) - - # CSV file path - csv_file = output_dir / f"{grid_name}_extracted_data.csv" - - # Write CSV file - with open(csv_file, 'w', newline='') as csvfile: - writer = csv.writer(csvfile) - - # Header block - writer.writerow(["#############################################################################################################"]) - writer.writerow([f"Grid name: {grid_name}"]) - writer.writerow([f"Total number of cases: {len(cases_data)}"]) - writer.writerow([f"Dimension of the grid: {len(grid_parameters)}"]) - writer.writerow([f"phi_crit: {phi_crit}"]) - writer.writerow(["----------------------------------------------------------"]) - writer.writerow([" Grid Parameters"]) - writer.writerow(["----------------------------------------------------------"]) - max_label_length = max(len(param) for param in grid_parameters.keys()) - for param, values in grid_parameters.items(): - aligned_param = f"{param: <{max_label_length}}" - values_str = f"[{', '.join(map(str, values))}]" - writer.writerow([f"{aligned_param}: {values_str}"]) - 
writer.writerow(["----------------------------------------------------------"]) - writer.writerow(["Extracted output values:" f"[{', '.join(extracted_value.keys())}]"]) - writer.writerow(["----------------------------------------------------------"]) - writer.writerow([f"| Case number | Status | {' | '.join(grid_parameters.keys())} | {' | '.join(extracted_value.keys())} |"]) - writer.writerow(["#############################################################################################################"]) - writer.writerow([]) - - # CSV table header - writer.writerow(["Case number", "Status"] + list(grid_parameters.keys()) + list(extracted_value.keys())) - - # Write data rows - for case_index, case_data in enumerate(cases_data): - status = case_data.get('status', 'Unknown') or 'Unknown' - row = [case_index, f"'{status}'"] - # Add grid parameters values for each case - case_param_values = case_params.get(case_index, {}) - for param in grid_parameters.keys(): - row.append(case_param_values.get(param, 'NA')) - # Add extracted‐output values (now every list is length=num_cases) - for param in extracted_value.keys(): - value_list = extracted_value[param] - row.append(value_list[case_index]) - - # Write the row to the CSV file - writer.writerow(row) - - print(f"Extracted data has been successfully saved to {csv_file}.") - print('-----------------------------------------------------------') - -def save_error_running_cases(grid_name: str, cases_data: List[Dict[str, Any]], grid_parameters: Dict[str, List[Any]], case_params: Dict[int, Dict[str, Any]], extracted_value: Dict[str, List[Any]], output_to_extract: List[str], output_dir: Path,) -> None: - """ - Scan through `cases_data` and pick out any case whose status is 'running' - or starts with 'error', then write those rows (with identical columns) - to a separate CSV named '{grid_name}_error_running_cases.csv'. 
- """ - # 1) Find indices with status starting with 'error' (case‐insensitive) and exactly 'running' - error_indices = set() - running_indices = set() - - for idx, case_data in enumerate(cases_data): - status_raw = case_data.get("status", "") - status = status_raw.strip().lower() - if status.startswith("error"): - error_indices.add(idx) - elif status == "running": - running_indices.add(idx) - # Combine both into a single set of “status‐based” bad indices - status_bad_indices = error_indices.union(running_indices) - - # 2) Find indices with any missing ("NA" or None) in extracted outputs - na_indices = set() - for param, vals in extracted_value.items(): - for idx, val in enumerate(vals): - s = str(val).strip().upper() - if s == "" or s == "NA" or s == "NONE": - na_indices.add(idx) - - # 3) Union of (error/running) and (missing) indices - bad_indices = sorted(status_bad_indices.union(na_indices)) - if not bad_indices: - print("→ No 'error'/'running' or missing‐outputs cases found; skipping error/running CSV.") - return - - # 4) Build output path - err_csv = output_dir + f"{grid_name}_error_running_cases.csv" - - with open(err_csv, mode="w", newline="") as csvfile: - writer = csv.writer(csvfile) - - # --- Header block --- - writer.writerow([ - "############################################################" - "################################" - ]) - writer.writerow([f"Grid name: {grid_name}"]) - writer.writerow([f"Total number of selected cases: {len(bad_indices)}"]) - writer.writerow([f"Number of 'error…' cases: {len(error_indices)}"]) - writer.writerow([f"Number of 'running' cases: {len(running_indices)}"]) - writer.writerow([f"Number of missing‐output (NA or None): {len(na_indices)}"]) - writer.writerow(["----------------------------------------------------------"]) - # Column names - writer.writerow( - ["Case number", "Status"] - + list(grid_parameters.keys()) - + list(extracted_value.keys()) - ) - writer.writerow([ - 
"############################################################" - "################################" - ]) - writer.writerow([]) - - # --- Data rows for each bad index --- - for case_index in bad_indices: - status = cases_data[case_index].get("status", "Unknown") or "Unknown" - row = [case_index, f"'{status}'"] - - # Grid‐parameter columns - for param in grid_parameters.keys(): - row.append(case_params.get(case_index, {}).get(param, "NA")) - - # Extracted‐output columns - for param in extracted_value.keys(): - vals = extracted_value[param] - if case_index < len(vals): - row.append(vals[case_index]) - else: - row.append("NA") - - writer.writerow(row) - - print(f"→ Error/Running (and missing‐output) CSV saved to: {err_csv}") - -def save_completed_cases( - grid_name: str, - cases_data: List[Dict[str, Any]], - grid_parameters: Dict[str, List[Any]], - case_params: Dict[int, Dict[str, Any]], - extracted_value: Dict[str, List[Any]], - output_to_extract: List[str], - output_dir: Path, -) -> None: - """ - Save all cases whose status starts with 'Completed' into a separate CSV file - named '{grid_name}_filtered.csv'. - - Parameters - ---------- - grid_name : str - Name of the grid. - - cases_data : list - List of dictionaries containing simulation data. - - grid_parameters : dict - A dictionary where each key is a parameter name, and its corresponding values - used for the entire grid is a list. - - case_params : dict - Dictionary containing each case number with the name and values of the - tested parameters in this grid. - - extracted_value : dict - A dictionary containing the extracted values of the specified parameter - for all cases of the grid. - - output_to_extract : list - List of output values extracted from each simulation in the grid. - - output_dir : Path - Directory where the generated CSV file will be saved. - Created if it does not exist. 
- """ - output_dir = Path(output_dir) - output_dir.mkdir(parents=True, exist_ok=True) - - # Identify indices where status starts with "Completed" (case-insensitive, trimmed) - completed_indices = [] - for idx, case_data in enumerate(cases_data): - status_raw = case_data.get("status", "") - # Normalize status string safely - if not isinstance(status_raw, str): - continue - status_clean = status_raw.strip().lower().replace('\xa0', ' ') # replace non-breaking spaces if any - if status_clean.startswith("completed"): - completed_indices.append(idx) - - if not completed_indices: - print("→ No cases with status starting with 'Completed' found; skipping filtered CSV.") - return - - # Build output path - filtered_csv = output_dir / f"{grid_name}_filtered.csv" - - with open(filtered_csv, mode="w", newline="") as csvfile: - writer = csv.writer(csvfile) - - # Header block - writer.writerow(["#############################################################################################################"]) - writer.writerow([f"Grid name: {grid_name}"]) - writer.writerow([f"Total number of 'Completed*' cases: {len(completed_indices)}"]) - writer.writerow(["----------------------------------------------------------"]) - writer.writerow( - ["Case number", "Status"] - + list(grid_parameters.keys()) - + list(extracted_value.keys()) - ) - writer.writerow(["#############################################################################################################"]) - writer.writerow([]) - - # Write each completed case row - for case_index in completed_indices: - status = cases_data[case_index].get("status", "Unknown") or "Unknown" - row = [case_index, f"'{status}'"] - - # Add grid parameter values - for param in grid_parameters.keys(): - row.append(case_params.get(case_index, {}).get(param, "NA")) - - # Add extracted outputs - for param in extracted_value.keys(): - vals = extracted_value[param] - if case_index < len(vals): - row.append(vals[case_index]) - else: - row.append("NA") - - 
writer.writerow(row) - - print(f"→ Completed* cases CSV saved to: {filtered_csv}") - -##### Functions for plotting grid data results ##### - -def load_extracted_data(data_path : str | Path, grid_name :str): - - """ - Load extracted data from the CSV file generated with post_processing.py, returning a DataFrame for plotting. - - Parameters - ---------- - data_path : str or Path - Path to the directory containing the CSV file with the extracted data. - grid_name : str - Name of the grid - - Returns - ------- - df : pd.DataFrame - DataFrame with the extracted simulation data. - - grid_params : dict - Dictionary of grid parameter names and their value lists. - - extracted_outputs : list of str - List of extracted output variable names. - """ - - csv_file = os.path.join(data_path, f"{grid_name}_extracted_data.csv") - if not os.path.exists(csv_file): - raise FileNotFoundError(f"CSV file not found at: {csv_file}") - - with open(csv_file, 'r') as f: - lines = f.readlines() - - data_start_idx = None - grid_params = {} - extracted_outputs = [] - grid_dimension = None - - # Get grid dimension - for i, line in enumerate(lines): - if line.strip().startswith("Dimension of the grid"): - grid_dimension = int(line.split(":")[-1].strip()) - break - - if grid_dimension is None: - raise ValueError("Could not find 'Dimension of the grid' in the CSV file.") - - # Extract grid parameters - for i, line in enumerate(lines): - if "Grid Parameters" in line: - grid_param_start_idx = i + 2 # Skip divider after header - break - else: - raise ValueError("Could not find 'Grid Parameters' section in the CSV file.") - - for line in lines[grid_param_start_idx:]: - line = line.strip().strip('"') # Remove quotes and whitespace - - # Stop at the next divider or output section - if line.startswith("----------------------------------------------------------") or "Extracted output values" in line: - break - - if ':' in line: - param_name, param_values = line.split(":", 1) - param_name = param_name.strip() 
- param_values = param_values.strip().strip("[]").split(",") - param_values = [val.strip() for val in param_values] - param_values = [float(val) if is_float(val) else val for val in param_values] - grid_params[param_name] = param_values - - # Check dimensions - if len(grid_params) != grid_dimension: - raise ValueError(f"Mismatch: Expected {grid_dimension} grid parameters, found {len(grid_params)}.") - - # Extract output names - for line in lines: - line = line.strip().strip('"') # Remove quotes and whitespace - - if line.startswith("Extracted output values"): - if ":" in line: - _, outputs_part = line.split(":", 1) - outputs_part = outputs_part.strip().strip("[]") - extracted_outputs = [s.strip() for s in outputs_part.split(",")] - break - - # Find start of actual data - for i, line in enumerate(lines): - if line.strip().startswith("Case number"): - data_start_idx = i - break - - if data_start_idx is None: - raise ValueError("Could not find CSV header line starting with 'Case number'.") - - # Extract the actual data section and read it - data_section = ''.join(lines[data_start_idx:]) - df = pd.read_csv(StringIO(data_section)) - - return df, grid_params, extracted_outputs - -def is_float(value): - """Helper function to check if a string can be converted to a float.""" - try: - float(value) # Try converting string to float - return True - except ValueError: - return False # Fail not a valid float - -def plot_dir_exists(plot_dir: Path): - - """ - Check if the plot directory exists. If not, create it. - - Parameters - ---------- - plot_dir : Path - Path object pointing to the desired plot directory. - """ - plot_dir = Path(plot_dir) - if not plot_dir.exists(): - plot_dir.mkdir(parents=True, exist_ok=True) - print(f"Created plot directory: {plot_dir}") - -def group_output_by_parameter(df,grid_parameters,outputs): - """ - Groups output values (like solidification times) by a specific grid parameter. 
- - Parameters - ---------- - df : pd.DataFrame - DataFrame containing simulation results including value of the grid parameter and the corresponding extracted output. - - grid_parameters : str - Column name of the grid parameter to group by (like 'escape.zephyrus.Pxuv'). - - outputs : str - Column name of the output to extract (like 'solidification_time'). - - Returns - ------- - dict - Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. - """ - grouped = {} - - for param in grid_parameters: - for output in outputs: - key_name = f"{output}_per_{param}" - value_dict = {} - for param_value in df[param].dropna().unique(): - subset = df[df[param] == param_value] - output_values = subset[output].replace([np.inf, -np.inf], np.nan) - output_values = output_values.dropna() - output_values = output_values[output_values > 0] # Remove zeros and negatives - - value_dict[param_value] = output_values - - grouped[key_name] = value_dict - - return grouped - -def plot_grid_status(cases_data, plot_dir: Path, grid_name: str, status_colors: dict = None): - """ - Plot the status of simulations from the PROTEUS grid with improved x-axis readability. - - Parameters - ---------- - cases_data : list or DataFrame - Contains the status of all simulations from the grid. - - plot_dir : Path - Path to the plots directory. - - grid_name : str - Name of the grid, used for the plot title. - - status_colors : dict, optional - A dictionary mapping statuses to specific colors. If None, a default palette is used. 
- """ - - # Extract and clean statuses - statuses = cases_data['Status'].fillna('unknown').astype(str) - status_counts = statuses.value_counts().sort_values(ascending=False) - - # Set colors for the bars - if status_colors: - formatted_status_keys = [s.replace(" ", "\n") for s in status_counts.index] - palette = {formatted: status_colors.get(original, 'gray') - for formatted, original in zip(formatted_status_keys, status_counts.index)} - else: - palette = sns.color_palette("Accent", len(status_counts)) - formatted_status_keys = [s.replace(" (", " \n (") for s in status_counts.index] - palette = dict(zip(formatted_status_keys, palette)) - - # Prepare dataframe for plotting - plot_df = pd.DataFrame({ - 'Status': formatted_status_keys, - 'Count': status_counts.values - }) - - plt.figure(figsize=(10, 7)) - ax = sns.barplot( - data=plot_df, - x='Status', - y='Count', - hue='Status', - palette=palette, - dodge=False, - edgecolor='black' - ) - - # Remove legend if it was created - if ax.legend_: - ax.legend_.remove() - - # Add value labels above bars - total_simulations = len(cases_data) - for i, count in enumerate(status_counts.values): - percentage = (count / total_simulations) * 100 - ax.text( - i, count + 1, - f"{count} ({percentage:.1f}%)", - ha='center', va='bottom', fontsize=15 - ) - - # Boxed total in upper right - plt.gca().text( - 0.97, 0.94, - f"Total number of simulations : {total_simulations}", - transform=plt.gca().transAxes, - ha='right', va='top', - fontsize=14, - #bbox=dict(boxstyle="round,pad=0.5", facecolor="white", edgecolor="black") - ) - - plt.grid(alpha=0.2, axis='y') - plt.title(f"Grid statuses summary : {grid_name}", fontsize=16) - plt.xlabel("Simulation statuses", fontsize=16) - plt.ylabel("Number of simulations", fontsize=16) - plt.yticks(fontsize=12) - plt.xticks(fontsize=12) - plt.tight_layout() - output_path = plot_dir + 'grid_statuses_summary.png' - plt.savefig(output_path, dpi=300) - plt.close() - - print("Summary plot of grid statuses is 
available") - -def ecdf_single_plots(grid_params: dict, grouped_data: dict, param_settings: dict, output_settings: dict, plots_path: str): - """ - Generates and saves one ECDF plot per combination of output and input parameter. - - Parameters - ---------- - - grid_params : dict - A mapping from parameter names (e.g. "orbit.semimajoraxis") to arrays/lists of tested values. - - grouped_data : dict - Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. - - param_settings : dict - For each input-parameter key, a dict containing: - - "label": label of the colormap for the corresponding input parameter - - "colormap": a matplotlib colormap (e.g. mpl.cm.plasma) - - "log_scale": bool, whether to color-normalize on a log scale - - output_settings : dict - For each output key, a dict containing: - - "label": label of the x-axis for the corresponding output quantity - - "log_scale": bool, whether to plot the x-axis on log scale - - "scale": float, a factor to multiply raw values by before plotting - - plots_path : str - Path to the grid where to create "single_plots_ecdf" and save all .png plots - """ - # Create output directory if not already there - output_dir = os.path.join(plots_path, "single_plots_ecdf") - os.makedirs(output_dir, exist_ok=True) - - for output_name, out_settings in output_settings.items(): - for param_name, settings in param_settings.items(): - tested_param = grid_params.get(param_name, []) - if len(tested_param) <= 1: - # Skip if only a single value was tested - continue - - # Plot settings for this input parameter - param_label = settings["label"] - cmap = settings["colormap"] - color_log = settings.get("log_scale", False) - - # Plot settings for this output - x_label = out_settings["label"] - x_log = out_settings.get("log_scale", False) - scale = out_settings.get("scale", 1.0) - - # Determine if the parameter array is numeric - is_numeric = 
np.issubdtype(np.array(tested_param).dtype, np.number) - - if is_numeric: - # Continuous colormap: Normalize either linearly or in log-space - if color_log: - norm = mpl.colors.LogNorm(vmin=min(tested_param), vmax=max(tested_param)) - else: - norm = mpl.colors.Normalize(vmin=min(tested_param), vmax=max(tested_param)) - def color_func(v): - return cmap(norm(v)) - colorbar_needed = True - else: - # Categorical colormap: map each unique value to one color - unique_vals = sorted(set(tested_param)) - cats_cmap = mpl.colormaps.get_cmap(cmap.name).resampled(len(unique_vals)) - color_map = {val: cats_cmap(i) for i, val in enumerate(unique_vals)} - def color_func(val): - return color_map[val] - colorbar_needed = False - - # Create a new figure & axes - fig, ax = plt.subplots(figsize=(10, 6)) - data_key = f"{output_name}_per_{param_name}" - - for val in tested_param: - # Skip if no data for this value - if val not in grouped_data.get(data_key, {}): - continue - raw = np.array(grouped_data[data_key][val]) * scale - sns.ecdfplot( - data=raw, - log_scale=x_log, - stat="proportion", - color=color_func(val), - linewidth=3, - ax=ax - ) - - # Axis formatting - ax.set_xlabel(x_label, fontsize=14) - ax.set_ylabel("Normalized cumulative fraction of simulations", fontsize=14) - ax.grid(alpha=0.1) - - # Colorbar or legend - if colorbar_needed: - sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm) - cbar = fig.colorbar(sm, ax=ax, pad=0.02, aspect=30) - cbar.set_label(param_label, fontsize=14) - # Set ticks at each tested parameter value - ticks = sorted(set(tested_param)) - cbar.set_ticks(ticks) - else: - # Build a legend for categorical values - unique_vals = sorted(set(tested_param)) - handles = [ - mpl.lines.Line2D([0], [0], color=color_map[val], lw=3, label=str(val)) - for val in unique_vals - ] - ax.legend(handles=handles, loc="lower right", title=param_label) - - # Save and close - fname = f"ecdf_{output_name}_per_{param_name.replace('.', '_')}.png" - plt.tight_layout() - 
plt.savefig(os.path.join(output_dir, fname), dpi=300) - plt.close() - - print(f"All single ECDF plots are available at {output_dir}") - -def ecdf_grid_plot(grid_params: dict, grouped_data: dict, param_settings: dict, output_settings: dict, plots_path: str): - """ - Creates a grid of ECDF plots where each row corresponds to one input parameter - and each column corresponds to one output. Saves the resulting figure as a PNG. - - Parameters - ---------- - - grid_params : dict - A mapping from parameter names (e.g. "orbit.semimajoraxis") to arrays/lists of tested values. - - grouped_data : dict - Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. - - param_settings : dict - For each input-parameter key, a dict containing: - - "label": label of the colormap for the corresponding input parameter - - "colormap": a matplotlib colormap (e.g. mpl.cm.plasma) - - "log_scale": bool, whether to color-normalize on a log scale - - output_settings : dict - For each output key, a dict containing: - - "label": label of the x-axis for the corresponding output quantity - - "log_scale": bool, whether to plot the x-axis on log scale - - "scale": float, a factor to multiply raw values by before plotting - - plots_path : str - Path to the grid where to create "single_plots_ecdf" and save all .png plots - """ - # Ensure output directory exists - os.makedirs(plots_path, exist_ok=True) - - # List of parameter names (rows) and output names (columns) - param_names = list(param_settings.keys()) - out_names = list(output_settings.keys()) - - # Create subplot grid: rows = parameters, columns = outputs - n_rows = len(param_names) - n_cols = len(out_names) - fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 2.5 * n_rows), squeeze=False) - - # Loop through parameters (rows) and outputs (columns) - for i, param_name in enumerate(param_names): - tested_param = grid_params.get(param_name, []) - if not tested_param: - 
print(f"⚠️ Skipping {param_name} — no tested values found in grid_params") - continue - - settings = param_settings[param_name] - - # Determine coloring - is_numeric = np.issubdtype(np.array(tested_param).dtype, np.number) - if is_numeric: - vmin, vmax = min(tested_param), max(tested_param) - if vmin == vmax: - # avoid log/normalize errors with constant values - vmin, vmax = vmin - 1e-9, vmax + 1e-9 - if settings.get("log_scale", False): - norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax) - else: - norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) - def color_func(v): - return settings["colormap"](norm(v)) - colorbar_needed = True - else: - unique_vals = sorted(set(tested_param)) - cmap = mpl.colormaps.get_cmap(settings["colormap"]).resampled(len(unique_vals)) - color_map = {val: cmap(j) for j, val in enumerate(unique_vals)} - def color_func(v): - return color_map[v] - colorbar_needed = False - - for j, output_name in enumerate(out_names): - ax = axes[i][j] - out_settings = output_settings[output_name] - - # Add panel number in upper-left corner - panel_number = i * n_cols + j + 1 # number of panels left-to-right, top-to-bottom - ax.text( - 0.02, 0.98, # relative position in axes coordinates - str(panel_number), # text to display - transform=ax.transAxes, # use axis-relative coordinates - fontsize=18, - fontweight='bold', - va='top', # vertical alignment - ha='left', # horizontal alignment - color='black' - ) - - # Plot one ECDF per tested parameter value - for val in tested_param: - data_key = f"{output_name}_per_{param_name}" - if val not in grouped_data.get(data_key, {}): - continue - raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0) - # Plot ECDf if output == df['H_kg_atm'] then plot only values > 1e10 AND psurf > 1 bar - if output_name.endswith('_kg_atm'): - raw = np.clip(raw, 1e15, None) - elif output_name.endswith('P_surf'): - raw = np.clip(raw, 1, None) - else: - raw = raw - - sns.ecdfplot( - data=raw, - 
log_scale=out_settings.get("log_scale", False), - color=color_func(val), - linewidth=4, - linestyle='-', - ax=ax - ) - - # Configure x-axis labels, ticks, grids - if i == n_rows - 1: - ax.set_xlabel(out_settings["label"], fontsize=22) - ax.xaxis.set_label_coords(0.5, -0.3) - ax.tick_params(axis='x', labelsize=22) - else: - ax.tick_params(axis='x', labelbottom=False) - - # Configure y-axis (shared label added later) - if j == 0: - ax.set_ylabel("") - ticks = [0.0, 0.5, 1.0] - ax.set_yticks(ticks) - ax.tick_params(axis='y', labelsize=22) - else: - ax.set_ylabel("") - ax.set_yticks(ticks) - ax.tick_params(axis='y', labelleft=False) - - ax.grid(alpha=0.4) - - # After plotting all outputs for this parameter (row), add colorbar or legend - if colorbar_needed: - sm = mpl.cm.ScalarMappable(cmap=settings["colormap"], norm=norm) - # attach the colorbar to the right‐most subplot in row i: - rightmost_ax = axes[i, -1] - cbar = fig.colorbar(sm,ax=rightmost_ax,pad=0.03,aspect=10) - cbar.set_label(settings["label"], fontsize=24) - # This is for plot 0.194Msun - # if param_name == "orbit.semimajoraxis": - # cbar.ax.yaxis.set_label_coords(9.5, 0.5) - # else: - # cbar.ax.yaxis.set_label_coords(6, 0.5) - # This is for 1Msun - cbar.ax.yaxis.set_label_coords(6, 0.5) - ticks = sorted(set(tested_param)) - cbar.set_ticks(ticks) - cbar.ax.tick_params(labelsize=22) - else: - handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=4, label=str(val)) for val in unique_vals] - ax.legend(handles=handles, fontsize=24,bbox_to_anchor=(1.01, 1), loc='upper left') - - # Add a single, shared y-axis label - fig.text(0.04, 0.5, 'Normalized cumulative fraction of simulations', va='center', rotation='vertical', fontsize=40) - - # Tweak layout and save - plt.tight_layout(rect=[0.08, 0.02, 1, 0.97]) - filename = "ecdf_grid_plot.png" - out_path = os.path.join(plots_path, filename) - fig.savefig(out_path, dpi=300) - plt.close(fig) - - #print(f"Grid ECDF plot saved at {out_path}") diff --git 
a/src/proteus/grid/run_grid_analysis.py b/src/proteus/grid/run_grid_analysis.py deleted file mode 100644 index e63d64eea..000000000 --- a/src/proteus/grid/run_grid_analysis.py +++ /dev/null @@ -1,213 +0,0 @@ -# This script is used to post-process a grid of PROTEUS simulations. -# It extracts the output values from the simulation cases, saves them to a CSV file, -# and generates plots for the grid statuses based on the extracted data. It also generates -# ECDF single plots and a big grid plot for the input parameters vs extracted outputs. - -# The users need to specify the path to the grid directory and the grid name. (see the example below) -# He also needs to specify the output columns to extract from the 'runtime_helpfile.csv' of each case and -# update the related plotting variables accordingly. This can be done in the `run_grid_analyze` function (see below). -from __future__ import annotations - -import os - -import matplotlib.cm as cm - -from proteus.grid.post_processing_grid import ( - ecdf_grid_plot, - ecdf_single_plots, - extract_grid_output, - extract_solidification_time, - get_grid_parameters, - group_output_by_parameter, - load_extracted_data, - load_grid_cases, - plot_dir_exists, - plot_grid_status, - save_completed_cases, - save_error_running_cases, - save_grid_data_to_csv, -) - - -def run_grid_analyze(path_to_grid: str, grid_name: str, update_csv: bool = True): - """ - Run the post-processing of a PROTEUS grid, extracting simulation data in a CSV file and generating plots. - - Parameters - ---------- - path_to_grid : str - Path to the directory containing the grid folder. - - grid_name : str - Name of the grid folder to process. - - update_csv : bool, optional - If True, the CSV file will be updated or created. If False, it will skip the CSV extraction step if the file already exists. 
- """ - - # ------------------------------------------------------------ - # 1) Build all the folder/filename strings - # ------------------------------------------------------------ - grid_path = os.path.join(path_to_grid, grid_name) + os.sep - postprocess_path = os.path.join(grid_path, "post_processing_grid") + os.sep - data_dir = os.path.join(postprocess_path, "extracted_data") + os.sep - os.makedirs(data_dir, exist_ok=True) - - - plots_path = os.path.join(postprocess_path, "plots_grid") + os.sep - plot_dir_exists(plots_path) - - csv_file = os.path.join(data_dir, f"{grid_name}_extracted_data.csv") - - # ------------------------------------------------------------ - # 2) Define which outputs to pull from each case's runtime_helpfile.csv - # ------------------------------------------------------------ - # User choose the output to extract from 'runtime_helpfile.csv' of each case (always the [-1] element of the column). - # For the units, check the file src/proteus/utils/coupler.py, lines 348-400 (keys) - - output_to_extract = ['Time','esc_rate_total','Phi_global','P_surf','T_surf','T_eqm','M_planet','R_obs','p_xuv', 'R_xuv', 'F_xuv', 'atm_kg_per_mol', - 'H_kg_atm','O_kg_atm','C_kg_atm','N_kg_atm','S_kg_atm', 'Si_kg_atm', 'Mg_kg_atm', 'Fe_kg_atm', 'Na_kg_atm', - 'H2O_kg_atm','CO2_kg_atm', 'O2_kg_atm', 'H2_kg_atm', 'CH4_kg_atm', 'CO_kg_atm', 'N2_kg_atm', 'NH3_kg_atm', - 'S2_kg_atm', 'SO2_kg_atm', 'H2S_kg_atm', 'SiO_kg_atm','SiO2_kg_atm', 'MgO_kg_atm', 'FeO2_kg_atm', 'runtime'] - - # ------------------------------------------------------------ - # STEP 1: CSV extraction (only if update_csv=True or CSV missing) - # ------------------------------------------------------------ - - if update_csv or not os.path.isfile(csv_file): - - print('-----------------------------------------------------------') - print(f'Step 1 : Post-processing the grid {grid_name} ...') - print('-----------------------------------------------------------') - - extracted_value = {} # Initialize the 
dictionary to store extracted values - - cases_data = load_grid_cases(grid_path) # Load all simulation cases - grid_parameters, case_init_param = get_grid_parameters(grid_path) # Extract grid parameters - - for param in output_to_extract: - extracted_value[param] = extract_grid_output(cases_data, param) # Extract output values - - solidification_times = extract_solidification_time(cases_data) # Extract the solidification time - extracted_value['solidification_time'] = solidification_times # Add solidification time to the extracted_values - - save_grid_data_to_csv(grid_name, cases_data, grid_parameters, case_init_param, - extracted_value, solidification_times, data_dir) # Save all the extracted data to a CSV file - print(f'--> CSV file written to: {csv_file}') - - save_error_running_cases( - grid_name=grid_name, - cases_data=cases_data, - grid_parameters=grid_parameters, - case_params=case_init_param, - extracted_value=extracted_value, - output_to_extract=output_to_extract, - output_dir=data_dir) - - save_completed_cases( - grid_name=grid_name, - cases_data=cases_data, - grid_parameters=grid_parameters, - case_params=case_init_param, - extracted_value=extracted_value, - output_to_extract=output_to_extract, - output_dir=data_dir) - else: - print('-----------------------------------------------------------') - print(f'Step 1 : Skipped (CSV already exists at {csv_file})') - print('-----------------------------------------------------------') - - - # --------------------------------------------- - # STEP 2: Load data from CSV and make plots - # --------------------------------------------- - - print('-----------------------------------------------------------') - print(f'Step 2 : Loading data and plotting for grid {grid_name} ...') - print('-----------------------------------------------------------') - - - df, grid_params, extracted_outputs = load_extracted_data(data_dir, grid_name) # Load the data - grouped_data = group_output_by_parameter(df, grid_params, 
extracted_outputs) # Group extracted outputs by grid parameters - - # Histogram of grid statuses - plot_grid_status(df, plots_path, grid_name) # Plot the grid statuses in an histogram - - # Single ECDF Plots - # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. - param_settings_single = { - "orbit.semimajoraxis": {"label": "Semi-major axis [AU]", "colormap": cm.plasma, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"$P_{XUV}$ [bar]", "colormap": cm.cividis, "log_scale": True}, - "escape.zephyrus.efficiency": {"label": r"Escape efficiency factor $\epsilon$", "colormap": cm.spring, "log_scale": False}, - "outgas.fO2_shift_IW": {"label": r"$\log_{10}(fO_2)$ [IW]", "colormap": cm.coolwarm, "log_scale": False}, - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.rainbow, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.copper, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [Earth's oceans]", "colormap": cm.winter, "log_scale": False}, - # "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": False}, - # "escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} - # "escape.module": {"label": "Escape module", "colormap": cm.RdYlGn, "log_scale": False} - } - output_settings_single = { - 'esc_rate_total': {"label": "Total escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - 'P_surf': {"label": "Surface pressure [bar]", "log_scale": True, "scale": 1.0}, - 'atm_kg_per_mol': {"label": "Mean molecular weight (MMW) [g/mol]", "log_scale": False, "scale": 1000.0}, - 'solidification_time': {"label": "Solidification time [yr]", "log_scale": True, "scale": 1.0}, - 'T_surf': {"label": r"T$_{surf}$ [K]", "log_scale": False, "scale": 1.0}, - 'M_planet': {"label": r"M$_p$ 
[M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, - 'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - 'F_xuv': {"label": r"F$_{XUV}$ [W/m$^2$]", "log_scale": True, "scale": 1.0}, - #'T_eqm': {"label": r"T$_{eqm}$ [K]", "log_scale": False, "scale": 1.0} - } - ecdf_single_plots(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_single, output_settings=output_settings_single, plots_path=plots_path) - - # ECDF Grid Plot - # The user needs to comment the parameters he didn't used in the grid/ add the ones non-listed here. Same for the outputs. - param_settings_grid = { - #"atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.Paired, "log_scale": False}, - "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.Spectral, "log_scale": False}, - "struct.corefrac": {"label": "CRF", "colormap": cm.Spectral, "log_scale": False}, - "atmos_clim.albedo_pl": {"label": r"$A_b$", "colormap": cm.Spectral, "log_scale": False}, - "escape.zephyrus.efficiency": {"label": r"$\rm \epsilon$", "colormap": cm.Spectral, "log_scale": False}, - "outgas.fO2_shift_IW": {"label": r"$\rm \log_{10} fO_2 [IW]$", "colormap": cm.Spectral, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [oceans]", "colormap": cm.Spectral, "log_scale": True}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.Spectral, "log_scale": False}, - "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.Spectral, "log_scale": True}, - # "delivery.elements.SH_ratio": {"label": "S/H ratio", "colormap": cm.autumn, "log_scale": True}, - # "escape.reservoir": {"label": "Reservoir", "colormap": cm.viridis, "log_scale": False} - # "escape.module": {"label": "Escape module", "colormap": cm.RdYlGn, "log_scale": False} - } - output_settings_grid = { - 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, - 'Phi_global': {"label": "Melt fraction [%]", "log_scale": 
False, "scale": 100.0}, - 'T_surf': {"label": r"T$_{\rm surf}$ [$10^3$ K]", "log_scale": False, "scale": 1.0/1000.0}, - #'F_xuv': {"label": r"F$_{XUV}$ [W/m$^2$]", "log_scale": True, "scale": 1.0}, - #'T_eqm': {"label": r"T$_{eqm}$ [K]", "log_scale": False, "scale": 1.0}, - 'P_surf': {"label": r"P$_{\rm surf}$ [bar]", "log_scale": True, "scale": 1.0}, - 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, - 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}, - #'H_kg_atm': {"label": r"[H$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - # 'M_planet': {"label": r"M$_p$ [M$_\oplus$]", "log_scale": False, "scale": 1.0/5.9722e24}, - #'O_kg_atm': {"label": r"[O$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - #'C_kg_atm': {"label": r"[C$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - #'N_kg_atm': {"label": r"[N$_{atm}$] [kg]", "log_scale": True, "scale": 1.0}, - #'S_kg_atm': {"label": r"[S$_{atm}$] [kg]", "log_scale": True, "scale": 1.0} - } - ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid, plots_path=plots_path) - - # output_settings_grid_species = { - # 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, - # 'H_kg_atm': {"label": r"[H$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, - # 'O_kg_atm': {"label": r"[O$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, - # 'C_kg_atm': {"label": r"[C$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, - # 'N_kg_atm': {"label": r"[N$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0}, - # 'S_kg_atm': {"label": r"[S$_{\rm atm}$] [kg]", "log_scale": True, "scale": 1.0} - # } - # ecdf_grid_plot(grid_params=grid_params, grouped_data=grouped_data, param_settings=param_settings_grid, output_settings=output_settings_grid_species, plots_path=plots_path) - - print('-----------------------------------------------------------') - 
print(f'Plots saved in {plots_path}') - print(f'Post-processing of grid {grid_name} completed successfully!') - print('-----------------------------------------------------------') - print('If you want to change the parameters to post-process the grid, please edit the code in PROTEUS/src/proteus/grid/post_processing_grid.py') - print('-----------------------------------------------------------') diff --git a/src/proteus/grid/test_grid_analyze.py b/src/proteus/grid/test_grid_analyze.py deleted file mode 100644 index bafe4b92f..000000000 --- a/src/proteus/grid/test_grid_analyze.py +++ /dev/null @@ -1,61 +0,0 @@ -from __future__ import annotations - -import tomllib -from pathlib import Path - -import pandas as pd -import post_processing_updated as pp - -# Load configuration from TOML file -toml_file = Path("/home2/p315557/PROTEUS/input/ensembles/example.grid_analyse.toml") # Update with the correct path -with open(toml_file, "rb") as f: - cfg = tomllib.load(f) - -# Get grid path and name -grid_path = Path(cfg["grid_path"]) -grid_name = Path(pp.get_grid_name(grid_path)) -print(grid_path) -print(f"Analyzing grid: {grid_name}") - - -# Load grid data -data = pp.load_grid_cases(grid_path) -input_param_grid_per_case, tested_params_grid = pp.get_tested_grid_parameters(data, grid_path) - -# Generate summary CSV files -if cfg.get("update_csv", True): - pp.generate_summary_csv(data, input_param_grid_per_case, grid_path, grid_name) - pp.generate_completed_summary_csv(data, input_param_grid_per_case, grid_path, grid_name) - pp.generate_running_error_summary_csv(data, input_param_grid_per_case, grid_path, grid_name) - -# Plot grid status -if cfg.get("plot_status", True): - path_csv_all_simulations = grid_path / "post_processing/extracted_data" / f"{grid_name}_final_extracted_data_all.csv" - all_simulations_data_csv = pd.read_csv(path_csv_all_simulations, sep="\t") - pp.plot_grid_status(all_simulations_data_csv, grid_path, grid_name) - print("Plot grid status summary is 
available.") - -# Formatting for ECDF plot: group output data by parameters and prepare settings for plotting only for Completed simulations -path_csv_completed_simulations = grid_path / "post_processing/extracted_data" / f"{grid_name}_final_extracted_data_completed.csv" -completed_simulations_data_csv = pd.read_csv(path_csv_completed_simulations, sep="\t") -columns_output = list(cfg["output_variables"].keys()) -grouped_data = {} -for col in columns_output: - group = pp.group_output_by_parameter( - completed_simulations_data_csv, - tested_params_grid, - [col] - ) - grouped_data.update(group) -param_settings_grid, output_settings_grid = pp.load_ecdf_plot_settings(cfg) - -# Generate ECDF plot -if cfg.get("plot_ecdf", True): - pp.ecdf_grid_plot( - grouped_data, - param_settings_grid, - output_settings_grid, - grid_path, - grid_name - ) - print("ECDF grid plot is available.") diff --git a/src/proteus/grid/test_new_pp_grid.py b/src/proteus/grid/test_new_pp_grid.py deleted file mode 100644 index bc6a82ae1..000000000 --- a/src/proteus/grid/test_new_pp_grid.py +++ /dev/null @@ -1,64 +0,0 @@ -from __future__ import annotations - -from pathlib import Path - -import matplotlib.cm as cm -import pandas as pd -import post_processing_updated as pp - -grid_name = "escape_grid_1Msun" -grid_path = Path(f"/projects/p315557/Paper_1/DATA/Grids/{grid_name}") -# grid_name= "toi561b_grid" -# grid_path = Path(f"/projects/p315557/TOI-561b/{grid_name}") -# grid_name= "escape_on_off_janus_agni_1Msun" -# grid_path = Path(f"/projects/p315557/Paper_1/DATA/Comparison_escape_on_off/{grid_name}") - -# Load grid data -data = pp.load_grid_cases(grid_path) -# Store the input parameters for each case based on the tested parameters in the grid and store the tested parameters in the grid -input_param_grid_per_case, tested_params_grid = pp.get_tested_grid_parameters(data,grid_path) - -# Generate CSV file for all grid, only completed simulations, and running error simulations 
-#pp.generate_summary_csv(data,input_param_grid_per_case,grid_path,grid_name) -#pp.generate_completed_summary_csv(data,input_param_grid_per_case,grid_path,grid_name) -#pp.generate_running_error_summary_csv(data,input_param_grid_per_case,grid_path,grid_name) - -# Load the final extracted data CSV for all simulations -path_csv_all_simulations = grid_path / "post_processing/extracted_data" / f"{grid_name}_final_extracted_data_all.csv" -all_simulations_data_csv = pd.read_csv(path_csv_all_simulations, sep="\t") -# Plot grid status -pp.plot_grid_status(all_simulations_data_csv,grid_path,grid_name) - -# Load the final extracted data CSV for completed simulations -path_csv_completed_simulations = grid_path / "post_processing/extracted_data" / f"{grid_name}_final_extracted_data_completed.csv" -completed_simulations_data_csv = pd.read_csv(path_csv_completed_simulations, sep="\t") - -# Group output data by tested parameters in the grid -grouped_data = {} -columns_output = ['solidification_time', 'Phi_global', 'T_surf', 'P_surf', 'atm_kg_per_mol', 'esc_rate_total'] -for col in columns_output: - group = pp.group_output_by_parameter( - completed_simulations_data_csv, - tested_params_grid, - [col] - ) - grouped_data.update(group) - -# Define parameter and output settings for the ECDF grid plots -param_settings_grid = { - "atmos_clim.module": {"label": "Atmosphere module", "colormap": cm.viridis, "log_scale": False}, - "orbit.semimajoraxis": {"label": "a [AU]", "colormap": cm.viridis, "log_scale": False}, - "escape.zephyrus.efficiency": {"label": r"$\rm \epsilon$", "colormap": cm.viridis, "log_scale": False}, - "escape.zephyrus.Pxuv": {"label": r"P$_{\rm XUV}$ [bar]", "colormap": cm.viridis, "log_scale": True}, - "outgas.fO2_shift_IW": {"label": r"$\rm \log_{10} fO_2 [IW]$", "colormap": cm.viridis, "log_scale": False}, - "delivery.elements.CH_ratio": {"label": "C/H ratio", "colormap": cm.viridis, "log_scale": False}, - "delivery.elements.H_oceans": {"label": "[H] [oceans]", 
"colormap": cm.viridis, "log_scale": False}} -output_settings_grid = { - 'solidification_time': {"label": "Solidification [yr]", "log_scale": True, "scale": 1.0}, - 'Phi_global': {"label": "Melt fraction [%]", "log_scale": False, "scale": 100.0}, - 'T_surf': {"label": r"T$_{\rm surf}$ [$10^3$ K]", "log_scale": False, "scale": 1.0/1000.0}, - 'P_surf': {"label": r"P$_{\rm surf}$ [bar]", "log_scale": True, "scale": 1.0}, - 'atm_kg_per_mol': {"label": "MMW [g/mol]", "log_scale": False, "scale": 1000.0}, - 'esc_rate_total': {"label": "Escape rate [kg/s]", "log_scale": True, "scale": 1.0}} - -pp.ecdf_grid_plot(grouped_data, param_settings_grid, output_settings_grid, grid_path, grid_name) From a8238f9fb9ff7a427236fa0f76f52d809c132be4 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 17 Dec 2025 21:18:57 +0100 Subject: [PATCH 066/105] update minor typos --- src/proteus/grid/post_processing.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index e5d28f9be..8626a928d 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -738,14 +738,6 @@ def color_func(v): continue raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0) - # # Handle special cases for clipping - # if output_name.endswith('_kg_atm'): - # raw = np.clip(raw, 1e15, None) - # elif output_name.endswith('P_surf'): - # raw = np.clip(raw, 1, None) - # else: - # raw = raw - # Plot ECDF sns.ecdfplot( data=raw, @@ -881,4 +873,4 @@ def main(grid_analyse_toml_file: str | Path = None): if __name__ == "__main__": - main( "/home2/p315557/PROTEUS/input/ensembles/example.grid_analyse.toml") + main() From ebe169e540c5ab7693fdee0f64234161356dcd9b Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 17 Dec 2025 21:54:49 +0100 Subject: [PATCH 067/105] update doc --- docs/usage.md | 50 +++++++++++++++++--------------------------------- 1 file changed, 17 
insertions(+), 33 deletions(-) diff --git a/docs/usage.md b/docs/usage.md index 53f833dff..ec010b645 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -145,6 +145,23 @@ Use the CLI to package the results of a grid into a zip file; e.g. for sharing o ```console proteus grid-pack -o output/grid_demo/ ``` +## Postprocessing of grid results + +Results from a PROTEUS grid can be post-processed using the `proteus grid-analyse` command. This generates ECDF plots that summarize the last time step of all simulation cases in the grid. (For more details on ECDF plots, see the [Seaborn `ecdfplot` documentation](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html).) + +Before running the command, update the `example.grid_analyse.toml` file to match your grid. Specify the input parameters used in your simulations and select the output variables you want to visualize. To post-process a grid and generate ECDF plots for further analysis, run the following command: + +``` +proteus grid-analyse input/ensembles/example.grid_analyse.toml +``` + +Executing the command creates a `post_processing` folder inside your grid directory containing all post-processing outputs: + +- Extracted data: CSV files with simulation status, input parameters, and output values at the last time step are stored in: + `post_processing/extracted_data/` +- Plots: Status summaries and ECDF grid plots are saved in: + `post_processing/plots/` + ## Retrieval scheme (Bayesian optimisation) @@ -182,39 +199,6 @@ proteus observe -c [cfgfile] PROTEUS will perform this step automatically if enabled in the configuration file. -## Postprocessing of PROTEUS simulation grids - -Results from a PROTEUS grid can be post-processed using the `proteus grid-analyse` command. - -This will generate 3 CSV files, depending on simulations statuses, from the grid results and ECDF plots -(see [seaborn.ecdfplot doc](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html)). 
-Here is the structure of the generated `post_processing` folder inside the grid directory : - -```console -your_grid_name/ - ├─case_00000 <---- case of your grid (for the structure refer to the tree from the [## Output and results] section) - ├─case_00001 - ├─... - ├─cfgs <---- folder with all the `input.toml` files for all cases - ├─logs <---- folder with all the `proteus_case_number.log` files for all cases - ├─manager.log <---- the log file of the grid - ├─slurm_dispatch.sh <---- if use_slurm=True in `grid_proteus.py`, this is the slurm file to submit with `sbatch` command - ├─post_processing <---- this folder contains all the output from this script - │ └─extracted_data <---- folder with the generated CSV file - │ └─your_grid_name_final_extracted_data.csv <---- CSV file containing the tested input parameters and extracted output from the grid - │ └─plots_grid <---- folder with the generated plots - │ ├─ecdf_grid_plot_your_grid_name.png <---- Grid plot to visualize all tested input parameters vs extracted outputs using ECDF distribution - │ └─summary_grid_statuses_your_grid_name.png <---- Summary plot of statuses for all cases of the grid - │ └─... -``` - To post-processed the grid and generate ECDF plots for further analysis, use the proteus command line interface: - -```console -proteus grid-analyse /path/to/example.grid_analyse.toml -``` - -`/path/to/example.grid_analyse.toml` is the path to the toml file containing grid analysis configuration. - ## Postprocessing of results with AGNI for multiprofile analysis PROTEUS includes a functionality to postprocess the planet's atmosphere for a number of zenith angles. 
From 64febce928191b441dda45aba2b480c71f25d7cf Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 17 Dec 2025 22:25:44 +0100 Subject: [PATCH 068/105] Remove folder_list file and simulations configuration files for my grid --- folder_list.txt | 958 ------------------ input/demos/escape_comparison.toml | 318 ------ input/demos/escape_grid_0_194Msun.toml | 354 ------- input/demos/escape_grid_1Msun.toml | 354 ------- input/ensembles/escape_comparison_on_off.toml | 34 - input/ensembles/escape_grid_0_194Msun.toml | 59 -- input/ensembles/escape_grid_1Msun.toml | 129 --- input/ensembles/toi561b_grid_a_0_0106AU.toml | 75 -- input/ensembles/toi561b_grid_a_0_0287AU.toml | 69 -- 9 files changed, 2350 deletions(-) delete mode 100644 folder_list.txt delete mode 100644 input/demos/escape_comparison.toml delete mode 100644 input/demos/escape_grid_0_194Msun.toml delete mode 100644 input/demos/escape_grid_1Msun.toml delete mode 100644 input/ensembles/escape_comparison_on_off.toml delete mode 100644 input/ensembles/escape_grid_0_194Msun.toml delete mode 100644 input/ensembles/escape_grid_1Msun.toml delete mode 100644 input/ensembles/toi561b_grid_a_0_0106AU.toml delete mode 100644 input/ensembles/toi561b_grid_a_0_0287AU.toml diff --git a/folder_list.txt b/folder_list.txt deleted file mode 100644 index 9f99473a5..000000000 --- a/folder_list.txt +++ /dev/null @@ -1,958 +0,0 @@ -case_000000 -case_000001 -case_000002 -case_000003 -case_000004 -case_000005 -case_000006 -case_000007 -case_000008 -case_000009 -case_000010 -case_000011 -case_000012 -case_000013 -case_000014 -case_000015 -case_000016 -case_000017 -case_000018 -case_000019 -case_000020 -case_000021 -case_000022 -case_000023 -case_000024 -case_000025 -case_000026 -case_000027 -case_000028 -case_000029 -case_000030 -case_000031 -case_000032 -case_000033 -case_000034 -case_000035 -case_000036 -case_000037 -case_000038 -case_000039 -case_000040 -case_000041 -case_000042 -case_000043 -case_000044 -case_000045 -case_000046 
-case_000047 -case_000048 -case_000049 -case_000050 -case_000051 -case_000052 -case_000053 -case_000054 -case_000055 -case_000056 -case_000057 -case_000058 -case_000059 -case_000060 -case_000061 -case_000062 -case_000063 -case_000064 -case_000065 -case_000066 -case_000067 -case_000068 -case_000069 -case_000070 -case_000071 -case_000072 -case_000073 -case_000074 -case_000075 -case_000076 -case_000077 -case_000078 -case_000079 -case_000080 -case_000081 -case_000082 -case_000083 -case_000084 -case_000085 -case_000086 -case_000087 -case_000088 -case_000089 -case_000090 -case_000091 -case_000092 -case_000093 -case_000094 -case_000095 -case_000096 -case_000097 -case_000098 -case_000099 -case_000100 -case_000101 -case_000102 -case_000103 -case_000104 -case_000105 -case_000106 -case_000107 -case_000108 -case_000109 -case_000110 -case_000111 -case_000112 -case_000113 -case_000114 -case_000115 -case_000116 -case_000117 -case_000118 -case_000119 -case_000120 -case_000121 -case_000122 -case_000123 -case_000124 -case_000125 -case_000126 -case_000127 -case_000128 -case_000129 -case_000130 -case_000131 -case_000132 -case_000133 -case_000134 -case_000135 -case_000136 -case_000137 -case_000138 -case_000139 -case_000140 -case_000141 -case_000142 -case_000143 -case_000144 -case_000145 -case_000146 -case_000147 -case_000148 -case_000149 -case_000150 -case_000151 -case_000152 -case_000153 -case_000154 -case_000155 -case_000156 -case_000157 -case_000158 -case_000159 -case_000160 -case_000161 -case_000162 -case_000163 -case_000164 -case_000165 -case_000166 -case_000167 -case_000168 -case_000169 -case_000170 -case_000171 -case_000172 -case_000173 -case_000174 -case_000175 -case_000176 -case_000177 -case_000178 -case_000179 -case_000180 -case_000181 -case_000182 -case_000183 -case_000184 -case_000185 -case_000186 -case_000187 -case_000188 -case_000189 -case_000190 -case_000191 -case_000192 -case_000193 -case_000194 -case_000195 -case_000196 -case_000197 -case_000198 -case_000199 
-case_000200 -case_000201 -case_000202 -case_000203 -case_000204 -case_000205 -case_000206 -case_000207 -case_000208 -case_000209 -case_000210 -case_000211 -case_000212 -case_000213 -case_000214 -case_000215 -case_000216 -case_000217 -case_000218 -case_000219 -case_000220 -case_000221 -case_000222 -case_000223 -case_000224 -case_000225 -case_000226 -case_000227 -case_000228 -case_000229 -case_000230 -case_000231 -case_000232 -case_000233 -case_000234 -case_000235 -case_000236 -case_000237 -case_000238 -case_000239 -case_000240 -case_000241 -case_000242 -case_000243 -case_000244 -case_000245 -case_000246 -case_000247 -case_000248 -case_000249 -case_000250 -case_000251 -case_000252 -case_000253 -case_000254 -case_000255 -case_000256 -case_000257 -case_000258 -case_000259 -case_000260 -case_000261 -case_000262 -case_000263 -case_000264 -case_000265 -case_000266 -case_000267 -case_000268 -case_000269 -case_000270 -case_000271 -case_000272 -case_000273 -case_000274 -case_000275 -case_000276 -case_000277 -case_000278 -case_000279 -case_000280 -case_000281 -case_000282 -case_000283 -case_000284 -case_000285 -case_000286 -case_000287 -case_000288 -case_000289 -case_000290 -case_000291 -case_000292 -case_000293 -case_000294 -case_000295 -case_000296 -case_000297 -case_000298 -case_000299 -case_000300 -case_000301 -case_000302 -case_000303 -case_000304 -case_000305 -case_000306 -case_000307 -case_000308 -case_000309 -case_000310 -case_000311 -case_000312 -case_000313 -case_000314 -case_000315 -case_000316 -case_000317 -case_000318 -case_000319 -case_000320 -case_000321 -case_000322 -case_000323 -case_000324 -case_000325 -case_000326 -case_000327 -case_000328 -case_000329 -case_000330 -case_000331 -case_000332 -case_000333 -case_000334 -case_000335 -case_000336 -case_000337 -case_000338 -case_000339 -case_000340 -case_000341 -case_000342 -case_000343 -case_000344 -case_000345 -case_000346 -case_000347 -case_000348 -case_000349 -case_000350 -case_000351 -case_000352 
-case_000353 -case_000354 -case_000355 -case_000356 -case_000357 -case_000358 -case_000359 -case_000360 -case_000361 -case_000362 -case_000363 -case_000364 -case_000365 -case_000366 -case_000367 -case_000368 -case_000369 -case_000370 -case_000371 -case_000372 -case_000373 -case_000374 -case_000375 -case_000376 -case_000377 -case_000378 -case_000379 -case_000380 -case_000381 -case_000382 -case_000383 -case_000384 -case_000385 -case_000386 -case_000387 -case_000388 -case_000389 -case_000390 -case_000391 -case_000392 -case_000393 -case_000394 -case_000395 -case_000396 -case_000397 -case_000398 -case_000399 -case_000400 -case_000401 -case_000402 -case_000403 -case_000404 -case_000405 -case_000406 -case_000407 -case_000408 -case_000409 -case_000410 -case_000411 -case_000412 -case_000413 -case_000414 -case_000415 -case_000416 -case_000417 -case_000418 -case_000419 -case_000420 -case_000421 -case_000422 -case_000423 -case_000424 -case_000425 -case_000426 -case_000427 -case_000428 -case_000429 -case_000430 -case_000431 -case_000432 -case_000433 -case_000434 -case_000435 -case_000436 -case_000437 -case_000438 -case_000439 -case_000440 -case_000441 -case_000442 -case_000443 -case_000444 -case_000445 -case_000446 -case_000447 -case_000448 -case_000449 -case_000450 -case_000451 -case_000452 -case_000453 -case_000454 -case_000455 -case_000456 -case_000457 -case_000458 -case_000459 -case_000460 -case_000461 -case_000462 -case_000463 -case_000464 -case_000465 -case_000466 -case_000467 -case_000468 -case_000469 -case_000470 -case_000471 -case_000472 -case_000473 -case_000474 -case_000475 -case_000476 -case_000477 -case_000478 -case_000479 -case_000480 -case_000481 -case_000482 -case_000483 -case_000484 -case_000485 -case_000486 -case_000487 -case_000488 -case_000489 -case_000490 -case_000491 -case_000492 -case_000493 -case_000494 -case_000495 -case_000496 -case_000497 -case_000498 -case_000499 -case_000500 -case_000501 -case_000502 -case_000503 -case_000504 -case_000505 
-case_000506 -case_000507 -case_000508 -case_000509 -case_000510 -case_000511 -case_000512 -case_000513 -case_000514 -case_000515 -case_000516 -case_000517 -case_000518 -case_000519 -case_000520 -case_000521 -case_000522 -case_000523 -case_000524 -case_000525 -case_000526 -case_000527 -case_000528 -case_000529 -case_000530 -case_000531 -case_000532 -case_000533 -case_000534 -case_000535 -case_000536 -case_000537 -case_000538 -case_000539 -case_000540 -case_000541 -case_000542 -case_000543 -case_000544 -case_000545 -case_000546 -case_000547 -case_000548 -case_000549 -case_000550 -case_000551 -case_000552 -case_000553 -case_000554 -case_000555 -case_000556 -case_000557 -case_000558 -case_000559 -case_000560 -case_000561 -case_000562 -case_000563 -case_000564 -case_000565 -case_000566 -case_000567 -case_000569 -case_000573 -case_000574 -case_000575 -case_000576 -case_000577 -case_000579 -case_000580 -case_000582 -case_000583 -case_000584 -case_000586 -case_000587 -case_000588 -case_000589 -case_000591 -case_000592 -case_000593 -case_000594 -case_000595 -case_000596 -case_000597 -case_000598 -case_000599 -case_000600 -case_000601 -case_000602 -case_000603 -case_000604 -case_000605 -case_000606 -case_000607 -case_000608 -case_000609 -case_000610 -case_000611 -case_000612 -case_000613 -case_000614 -case_000615 -case_000616 -case_000617 -case_000618 -case_000619 -case_000620 -case_000621 -case_000622 -case_000623 -case_000624 -case_000625 -case_000626 -case_000627 -case_000628 -case_000629 -case_000630 -case_000631 -case_000632 -case_000633 -case_000634 -case_000635 -case_000636 -case_000637 -case_000638 -case_000639 -case_000640 -case_000641 -case_000642 -case_000643 -case_000644 -case_000645 -case_000646 -case_000647 -case_000648 -case_000649 -case_000650 -case_000651 -case_000652 -case_000653 -case_000654 -case_000655 -case_000656 -case_000657 -case_000658 -case_000659 -case_000660 -case_000661 -case_000662 -case_000663 -case_000664 -case_000665 -case_000666 
-case_000667 -case_000668 -case_000669 -case_000670 -case_000671 -case_000672 -case_000673 -case_000674 -case_000675 -case_000676 -case_000677 -case_000678 -case_000679 -case_000680 -case_000681 -case_000683 -case_000684 -case_000685 -case_000687 -case_000688 -case_000689 -case_000690 -case_000692 -case_000693 -case_000694 -case_000695 -case_000696 -case_000697 -case_000698 -case_000699 -case_000701 -case_000702 -case_000703 -case_000704 -case_000706 -case_000707 -case_000708 -case_000709 -case_000710 -case_000711 -case_000713 -case_000714 -case_000715 -case_000716 -case_000717 -case_000718 -case_000719 -case_000720 -case_000721 -case_000722 -case_000723 -case_000724 -case_000725 -case_000727 -case_000728 -case_000732 -case_000733 -case_000734 -case_000735 -case_000736 -case_000737 -case_000738 -case_000739 -case_000740 -case_000741 -case_000742 -case_000743 -case_000744 -case_000745 -case_000746 -case_000747 -case_000748 -case_000749 -case_000750 -case_000751 -case_000752 -case_000753 -case_000754 -case_000755 -case_000756 -case_000757 -case_000758 -case_000759 -case_000760 -case_000761 -case_000762 -case_000763 -case_000764 -case_000765 -case_000766 -case_000767 -case_000768 -case_000769 -case_000770 -case_000771 -case_000772 -case_000773 -case_000774 -case_000775 -case_000776 -case_000777 -case_000778 -case_000779 -case_000780 -case_000781 -case_000782 -case_000783 -case_000784 -case_000785 -case_000786 -case_000787 -case_000788 -case_000789 -case_000790 -case_000791 -case_000792 -case_000793 -case_000794 -case_000795 -case_000796 -case_000797 -case_000798 -case_000799 -case_000800 -case_000801 -case_000802 -case_000803 -case_000804 -case_000805 -case_000806 -case_000807 -case_000808 -case_000809 -case_000810 -case_000811 -case_000812 -case_000813 -case_000814 -case_000815 -case_000816 -case_000817 -case_000818 -case_000819 -case_000820 -case_000821 -case_000822 -case_000823 -case_000824 -case_000825 -case_000826 -case_000827 -case_000828 -case_000829 
-case_000830 -case_000831 -case_000832 -case_000833 -case_000834 -case_000835 -case_000836 -case_000837 -case_000838 -case_000839 -case_000840 -case_000841 -case_000842 -case_000843 -case_000844 -case_000845 -case_000846 -case_000847 -case_000848 -case_000849 -case_000850 -case_000851 -case_000852 -case_000853 -case_000854 -case_000855 -case_000856 -case_000857 -case_000858 -case_000859 -case_000860 -case_000861 -case_000862 -case_000863 -case_000864 -case_000865 -case_000866 -case_000867 -case_000868 -case_000869 -case_000870 -case_000871 -case_000872 -case_000873 -case_000874 -case_000875 -case_000876 -case_000877 -case_000878 -case_000879 -case_000880 -case_000881 -case_000882 -case_000883 -case_000884 -case_000885 -case_000886 -case_000887 -case_000888 -case_000889 -case_000890 -case_000891 -case_000892 -case_000893 -case_000894 -case_000895 -case_000896 -case_000897 -case_000898 -case_000899 -case_000900 -case_000901 -case_000902 -case_000903 -case_000904 -case_000905 -case_000906 -case_000907 -case_000908 -case_000909 -case_000910 -case_000911 -case_000912 -case_000913 -case_000914 -case_000915 -case_000916 -case_000917 -case_000918 -case_000919 -case_000920 -case_000921 -case_000922 -case_000923 -case_000924 -case_000925 -case_000926 -case_000927 -case_000928 -case_000929 -case_000930 -case_000931 -case_000932 -case_000933 -case_000934 -case_000935 -case_000936 -case_000937 -case_000938 -case_000939 -case_000940 -case_000941 -case_000942 -case_000943 -case_000944 -case_000945 -case_000946 -case_000947 -case_000948 -case_000949 -case_000950 -case_000951 -case_000952 -case_000953 -case_000954 -case_000955 -case_000956 -case_000957 -case_000958 -case_000959 -case_000961 -case_000963 -case_000964 -case_000965 -case_000966 -case_000967 -case_000968 -case_000969 -case_000971 -cfgs -copy.grid.toml -logs -manager.log -post_processing_grid -ref_config.toml -slurm_dispatch.sh diff --git a/input/demos/escape_comparison.toml b/input/demos/escape_comparison.toml deleted 
file mode 100644 index 66ef04d1a..000000000 --- a/input/demos/escape_comparison.toml +++ /dev/null @@ -1,318 +0,0 @@ -# PROTEUS configuration file (version 2.0) - -# Root tables should be physical, with the exception of "params" -# Software related options should go within the appropriate physical table - -# The general structure is: -# [root] metadata -# [params] parameters for code execution, output files, time-stepping, convergence -# [star] stellar parameters, model selection -# [orbit] planetary orbital parameters -# [struct] planetary structure (mass, radius) -# [atmos] atmosphere parameters, model selection -# [escape] escape parameters, model selection -# [interior] magma ocean model selection and parameters -# [outgas] outgassing parameters (fO2) and included volatiles -# [delivery] initial volatile inventory, and delivery model selection -# [observe] synthetic observations - -# ---------------------------------------------------- -# Metadata -version = "2.0" -author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" - -# ---------------------------------------------------- -# Parameters -[params] - # output files - [params.out] - path = "scratch/escape_on_off_janus_agni_1Msun/" # Path to output folder relative to PROTEUS output folder - logging = "INFO" - plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations - plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended - write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations - archive_mod = 'none' # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive - remove_sf = true - - # time-stepping - [params.dt] - minimum = 3e2 # yr, minimum time-step - minimum_rel = 1e-5 # relative minimum time-step [dimensionless] - maximum = 3e7 # yr, maximum time-step - initial = 1e4 # yr, inital step size - starspec = 100e6 # yr, interval to re-calculate the stellar spectrum - starinst = 100 # yr, interval to 
re-calculate the instellation - method = "adaptive" # proportional | adaptive | maximum - - [params.dt.proportional] - propconst = 52.0 # Proportionality constant - - [params.dt.adaptive] - atol = 0.02 # Step size atol - rtol = 0.07 # Step size rtol - - # Termination criteria - # Set enabled=true/false in each section to enable/disable that termination criterion - [params.stop] - - # Require criteria to be satisfied twice before model will exit? - strict = false - - # required number of iterations - [params.stop.iters] - enabled = true - minimum = 5 - maximum = 9000 - - # required time constraints - [params.stop.time] - enabled = true - minimum = 1.0e3 # yr, model will certainly run to t > minimum - maximum = 4.567e+9 # yr, model will terminate when t > maximum - - # solidification - [params.stop.solid] - enabled = true - phi_crit = 0.005 # non-dim., model will terminate when global melt fraction < phi_crit - - # radiative equilibrium - [params.stop.radeqm] - enabled = true # Tim said false in the meeting, true is a try for this grid - atol = 0.2 # absolute tolerance [W m-2] - rtol = 1e-3 # relative tolerance - - # atmospheric escape - [params.stop.escape] - enabled = true - p_stop = 1.0 # bar, model will terminate with p_surf < p_stop - - -# ---------------------------------------------------- -# Star -[star] - - # Physical parameters - mass = 1.0 # M_sun - age_ini = 0.100 # Gyr, model initialisation/start age - - module = "mors" - [star.mors] - rot_pcntle = 50.0 # rotation percentile - # rot_period = 80.6 # rotation period [days] - tracks = "spada" # evolution tracks: spada | baraffe - age_now = 4.567 # Gyr, current age of star used for scaling - spec = "stellar_spectra/Named/sun.txt" # stellar spectrum - - [star.dummy] - radius = 1.0 # R_sun - calculate_radius = false # Calculate star radius using scaling from Teff? 
- Teff = 5772.0 # K - -# Orbital system -[orbit] - instellation_method = 'sma' # whether to define orbit using semi major axis ('sma') or instellation flux ('inst') - instellationflux = 1.0 # instellation flux received from the planet in [Earth units] - semimajoraxis = 0.5 # initial semi-major axis of planet's orbit [AU] - eccentricity = 0.0 # initial eccentricity of planet's orbit [dimensionless] - zenith_angle = 48.19 # characteristic zenith angle [degrees] - s0_factor = 0.375 # instellation scale factor [dimensionless] - - evolve = false # whether to evolve the SMaxis and eccentricity - module = "none" # module used to calculate tidal heating - - [orbit.dummy] - H_tide = 1e-11 # Fixed tidal power density [W kg-1] - Phi_tide = "<0.3" # Tidal heating applied when inequality locally satisfied - Imk2 = 0.0 # Fixed imaginary part of k2 love number, cannot be positive - - [orbit.lovepy] - visc_thresh = 1e9 # Minimum viscosity required for heating [Pa s] - -# Planetary structure - physics table -[struct] - mass_tot = 1.0 # Total planet mass [M_earth] - # radius_int = 1.0 # Radius at mantle-atmosphere boundary [R_earth] - corefrac = 0.55 # non-dim., radius fraction - core_density = 10738.33 # Core density [kg m-3] - core_heatcap = 880.0 # Core specific heat capacity [J K-1 kg-1] - - module = "self" # self | zalmoxis - -# Atmosphere - physics table -[atmos_clim] - prevent_warming = false # do not allow the planet to heat up - surface_d = 0.01 # m, conductive skin thickness - surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity - cloud_enabled = false # enable water cloud radiative effects - cloud_alpha = 0.0 # condensate retention fraction (1 -> fully retained) - surf_state = "skin" # surface scheme: "mixed_layer" | "fixed" | "skin" - surf_greyalbedo = 0.1 # surface grey albedo - albedo_pl = 0.0 # Bond albedo (scattering) - rayleigh = true # enable rayleigh scattering - tmp_minimum = 0.5 # temperature floor on solver - tmp_maximum = 5000.0 # temperature 
ceiling on solver - - module = "janus" # Which atmosphere module to use - - [atmos_clim.agni] - p_top = 1.0e-5 # bar, top of atmosphere grid pressure - spectral_group = "Honeyside" # which gas opacities to include - spectral_bands = "256" # how many spectral bands? - num_levels = 50 # Number of atmospheric grid levels - chemistry = "none" # "none" | "eq" - surf_material = "greybody" # surface material file for scattering - solve_energy = true # solve for energy-conserving atmosphere profile - solution_atol = 1e-3 # solver absolute tolerance - solution_rtol = 2e-2 # solver relative tolerance - overlap_method = "ee" # gas overlap method - condensation = true # volatile condensation - real_gas = true # use real-gas equations of state - - [atmos_clim.janus] - p_top = 1.0e-5 # bar, top of atmosphere grid pressure - p_obs = 1.0e-3 # bar, observed pressure level - spectral_group = "Honeyside" # which gas opacities to include - spectral_bands = "256" # how many spectral bands? - F_atm_bc = 0 # measure outgoing flux at: (0) TOA | (1) Surface - num_levels = 50 # Number of atmospheric grid levels - tropopause = "none" # none | skin | dynamic - overlap_method = "ee" # gas overlap method - - [atmos_clim.dummy] - gamma = 0.7 # atmosphere opacity between 0 and 1 - -# Volatile escape - physics table -[escape] - - module = "zephyrus" # Which escape module to use - reservoir = "bulk" # Escaping reservoir: "bulk", "outgas", "pxuv". 
- - - [escape.zephyrus] - Pxuv = 1e-2 # Pressure at which XUV radiation become opaque in the planetary atmosphere [bar] - efficiency = 0.5 # Escape efficiency factor - tidal = false # Tidal contribution enabled - - [escape.dummy] - rate = 2e-3 # Bulk unfractionated escape rate [kg s-1] - -# Interior - physics table -[interior] - grain_size = 0.1 # crystal settling grain size [m] - F_initial = 8.0E4 # Initial heat flux guess [W m-2] - radiogenic_heat = false # enable radiogenic heat production - tidal_heat = false # enable tidal heat production - rheo_phi_loc = 0.4 # Centre of rheological transition - rheo_phi_wid = 0.15 # Width of rheological transition - bulk_modulus = 260e9 # Bulk modulus [Pa] - - module = "spider" # Which interior module to use - - [interior.spider] - num_levels = 200 # Number of SPIDER grid levels - mixing_length = 2 # Mixing length parameterization - tolerance = 1.0e-10 # solver tolerance - tolerance_rel = 1.0e-8 # relative solver tolerance - solver_type = "bdf" # SUNDIALS solver method - tsurf_atol = 20.0 # tsurf_poststep_change - tsurf_rtol = 0.01 # tsurf_poststep_change_frac - ini_entropy = 3300.0 # Surface entropy conditions [J K-1 kg-1] - ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1] - -# Outgassing - physics table -[outgas] - fO2_shift_IW = 4 # log10(ΔIW), atmosphere/interior boundary oxidation state - - module = "calliope" # Which outgassing module to use - - [outgas.calliope] - include_H2O = true # Include H2O compound - include_CO2 = true # Include CO2 compound - include_N2 = true # Include N2 compound - include_S2 = true # Include S2 compound - include_SO2 = true # Include SO2 compound - include_H2S = true # Include H2S compound - include_NH3 = true # Include NH3 compound - include_H2 = true # Include H2 compound - include_CH4 = true # Include CH4 compound - include_CO = true # Include CO compound - T_floor = 700.0 # Temperature floor applied to outgassing calculation [K]. 
- - [outgas.atmodeller] - some_parameter = "some_value" - -# Volatile delivery - physics table -[delivery] - - # Radionuclide parameters - radio_tref = 4.55 # Reference age for concentrations [Gyr] - radio_K = 310.0 # ppmw of potassium (all isotopes) - radio_U = 0.031 # ppmw of uranium (all isotopes) - radio_Th = 0.124 # ppmw of thorium (all isotopes) - - # Which initial inventory to use? - initial = 'elements' # "elements" | "volatiles" - - # No module for accretion as of yet - module = "none" - - # Set initial volatile inventory by planetary element abundances - [delivery.elements] - use_metallicity = false # whether or not to specify the elemental abundances in terms of solar metallicity - metallicity = 1000 # metallicity relative to solar metallicity - - H_oceans = 10.0 # Hydrogen inventory in units of equivalent Earth oceans - # H_ppmw = 0.0 # Hydrogen inventory in ppmw relative to mantle mass - - CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system - # C_ppmw = 0.0 # Carbon inventory in ppmw relative to mantle mass - - # NH_ratio = 0.0 # N/H mass ratio in mantle/atmosphere system - N_ppmw = 2.0 # Nitrogen inventory in ppmw relative to mantle mass - - # SH_ratio = 0.0 # S/H mass ratio in mantle/atmosphere system - S_ppmw = 200.0 # Sulfur inventory in ppmw relative to mantle mass - - # Set initial volatile inventory by partial pressures in atmosphere - [delivery.volatiles] - H2O = 30.0 # partial pressure of H2O - CO2 = 0.0 # partial pressure of CO2 - N2 = 0.0 # etc - S2 = 0.0 - SO2 = 0.0 - H2S = 0.0 - NH3 = 0.0 - H2 = 0.0 - CH4 = 0.0 - CO = 0.0 - -# Atmospheric chemistry postprocessing -[atmos_chem] - - module = "vulcan" # Atmospheric chemistry module - when = "manually" # When to run chemistry (manually, offline, online) - - # Physics flags - photo_on = true # Enable photochemistry - Kzz_on = true # Enable eddy diffusion - Kzz_const = "none" # Constant eddy diffusion coefficient (none => use profile) - moldiff_on = true # Enable molecular diffusion in 
the atmosphere - updraft_const = 0.0 # Set constant updraft velocity - - # Vulcan-specific atmospheric chemistry parameters - [atmos_chem.vulcan] - clip_fl = 1e-20 # Floor on stellar spectrum [erg s-1 cm-2 nm-1] - clip_vmr = 1e-10 # Neglect species with vmr < clip_vmr - make_funs = true # Generate reaction network functions - ini_mix = "profile" # Initial mixing ratios (profile, outgas) - fix_surf = false # Fixed surface mixing ratios - network = "SNCHO" # Class of chemical network to use (CHO, NCHO, SNCHO) - save_frames = true # Plot frames during iterations - yconv_cri = 0.05 # Convergence criterion, value of mixing ratios - slope_cri = 0.0001 # Convergence criterion, rate of change of mixing ratios - -# Calculate simulated observations -[observe] - - # Module with which to calculate the synthetic observables - synthesis = "none" diff --git a/input/demos/escape_grid_0_194Msun.toml b/input/demos/escape_grid_0_194Msun.toml deleted file mode 100644 index f4bbb24f3..000000000 --- a/input/demos/escape_grid_0_194Msun.toml +++ /dev/null @@ -1,354 +0,0 @@ -# PROTEUS configuration file (version 2.0) - -# Root tables should be physical, with the exception of "params" -# Software related options should go within the appropriate physical table - -# The general structure is: -# [root] metadata -# [params] parameters for code execution, output files, time-stepping, convergence -# [star] stellar parameters, model selection -# [orbit] planetary orbital parameters -# [struct] planetary structure (mass, radius) -# [atmos] atmosphere parameters, model selection -# [escape] escape parameters, model selection -# [interior] magma ocean model selection and parameters -# [outgas] outgassing parameters (fO2) and included volatiles -# [delivery] initial volatile inventory, and delivery model selection -# [observe] synthetic observations - -# ---------------------------------------------------- -# Metadata -version = "2.0" -author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" - -# 
---------------------------------------------------- -# Parameters -[params] - # output files - [params.out] - path = "scratch/escape_0_194Msun" - logging = "INFO" - plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations - plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended - write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n iterations - archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive - remove_sf = true - - # time-stepping - [params.dt] - minimum = 3e2 # yr, minimum time-step - minimum_rel = 1e-5 # relative minimum time-step [dimensionless] - maximum = 3e7 # yr, maximum time-step - initial = 1e4 # yr, initial step size - starspec = 1e8 # yr, interval to re-calculate the stellar spectrum - starinst = 100 # yr, interval to re-calculate the instellation - method = "adaptive" # proportional | adaptive | maximum - - [params.dt.proportional] - propconst = 52.0 # Proportionality constant - - [params.dt.adaptive] - atol = 0.02 # Step size atol - rtol = 0.07 # Step size rtol - - # Termination criteria - # Set enabled=true/false in each section to enable/disable that termination criterion - [params.stop] - - # Require criteria to be satisfied twice before model will exit? 
- strict = false - - # required number of iterations - [params.stop.iters] - enabled = true - minimum = 5 - maximum = 9000 - - # required time constraints - [params.stop.time] - enabled = true - minimum = 1.0e3 # yr, model will certainly run to t > minimum - maximum = 5e+9 # yr, model will terminate when t > maximum - - # solidification - [params.stop.solid] - enabled = true - phi_crit = 0.005 # non-dim., model will terminate when global melt fraction < phi_crit - - # radiative equilibrium - [params.stop.radeqm] - enabled = true # Tim said false in the meeting, true is a try for this grid - atol = 0.2 # absolute tolerance [W m-2] - rtol = 1e-3 # relative tolerance - - # atmospheric escape - [params.stop.escape] - enabled = true - p_stop = 1.0 # bar, model will terminate with p_surf < p_stop - - -# ---------------------------------------------------- -# Star -[star] - - # Physical parameters - mass = 0.194 # M_sun - age_ini = 0.100 # Gyr, model initialisation/start age - - module = "mors" - [star.mors] - rot_pcntle = 50.0 # rotation percentile - # rot_period = 122 # rotation period [days] - tracks = "spada" # evolution tracks: spada | baraffe - age_now = 5 # Gyr, current age of star used for scaling - spec = "stellar_spectra/Named/gj1132.txt" # stellar spectrum - - [star.dummy] - radius = 0.2211 # R_sun - calculate_radius = false # Calculate star radius using scaling from Teff? 
- Teff = 3229.0 # K - -# Orbital system -[orbit] - instellation_method = 'sma' # whether to define orbit using semi major axis ('sma') or instellation flux ('inst') - instellationflux = 1.0 # instellation flux received from the planet in [Earth units] - semimajoraxis = 0.0188 # initial semi-major axis of planet's orbit [AU] - eccentricity = 0.0 # initial eccentricity of planet's orbit [dimensionless] - zenith_angle = 48.19 # characteristic zenith angle [degrees] - s0_factor = 0.375 # instellation scale factor [dimensionless] - - evolve = false # whether to evolve the SMaxis and eccentricity - module = "none" # module used to calculate tidal heating - - [orbit.dummy] - H_tide = 1e-11 # Fixed tidal power density [W kg-1] - Phi_tide = "<0.3" # Tidal heating applied when inequality locally satisfied - Imk2 = 0.0 # Fixed imaginary part of k2 love number, cannot be positive - - [orbit.lovepy] - visc_thresh = 1e9 # Minimum viscosity required for heating [Pa s] - -# Planetary structure - physics table -[struct] - mass_tot = 1.0 # Total planet mass [M_earth] - # radius_int = 1.0 # Radius at mantle-atmosphere boundary [R_earth] - corefrac = 0.55 # non-dim., radius fraction - core_density = 10738.33 # Core density [kg m-3] - core_heatcap = 880.0 # Core specific heat capacity [J K-1 kg-1] - - module = "self" # self | zalmoxis - - [struct.zalmoxis] - coremassfrac = 0.325 # core mass fraction [non-dim.] - inner_mantle_mass_fraction = 0 # inner mantle mass fraction [non-dim.] - weight_iron_frac = 0.325 # iron fraction in the planet [non-dim.] - num_levels = 100 # number of Zalmoxis radius layers - EOSchoice = "Tabulated:iron/silicate" # iron/silicate for super-Earths, water for water planets with Earth-like rocky cores - max_iterations_outer = 20 # max. iterations for the outer loop - tolerance_outer = 1e-3 # tolerance for the outer loop - max_iterations_inner = 100 # max. 
iterations for the inner loop - tolerance_inner = 1e-4 # tolerance for the inner loop - relative_tolerance = 1e-5 # relative tolerance for solve_ivp - absolute_tolerance = 1e-6 # absolute tolerance for solve_ivp - target_surface_pressure = 101325 # target surface pressure - pressure_tolerance = 1e11 # tolerance surface pressure - max_iterations_pressure = 200 # max. iterations for the innermost loop - pressure_adjustment_factor = 1.1 # factor for adjusting the pressure in the innermost loop - -# Atmosphere - physics table -[atmos_clim] - prevent_warming = false # do not allow the planet to heat up - surface_d = 0.01 # m, conductive skin thickness - surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity - cloud_enabled = false # enable water cloud radiative effects - cloud_alpha = 0.0 # condensate retention fraction (1 -> fully retained) - surf_state = "skin" # surface scheme: "mixed_layer" | "fixed" | "skin" - surf_greyalbedo = 0.1 # surface grey albedo - albedo_pl = 0.0 # Bond albedo (scattering) - rayleigh = true # enable rayleigh scattering - tmp_minimum = 0.5 # temperature floor on solver - tmp_maximum = 5000.0 # temperature ceiling on solver - - module = "agni" # Which atmosphere module to use - - [atmos_clim.agni] - p_top = 1.0e-5 # bar, top of atmosphere grid pressure - spectral_group = "Honeyside" # which gas opacities to include - spectral_bands = "256" # how many spectral bands? 
- num_levels = 50 # Number of atmospheric grid levels - chemistry = "none" # "none" | "eq" - surf_material = "greybody" # surface material file for scattering - solve_energy = true # solve for energy-conserving atmosphere profile - solution_atol = 1e-3 # solver absolute tolerance - solution_rtol = 2e-2 # solver relative tolerance - overlap_method = "ee" # gas overlap method - condensation = true # volatile condensation - real_gas = true # use real-gas equations of state - - [atmos_clim.janus] - p_top = 1.0e-5 # bar, top of atmosphere grid pressure - p_obs = 1.0e-3 # bar, observed pressure level - spectral_group = "Honeyside" # which gas opacities to include - spectral_bands = "256" # how many spectral bands? - F_atm_bc = 0 # measure outgoing flux at: (0) TOA | (1) Surface - num_levels = 50 # Number of atmospheric grid levels - tropopause = "none" # none | skin | dynamic - overlap_method = "ee" # gas overlap method - - [atmos_clim.dummy] - gamma = 0.7 # atmosphere opacity between 0 and 1 - -# Volatile escape - physics table -[escape] - - module = "zephyrus" # Which escape module to use - reservoir = "outgas" # Escaping reservoir: "bulk", "outgas", "pxuv". 
- - - [escape.zephyrus] - Pxuv = 1e-3 # Pressure at which XUV radiation become opaque in the planetary atmosphere [bar] - efficiency = 1.0 # Escape efficiency factor - tidal = false # Tidal contribution enabled - - [escape.dummy] - rate = 2e-3 # Bulk unfractionated escape rate [kg s-1] - -# Interior - physics table -[interior] - grain_size = 0.1 # crystal settling grain size [m] - F_initial = 8.0E4 # Initial heat flux guess [W m-2] - radiogenic_heat = false # enable radiogenic heat production - tidal_heat = false # enable tidal heat production - rheo_phi_loc = 0.4 # Centre of rheological transition - rheo_phi_wid = 0.15 # Width of rheological transition - bulk_modulus = 260e9 # Bulk modulus [Pa] - - module = "spider" # Which interior module to use - - [interior.spider] - num_levels = 200 # Number of SPIDER grid levels - mixing_length = 2 # Mixing length parameterization - tolerance = 1.0e-10 # solver tolerance - tolerance_rel = 1.0e-8 # relative solver tolerance - solver_type = "bdf" # SUNDIALS solver method - tsurf_atol = 20.0 # tsurf_poststep_change - tsurf_rtol = 0.01 # tsurf_poststep_change_frac - ini_entropy = 3300.0 # Surface entropy conditions [J K-1 kg-1] - ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1] - - [interior.aragog] - logging = "ERROR" - num_levels = 200 # Number of Aragog grid levels - tolerance = 1.0e-10 # solver tolerance - ini_tmagma = 3500.0 # Initial magma surface temperature [K] - inner_boundary_condition = 1 # 1 = core cooling model, 2 = prescribed heat flux, 3 = prescribed temperature - inner_boundary_value = 4000 # core temperature [K], if inner_boundary_condition = 3. 
CMB heat flux [W/m^2], if inner_boundary_condition = 2 - conduction = true # enable conductive heat transfer - convection = true # enable convective heat transfer - gravitational_separation = false # enable gravitational separation - mixing = false # enable mixing - dilatation = false # enable dilatation source term - mass_coordinates = false # enable mass coordinates - tsurf_poststep_change = 30 # threshold of maximum change on surface temperature - event_triggering = true # enable events triggering to avoid abrupt jumps in surface temperature - - [interior.dummy] - ini_tmagma = 3500.0 # Initial magma surface temperature [K] - -# Outgassing - physics table -[outgas] - fO2_shift_IW = 0 # log10(ΔIW), atmosphere/interior boundary oxidation state - - module = "calliope" # Which outgassing module to use - - [outgas.calliope] - include_H2O = true # Include H2O compound - include_CO2 = true # Include CO2 compound - include_N2 = true # Include N2 compound - include_S2 = true # Include S2 compound - include_SO2 = true # Include SO2 compound - include_H2S = true # Include H2S compound - include_NH3 = true # Include NH3 compound - include_H2 = true # Include H2 compound - include_CH4 = true # Include CH4 compound - include_CO = true # Include CO compound - T_floor = 700.0 # Temperature floor applied to outgassing calculation [K]. - - [outgas.atmodeller] - some_parameter = "some_value" - -# Volatile delivery - physics table -[delivery] - - # Radionuclide parameters - radio_tref = 4.55 # Reference age for concentrations [Gyr] - radio_K = 310.0 # ppmw of potassium (all isotopes) - radio_U = 0.031 # ppmw of uranium (all isotopes) - radio_Th = 0.124 # ppmw of thorium (all isotopes) - - # Which initial inventory to use? 
- initial = 'elements' # "elements" | "volatiles" - - # No module for accretion as of yet - module = "none" - - # Set initial volatile inventory by planetary element abundances - [delivery.elements] - use_metallicity = false # whether or not to specify the elemental abundances in terms of solar metallicity - metallicity = 1000 # metallicity relative to solar metallicity - - H_oceans = 1.0 # Hydrogen inventory in units of equivalent Earth oceans - # H_ppmw = 0.0 # Hydrogen inventory in ppmw relative to mantle mass - - CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system - # C_ppmw = 0.0 # Carbon inventory in ppmw relative to mantle mass - - # NH_ratio = 0.0 # N/H mass ratio in mantle/atmosphere system - N_ppmw = 2.0 # Nitrogen inventory in ppmw relative to mantle mass - - # SH_ratio = 0.0 # S/H mass ratio in mantle/atmosphere system - S_ppmw = 200.0 # Sulfur inventory in ppmw relative to mantle mass - - # Set initial volatile inventory by partial pressures in atmosphere - [delivery.volatiles] - H2O = 30.0 # partial pressure of H2O - CO2 = 0.0 # partial pressure of CO2 - N2 = 0.0 # etc - S2 = 0.0 - SO2 = 0.0 - H2S = 0.0 - NH3 = 0.0 - H2 = 0.0 - CH4 = 0.0 - CO = 0.0 - -# Atmospheric chemistry postprocessing -[atmos_chem] - - module = "vulcan" # Atmospheric chemistry module - when = "manually" # When to run chemistry (manually, offline, online) - - # Physics flags - photo_on = true # Enable photochemistry - Kzz_on = true # Enable eddy diffusion - Kzz_const = "none" # Constant eddy diffusion coefficient (none => use profile) - moldiff_on = true # Enable molecular diffusion in the atmosphere - updraft_const = 0.0 # Set constant updraft velocity - - # Vulcan-specific atmospheric chemistry parameters - [atmos_chem.vulcan] - clip_fl = 1e-20 # Floor on stellar spectrum [erg s-1 cm-2 nm-1] - clip_vmr = 1e-10 # Neglect species with vmr < clip_vmr - make_funs = true # Generate reaction network functions - ini_mix = "profile" # Initial mixing ratios (profile, outgas) - 
fix_surf = false # Fixed surface mixing ratios - network = "SNCHO" # Class of chemical network to use (CHO, NCHO, SNCHO) - save_frames = true # Plot frames during iterations - yconv_cri = 0.05 # Convergence criterion, value of mixing ratios - slope_cri = 0.0001 # Convergence criterion, rate of change of mixing ratios - -# Calculate simulated observations -[observe] - - # Module with which to calculate the synthetic observables - synthesis = "none" diff --git a/input/demos/escape_grid_1Msun.toml b/input/demos/escape_grid_1Msun.toml deleted file mode 100644 index aeb4e25f5..000000000 --- a/input/demos/escape_grid_1Msun.toml +++ /dev/null @@ -1,354 +0,0 @@ -# PROTEUS configuration file (version 2.0) - -# Root tables should be physical, with the exception of "params" -# Software related options should go within the appropriate physical table - -# The general structure is: -# [root] metadata -# [params] parameters for code execution, output files, time-stepping, convergence -# [star] stellar parameters, model selection -# [orbit] planetary orbital parameters -# [struct] planetary structure (mass, radius) -# [atmos] atmosphere parameters, model selection -# [escape] escape parameters, model selection -# [interior] magma ocean model selection and parameters -# [outgas] outgassing parameters (fO2) and included volatiles -# [delivery] initial volatile inventory, and delivery model selection -# [observe] synthetic observations - -# ---------------------------------------------------- -# Metadata -version = "2.0" -author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" - -# ---------------------------------------------------- -# Parameters -[params] - # output files - [params.out] - path = "scratch/escape_grid_1Msun" - logging = "INFO" - plot_mod = 5 # Plotting frequency, 0: wait until completion | n: every n iterations - plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended - write_mod = 1 # Write CSV frequency, 0: wait until completion | n: every n 
iterations - archive_mod = 0 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive - remove_sf = true - - # time-stepping - [params.dt] - minimum = 3e2 # yr, minimum time-step - minimum_rel = 1e-5 # relative minimum time-step [dimensionless] - maximum = 3e7 # yr, maximum time-step - initial = 1e4 # yr, initial step size - starspec = 100e6 # yr, interval to re-calculate the stellar spectrum - starinst = 100 # yr, interval to re-calculate the instellation - method = "adaptive" # proportional | adaptive | maximum - - [params.dt.proportional] - propconst = 52.0 # Proportionality constant - - [params.dt.adaptive] - atol = 0.02 # Step size atol - rtol = 0.07 # Step size rtol - - # Termination criteria - # Set enabled=true/false in each section to enable/disable that termination criterion - [params.stop] - - # Require criteria to be satisfied twice before model will exit? - strict = false - - # required number of iterations - [params.stop.iters] - enabled = true - minimum = 5 - maximum = 9000 - - # required time constraints - [params.stop.time] - enabled = true - minimum = 1.0e3 # yr, model will certainly run to t > minimum - maximum = 4.567e+9 # yr, model will terminate when t > maximum - - # solidification - [params.stop.solid] - enabled = true - phi_crit = 0.005 # non-dim., model will terminate when global melt fraction < phi_crit - - # radiative equilibrium - [params.stop.radeqm] - enabled = true # Tim said false in the meeting, true is a try for this grid - atol = 0.2 # absolute tolerance [W m-2] - rtol = 1e-3 # relative tolerance - - # atmospheric escape - [params.stop.escape] - enabled = true - p_stop = 1.0 # bar, model will terminate with p_surf < p_stop - - -# ---------------------------------------------------- -# Star -[star] - - # Physical parameters - mass = 1.0 # M_sun - age_ini = 0.100 # Gyr, model initialisation/start age - - module = "mors" - [star.mors] - rot_pcntle = 50.0 # rotation percentile - # rot_period = 80.6 
# rotation period [days] - tracks = "spada" # evolution tracks: spada | baraffe - age_now = 4.567 # Gyr, current age of star used for scaling - spec = "stellar_spectra/Named/sun.txt" # stellar spectrum - - [star.dummy] - radius = 1.0 # R_sun - calculate_radius = false # Calculate star radius using scaling from Teff? - Teff = 5772.0 # K - -# Orbital system -[orbit] - instellation_method = 'sma' # whether to define orbit using semi major axis ('sma') or instellation flux ('inst') - instellationflux = 1.0 # instellation flux received from the planet in [Earth units] - semimajoraxis = 1.0 # initial semi-major axis of planet's orbit [AU] - eccentricity = 0.0 # initial eccentricity of planet's orbit [dimensionless] - zenith_angle = 48.19 # characteristic zenith angle [degrees] - s0_factor = 0.375 # instellation scale factor [dimensionless] - - evolve = false # whether to evolve the SMaxis and eccentricity - module = "none" # module used to calculate tidal heating - - [orbit.dummy] - H_tide = 1e-11 # Fixed tidal power density [W kg-1] - Phi_tide = "<0.3" # Tidal heating applied when inequality locally satisfied - Imk2 = 0.0 # Fixed imaginary part of k2 love number, cannot be positive - - [orbit.lovepy] - visc_thresh = 1e9 # Minimum viscosity required for heating [Pa s] - -# Planetary structure - physics table -[struct] - mass_tot = 1.0 # Total planet mass [M_earth] - # radius_int = 1.0 # Radius at mantle-atmosphere boundary [R_earth] - corefrac = 0.55 # non-dim., radius fraction - core_density = 10738.33 # Core density [kg m-3] - core_heatcap = 880.0 # Core specific heat capacity [J K-1 kg-1] - - module = "self" # self | zalmoxis - - [struct.zalmoxis] - coremassfrac = 0.325 # core mass fraction [non-dim.] - inner_mantle_mass_fraction = 0 # inner mantle mass fraction [non-dim.] - weight_iron_frac = 0.325 # iron fraction in the planet [non-dim.] 
- num_levels = 100 # number of Zalmoxis radius layers - EOSchoice = "Tabulated:iron/silicate" # iron/silicate for super-Earths, water for water planets with Earth-like rocky cores - max_iterations_outer = 20 # max. iterations for the outer loop - tolerance_outer = 1e-3 # tolerance for the outer loop - max_iterations_inner = 100 # max. iterations for the inner loop - tolerance_inner = 1e-4 # tolerance for the inner loop - relative_tolerance = 1e-5 # relative tolerance for solve_ivp - absolute_tolerance = 1e-6 # absolute tolerance for solve_ivp - target_surface_pressure = 101325 # target surface pressure - pressure_tolerance = 1e11 # tolerance surface pressure - max_iterations_pressure = 200 # max. iterations for the innermost loop - pressure_adjustment_factor = 1.1 # factor for adjusting the pressure in the innermost loop - -# Atmosphere - physics table -[atmos_clim] - prevent_warming = false # do not allow the planet to heat up - surface_d = 0.01 # m, conductive skin thickness - surface_k = 2.0 # W m-1 K-1, conductive skin thermal conductivity - cloud_enabled = false # enable water cloud radiative effects - cloud_alpha = 0.0 # condensate retention fraction (1 -> fully retained) - surf_state = "skin" # surface scheme: "mixed_layer" | "fixed" | "skin" - surf_greyalbedo = 0.1 # surface grey albedo - albedo_pl = 0.0 # Bond albedo (scattering) - rayleigh = true # enable rayleigh scattering - tmp_minimum = 0.5 # temperature floor on solver - tmp_maximum = 5000.0 # temperature ceiling on solver - - module = "agni" # Which atmosphere module to use - - [atmos_clim.agni] - p_top = 1.0e-5 # bar, top of atmosphere grid pressure - spectral_group = "Honeyside" # which gas opacities to include - spectral_bands = "256" # how many spectral bands? 
- num_levels = 50 # Number of atmospheric grid levels - chemistry = "none" # "none" | "eq" - surf_material = "greybody" # surface material file for scattering - solve_energy = true # solve for energy-conserving atmosphere profile - solution_atol = 1e-3 # solver absolute tolerance - solution_rtol = 2e-2 # solver relative tolerance - overlap_method = "ee" # gas overlap method - condensation = true # volatile condensation - real_gas = true # use real-gas equations of state - - [atmos_clim.janus] - p_top = 1.0e-5 # bar, top of atmosphere grid pressure - p_obs = 1.0e-3 # bar, observed pressure level - spectral_group = "Honeyside" # which gas opacities to include - spectral_bands = "256" # how many spectral bands? - F_atm_bc = 0 # measure outgoing flux at: (0) TOA | (1) Surface - num_levels = 50 # Number of atmospheric grid levels - tropopause = "none" # none | skin | dynamic - overlap_method = "ee" # gas overlap method - - [atmos_clim.dummy] - gamma = 0.7 # atmosphere opacity between 0 and 1 - -# Volatile escape - physics table -[escape] - - module = "zephyrus" # Which escape module to use - reservoir = "outgas" # Escaping reservoir: "bulk", "outgas", "pxuv". 
- - - [escape.zephyrus] - Pxuv = 1e-3 # Pressure at which XUV radiation become opaque in the planetary atmosphere [bar] - efficiency = 1.0 # Escape efficiency factor - tidal = false # Tidal contribution enabled - - [escape.dummy] - rate = 2e-3 # Bulk unfractionated escape rate [kg s-1] - -# Interior - physics table -[interior] - grain_size = 0.1 # crystal settling grain size [m] - F_initial = 8.0E4 # Initial heat flux guess [W m-2] - radiogenic_heat = false # enable radiogenic heat production - tidal_heat = false # enable tidal heat production - rheo_phi_loc = 0.4 # Centre of rheological transition - rheo_phi_wid = 0.15 # Width of rheological transition - bulk_modulus = 260e9 # Bulk modulus [Pa] - - module = "spider" # Which interior module to use - - [interior.spider] - num_levels = 200 # Number of SPIDER grid levels - mixing_length = 2 # Mixing length parameterization - tolerance = 1.0e-10 # solver tolerance - tolerance_rel = 1.0e-8 # relative solver tolerance - solver_type = "bdf" # SUNDIALS solver method - tsurf_atol = 20.0 # tsurf_poststep_change - tsurf_rtol = 0.01 # tsurf_poststep_change_frac - ini_entropy = 3300.0 # Surface entropy conditions [J K-1 kg-1] - ini_dsdr = -4.698e-6 # Interior entropy gradient [J K-1 kg-1 m-1] - - [interior.aragog] - logging = "ERROR" - num_levels = 200 # Number of Aragog grid levels - tolerance = 1.0e-10 # solver tolerance - ini_tmagma = 3500.0 # Initial magma surface temperature [K] - inner_boundary_condition = 1 # 1 = core cooling model, 2 = prescribed heat flux, 3 = prescribed temperature - inner_boundary_value = 4000 # core temperature [K], if inner_boundary_condition = 3. 
CMB heat flux [W/m^2], if inner_boundary_condition = 2 - conduction = true # enable conductive heat transfer - convection = true # enable convective heat transfer - gravitational_separation = false # enable gravitational separation - mixing = false # enable mixing - dilatation = false # enable dilatation source term - mass_coordinates = false # enable mass coordinates - tsurf_poststep_change = 30 # threshold of maximum change on surface temperature - event_triggering = true # enable events triggering to avoid abrupt jumps in surface temperature - - [interior.dummy] - ini_tmagma = 3500.0 # Initial magma surface temperature [K] - -# Outgassing - physics table -[outgas] - fO2_shift_IW = 0 # log10(ΔIW), atmosphere/interior boundary oxidation state - - module = "calliope" # Which outgassing module to use - - [outgas.calliope] - include_H2O = true # Include H2O compound - include_CO2 = true # Include CO2 compound - include_N2 = true # Include N2 compound - include_S2 = true # Include S2 compound - include_SO2 = true # Include SO2 compound - include_H2S = true # Include H2S compound - include_NH3 = true # Include NH3 compound - include_H2 = true # Include H2 compound - include_CH4 = true # Include CH4 compound - include_CO = true # Include CO compound - T_floor = 700.0 # Temperature floor applied to outgassing calculation [K]. - - [outgas.atmodeller] - some_parameter = "some_value" - -# Volatile delivery - physics table -[delivery] - - # Radionuclide parameters - radio_tref = 4.55 # Reference age for concentrations [Gyr] - radio_K = 310.0 # ppmw of potassium (all isotopes) - radio_U = 0.031 # ppmw of uranium (all isotopes) - radio_Th = 0.124 # ppmw of thorium (all isotopes) - - # Which initial inventory to use? 
- initial = 'elements' # "elements" | "volatiles" - - # No module for accretion as of yet - module = "none" - - # Set initial volatile inventory by planetary element abundances - [delivery.elements] - use_metallicity = false # whether or not to specify the elemental abundances in terms of solar metallicity - metallicity = 1000 # metallicity relative to solar metallicity - - H_oceans = 1.0 # Hydrogen inventory in units of equivalent Earth oceans - # H_ppmw = 0.0 # Hydrogen inventory in ppmw relative to mantle mass - - CH_ratio = 1.0 # C/H mass ratio in mantle/atmosphere system - # C_ppmw = 0.0 # Carbon inventory in ppmw relative to mantle mass - - # NH_ratio = 0.0 # N/H mass ratio in mantle/atmosphere system - N_ppmw = 2.0 # Nitrogen inventory in ppmw relative to mantle mass - - # SH_ratio = 0.0 # S/H mass ratio in mantle/atmosphere system - S_ppmw = 200.0 # Sulfur inventory in ppmw relative to mantle mass - - # Set initial volatile inventory by partial pressures in atmosphere - [delivery.volatiles] - H2O = 30.0 # partial pressure of H2O - CO2 = 0.0 # partial pressure of CO2 - N2 = 0.0 # etc - S2 = 0.0 - SO2 = 0.0 - H2S = 0.0 - NH3 = 0.0 - H2 = 0.0 - CH4 = 0.0 - CO = 0.0 - -# Atmospheric chemistry postprocessing -[atmos_chem] - - module = "vulcan" # Atmospheric chemistry module - when = "manually" # When to run chemistry (manually, offline, online) - - # Physics flags - photo_on = true # Enable photochemistry - Kzz_on = true # Enable eddy diffusion - Kzz_const = "none" # Constant eddy diffusion coefficient (none => use profile) - moldiff_on = true # Enable molecular diffusion in the atmosphere - updraft_const = 0.0 # Set constant updraft velocity - - # Vulcan-specific atmospheric chemistry parameters - [atmos_chem.vulcan] - clip_fl = 1e-20 # Floor on stellar spectrum [erg s-1 cm-2 nm-1] - clip_vmr = 1e-10 # Neglect species with vmr < clip_vmr - make_funs = true # Generate reaction network functions - ini_mix = "profile" # Initial mixing ratios (profile, outgas) - 
fix_surf = false # Fixed surface mixing ratios - network = "SNCHO" # Class of chemical network to use (CHO, NCHO, SNCHO) - save_frames = true # Plot frames during iterations - yconv_cri = 0.05 # Convergence criterion, value of mixing ratios - slope_cri = 0.0001 # Convergence criterion, rate of change of mixing ratios - -# Calculate simulated observations -[observe] - - # Module with which to calculate the synthetic observables - synthesis = "none" diff --git a/input/ensembles/escape_comparison_on_off.toml b/input/ensembles/escape_comparison_on_off.toml deleted file mode 100644 index fdfd38647..000000000 --- a/input/ensembles/escape_comparison_on_off.toml +++ /dev/null @@ -1,34 +0,0 @@ -# Config file for running a grid of forward models - -# Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "scratch/escape_on_off_janus_agni_1Msun_solid/" - -# Make `output` a symbolic link to this absolute location. To disable: set to empty string. -symlink = "" - -# Path to base (reference) config file relative to PROTEUS root folder -ref_config = "input/demos/escape_comparison.toml" - -# Use SLURM? -use_slurm = true - -# Execution limits -max_jobs = 5 # maximum number of concurrent tasks (e.g. 500 on Habrok) -max_days = 5 # maximum number of days to run (e.g. 1) -max_mem = 3 # maximum memory per CPU in GB (e.g. 3) - -# Now define grid axes... -# Each axis must be a new section (table) in this file. -# Each table corresponds to the name of the parameter to be varied. -# Each table name must be written in double quotes. 
-# See examples below - -# Escape module set directly -["escape.module"] - method = "direct" - values = ['none', 'zephyrus'] - -# Atmosphere module set directly -["atmos_clim.module"] - method = "direct" - values = ['janus', 'agni'] diff --git a/input/ensembles/escape_grid_0_194Msun.toml b/input/ensembles/escape_grid_0_194Msun.toml deleted file mode 100644 index c99f52433..000000000 --- a/input/ensembles/escape_grid_0_194Msun.toml +++ /dev/null @@ -1,59 +0,0 @@ -# Config file for running a grid of forward models - -# Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "scratch/escape_grid_0_194Msun/" - -# Make `output` a symbolic link to this absolute location. To disable: set to empty string. -symlink = "" - -# Path to base (reference) config file relative to PROTEUS root folder -ref_config = "input/demos/escape_grid_0_194Msun.toml" - -# Use SLURM? -use_slurm = true - -# Execution limits -max_jobs = 500 # maximum number of concurrent tasks (e.g. 500 on Habrok) -max_days = 5 # maximum number of days to run (e.g. 1) -max_mem = 3 # maximum memory per CPU in GB (e.g. 3) - -# Now define grid axes... -# Each axis must be a new section (table) in this file. -# Each table corresponds to the name of the parameter to be varied. -# Each table name must be written in double quotes. 
-# See examples below - -# Atmosphere module set directly -["atmos_clim.module"] - method = "direct" - values = ['agni', 'janus'] - -# Semi-major axis set by direct -["orbit.semimajoraxis"] - method = "direct" - values = [0.006618, 0.033091, 0.066182] - -# Escape efficiency set by direct -["escape.zephyrus.efficiency"] - method = "direct" - values = [0.1, 0.5, 1.0] - -# XUV Pressure set by direct -["escape.zephyrus.Pxuv"] - method = "direct" - values = [1e-5, 1e1] - -# Oxygen fugacity set by direct -["outgas.fO2_shift_IW"] - method = "direct" - values = [-4, 0, 4] - -# Planet bulk C/H ratio set by direct -["delivery.elements.CH_ratio"] - method = "direct" - values = [0.1, 1.0, 2.0] - -# Hydrogen inventory set by direct -["delivery.elements.H_oceans"] - method = "direct" - values = [1.0, 5.0, 10.0] diff --git a/input/ensembles/escape_grid_1Msun.toml b/input/ensembles/escape_grid_1Msun.toml deleted file mode 100644 index fd8bd207f..000000000 --- a/input/ensembles/escape_grid_1Msun.toml +++ /dev/null @@ -1,129 +0,0 @@ -# Config file for running a grid of forward models - -# Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "scratch/escape_grid_1Msun/" - -# Make `output` a symbolic link to this absolute location. To disable: set to empty string. -symlink = "" - -# Path to base (reference) config file relative to PROTEUS root folder -ref_config = "input/demos/escape_grid_1Msun.toml" - -# Use SLURM? -use_slurm = true - -# Execution limits -max_jobs = 1000 # maximum number of concurrent tasks (e.g. 500 on Habrok) -max_days = 5 # maximum number of days to run (e.g. 1) -max_mem = 3 # maximum memory per CPU in GB (e.g. 3) - -# Now define grid axes... -# Each axis must be a new section (table) in this file. -# Each table corresponds to the name of the parameter to be varied. -# Each table name must be written in double quotes. 
-# See examples below - -# Atmosphere module set directly -["atmos_clim.module"] - method = "direct" - values = ['agni', 'janus'] - -# Semi-major axis set by direct -["orbit.semimajoraxis"] - method = "direct" - values = [0.1, 0.5, 1.0] - -# Escape efficiency set by direct -["escape.zephyrus.efficiency"] - method = "direct" - values = [0.1, 0.5, 1.0] - -# XUV Pressure set by direct -["escape.zephyrus.Pxuv"] - method = "direct" - values = [1e-5, 1e1] - -# Oxygen fugacity set by direct -["outgas.fO2_shift_IW"] - method = "direct" - values = [-4, 0, 4] - -# Planet bulk C/H ratio set by direct -["delivery.elements.CH_ratio"] - method = "direct" - values = [0.1, 1.0, 2.0] - -# Hydrogen inventory set by direct -["delivery.elements.H_oceans"] - method = "direct" - values = [1.0, 5.0, 10.0] - -# Plotting configuration for ECDF -[plot.input_parameters] - - [plot.parameters.atmos_clim.module] - label = "Atmosphere module" - colormap = "viridis" - log_scale = false - - [plot.parameters.orbit.semimajoraxis] - label = "a [AU]" - colormap = "viridis" - log_scale = false - - [plot.parameters.escape.zephyrus.efficiency] - label = "\\rm \\epsilon" - colormap = "viridis" - log_scale = false - - [plot.parameters.escape.zephyrus.Pxuv] - label = "P_{\\rm XUV} [bar]" - colormap = "viridis" - log_scale = true - - [plot.parameters.outgas.fO2_shift_IW] - label = "\\rm \\log_{10} fO_2 [IW]" - colormap = "viridis" - log_scale = false - - [plot.parameters.delivery.elements.CH_ratio] - label = "C/H ratio" - colormap = "viridis" - log_scale = false - - [plot.parameters.delivery.elements.H_oceans] - label = "[H] [oceans]" - colormap = "viridis" - log_scale = false - -[plot.outputs] - - [plot.outputs.solidification_time] - label = "Solidification [yr]" - log_scale = true - scale = 1.0 - - [plot.outputs.Phi_global] - label = "Melt fraction [%]" - log_scale = false - scale = 100.0 - - [plot.outputs.T_surf] - label = "T_{\\rm surf} [10^3 K]" - log_scale = false - scale = 0.001 - - 
[plot.outputs.P_surf] - label = "P_{\\rm surf} [bar]" - log_scale = true - scale = 1.0 - - [plot.outputs.atm_kg_per_mol] - label = "MMW [g/mol]" - log_scale = false - scale = 1000.0 - - [plot.outputs.esc_rate_total] - label = "Escape rate [kg/s]" - log_scale = true - scale = 1.0 diff --git a/input/ensembles/toi561b_grid_a_0_0106AU.toml b/input/ensembles/toi561b_grid_a_0_0106AU.toml deleted file mode 100644 index 4ba595545..000000000 --- a/input/ensembles/toi561b_grid_a_0_0106AU.toml +++ /dev/null @@ -1,75 +0,0 @@ -# Config file for running a grid of forward models - -# Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "scratch/toi561b_grid_a_0_0106AU" - -# Make `output` a symbolic link to this absolute location. To disable: set to empty string. -symlink = "" - -# Path to base (reference) config file relative to PROTEUS root folder -ref_config = "input/planets/toi561b.toml" - -# Use SLURM? -use_slurm = true - -# Execution limits -max_jobs = 500 # maximum number of concurrent tasks (e.g. 500 on Habrok) -max_days = 5 # maximum number of days to run (e.g. 1) -max_mem = 3 # maximum memory per CPU in GB (e.g. 3) - -# Now define grid axes... -# Each axis must be a new section (table) in this file. -# Each table corresponds to the name of the parameter to be varied. -# Each table name must be written in double quotes. -# See examples below - -# Semi-major axis set directly -# We investigate 2 scenarios: period = 0.44 days (a=0.0106 AU, correct one) and test at 2 days (a=0.0287 AU) to reduce escape ? 
-["orbit.semimajoraxis"] - method = "direct" - values = [0.0106] - -# Core-Radius Fraction (CRF) set directly -["struct.corefrac"] - method = "direct" - values = [0.40, 0.55] - -# Bond albedo set directly -["atmos_clim.albedo_pl"] - method = "direct" - values = [0.0, 0.6] - -# Escape efficiency factor set directly -["escape.zephyrus.efficiency"] - method = "direct" - values = [0.01, 0.1] - -# Oxygen fugacity set directly -["outgas.fO2_shift_IW"] - method = "direct" - values = [0, 4] - -# # Hydrogen inventory set by arange -# ["delivery.elements.H_oceans"] -# method = "logspace" -# start = 1.0 -# stop = 1000.0 -# count = 4 - -# Hydrogen inventory set directly -["delivery.elements.H_oceans"] - method = "direct" - values = [1.0, 10.0, 100.0, 1000.0] - - -# Planet bulk C/H ratio set directly -["delivery.elements.CH_ratio"] - method = "direct" - values = [0.1, 2.0] # 1.0 out - -# Planet bulk S/H ratio set directly -["delivery.elements.SH_ratio"] - method = "direct" - values = [0.216, 21.6] # 2.16 out - -# Stellar rotation period to test ? diff --git a/input/ensembles/toi561b_grid_a_0_0287AU.toml b/input/ensembles/toi561b_grid_a_0_0287AU.toml deleted file mode 100644 index 0326e12a7..000000000 --- a/input/ensembles/toi561b_grid_a_0_0287AU.toml +++ /dev/null @@ -1,69 +0,0 @@ -# Config file for running a grid of forward models - -# Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "scratch/toi561b_grid_a_0_0287AU" - -# Make `output` a symbolic link to this absolute location. To disable: set to empty string. -symlink = "" - -# Path to base (reference) config file relative to PROTEUS root folder -ref_config = "input/planets/toi561b.toml" - -# Use SLURM? -use_slurm = true - -# Execution limits -max_jobs = 500 # maximum number of concurrent tasks (e.g. 500 on Habrok) -max_days = 5 # maximum number of days to run (e.g. 1) -max_mem = 3 # maximum memory per CPU in GB (e.g. 3) - -# Now define grid axes... 
-# Each axis must be a new section (table) in this file. -# Each table corresponds to the name of the parameter to be varied. -# Each table name must be written in double quotes. -# See examples below - -# Semi-major axis set directly -# We investigate 2 scenarios: period = 0.44 days (a=0.0106 AU, correct one) and test at 2 days (a=0.0287 AU) to reduce escape ? -["orbit.semimajoraxis"] - method = "direct" - values = [0.0287] - -# Core-Radius Fraction (CRF) set directly -["struct.corefrac"] - method = "direct" - values = [0.40, 0.55] - -# Bond albedo set directly -["atmos_clim.albedo_pl"] - method = "direct" - values = [0.0, 0.6] - -# Escape efficiency factor set directly -["escape.zephyrus.efficiency"] - method = "direct" - values = [0.01, 0.1] - -# Oxygen fugacity set directly -["outgas.fO2_shift_IW"] - method = "direct" - values = [0, 4] - -# Hydrogen inventory set by arange -["delivery.elements.H_oceans"] - method = "logspace" - start = 1.0 - stop = 1000.0 - count = 4 - -# Planet bulk C/H ratio set directly -["delivery.elements.CH_ratio"] - method = "direct" - values = [0.1, 1.0, 2.0] - -# Planet bulk S/H ratio set directly -["delivery.elements.SH_ratio"] - method = "direct" - values = [0.216, 2.16, 21.6] - -# Stellar rotation period to test ? 
From 0c6144863e82032d33028f74512ada54c17761bf Mon Sep 17 00:00:00 2001 From: Emma Postolec Date: Mon, 22 Dec 2025 11:06:14 +0100 Subject: [PATCH 069/105] Update src/proteus/grid/post_processing.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/proteus/grid/post_processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 8626a928d..ef9ea7848 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -70,7 +70,7 @@ def load_grid_cases(grid_dir: Path): init_params = {} if init_file.exists(): try: - init_params = toml.load(open(init_file)) + init_params = toml.load(init_file) except Exception as e: print(f"Error reading init file in {case.name}: {e}") From a75fbc562b12c95006ef21c72d2ae81a696fca71 Mon Sep 17 00:00:00 2001 From: Emma Postolec Date: Mon, 22 Dec 2025 11:07:37 +0100 Subject: [PATCH 070/105] Update src/proteus/grid/post_processing.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/proteus/grid/post_processing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index ef9ea7848..e989ec6a3 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -196,7 +196,8 @@ def load_phi_crit(grid_dir: str | Path): if not ref_file.exists(): raise FileNotFoundError(f"ref_config.toml not found in {grid_dir}") - ref = toml.load(open(ref_file)) + with ref_file.open("rb") as f: + ref = toml.load(f) # Find phi_crit value try: From fa577db13263725fdd261da439b09438552e14c1 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Fri, 6 Feb 2026 18:02:32 +0100 Subject: [PATCH 071/105] update toi561b time step --- input/planets/toi561b.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml index 
0330a42d9..d920d56bc 100644 --- a/input/planets/toi561b.toml +++ b/input/planets/toi561b.toml @@ -13,12 +13,12 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" plot_mod = 50 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended write_mod = 50 # Write CSV frequency, 0: wait until completion | n: every n iterations - archive_mod = 60 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive + archive_mod = 10 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive remove_sf = true # Remove SOCRATES spectral file when simulation ends. # time-stepping [params.dt] - minimum = 3e2 # yr, minimum time-step + minimum = 5e4 # yr, minimum time-step minimum_rel = 1e-5 # relative minimum time-step [dimensionless] maximum = 5e8 # yr, maximum time-step # if higher like 1e9, will produce too few snapshots initial = 1e3 # yr, inital step size From 663658c885962e50878465567b7e76d3767d5369 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Thu, 19 Mar 2026 10:08:52 +0100 Subject: [PATCH 072/105] update --- input/planets/toi561b.toml | 110 ++++++++++++++++++++++------ src/proteus/grid/post_processing.py | 13 ++-- 2 files changed, 95 insertions(+), 28 deletions(-) diff --git a/input/planets/toi561b.toml b/input/planets/toi561b.toml index d920d56bc..0bfc2f86a 100644 --- a/input/planets/toi561b.toml +++ b/input/planets/toi561b.toml @@ -1,9 +1,32 @@ -# PROTEUS configuration file (version 2.0) +# PROTEUS configuration file -version = "2.0" -author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" +# This is a comprehensive outline of all configuration options. It includes variables +# which have default values, in order to showcase the range of potential options available. 
+# Variable defaults are defined in `src/proteus/config/*.py` + +# Root tables should be physical, with the exception of "params" +# Software related options should go within the appropriate physical table +# For configuration see https://fwl-proteus.readthedocs.io/en/latest/config.html # ---------------------------------------------------- + +# The general structure is: +# [params] parameters for code execution, output files, time-stepping, convergence +# [star] stellar parameters, model selection +# [orbit] planetary orbital parameters +# [struct] planetary structure (mass, radius) +# [atmos_clim] atmosphere climate parameters, model selection +# [atmos_chem] atmosphere chemistry parameters, model selection +# [escape] escape parameters, model selection +# [interior] magma ocean model selection and parameters +# [outgas] outgassing parameters (fO2) and included volatiles +# [delivery] initial volatile inventory, and delivery model selection +# [observe] synthetic observations + +# ---------------------------------------------------- + +version = "2.0" + # Parameters [params] # output files @@ -13,7 +36,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" plot_mod = 50 # Plotting frequency, 0: wait until completion | n: every n iterations plot_fmt = "pdf" # Plotting image file format, "png" or "pdf" recommended write_mod = 50 # Write CSV frequency, 0: wait until completion | n: every n iterations - archive_mod = 10 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive + archive_mod = 100 # Archive frequency, 0: wait until completion | n: every n iterations | none: do not archive remove_sf = true # Remove SOCRATES spectral file when simulation ends. 
# time-stepping @@ -31,7 +54,7 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [params.dt.adaptive] atol = 0.02 # Step size atol - rtol = 0.07 # Step size rtol + rtol = 0.10 # Step size rtol # Termination criteria # Set enabled=true/false in each section to enable/disable that termination criterion @@ -70,8 +93,10 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" # disintegration [params.stop.disint] enabled = false + roche_enabled = false offset_roche = 0 # correction to calculated Roche limit [m] + spin_enabled = false offset_spin = 0 # correction to calculated Breakup period [s] @@ -91,7 +116,18 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" rot_period = 'none' # rotation period [days] tracks = "spada" # evolution tracks: spada | baraffe age_now = 11 # [Gyr] from Lacedelli et al., 2022 - spec = "stellar_spectra/Named/toi561.txt" # stellar spectrum + spectrum_source = "solar" # Spectrum source: 'solar' for solar spectra; 'muscles' for MUSCLES spectra; 'phoenix' for synthetic PHOENIX spectrum; see https://proteus-framework.org/proteus/data.html#stellar-spectra + star_name = "sun" # star name, relevant for when spectrum_source = 'solar' (use e.g. 'sun' or 'Sun0.6Ga') or when spectrum_source = 'muscles' (use e.g. 'trappist-1' or 'gj1214'). Not relevent when spectrum_source = 'phoenix'. + star_path = "../FWL_DATA/stellar_spectra/Named/toi561.txt" # optional override star path to custom stellar spectrum, e.g. "$FWL_DATA/stellar_spectra/solar/sun.txt" + + # PHOENIX parameters, only relevant if spectrum_source = "phoenix". Defaults to solar (0.0). 
+ phoenix_FeH = 0.0 # metallicity [Fe/H] + phoenix_alpha = 0.0 # alpha enhancement [α/M] + + # if None, calculated by mors + phoenix_radius = "none" # Stellar radius [R_sun] used for PHOENIX spectrum scaling + phoenix_log_g = "none" # Stellar surface gravity [dex] + phoenix_Teff = "none" # Stellar effective temperature [K] [star.dummy] radius = 0.843 # from Lacedelli et al., 2022 [R_sun] @@ -134,22 +170,30 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" module = "self" # self | zalmoxis [struct.zalmoxis] - verbose = false # verbose printing? - coremassfrac = 0.325 # core mass fraction [non-dim.] - inner_mantle_mass_fraction = 0 # inner mantle mass fraction [non-dim.] - weight_iron_frac = 0.325 # iron fraction in the planet [non-dim.] - num_levels = 100 # number of Zalmoxis radius layers - EOSchoice = "Tabulated:iron/silicate" # iron/silicate for super-Earths, water for water planets with Earth-like rocky cores - max_iterations_outer = 20 # max. iterations for the outer loop - tolerance_outer = 1e-3 # tolerance for the outer loop - max_iterations_inner = 100 # max. iterations for the inner loop - tolerance_inner = 1e-4 # tolerance for the inner loop - relative_tolerance = 1e-5 # relative tolerance for solve_ivp - absolute_tolerance = 1e-6 # absolute tolerance for solve_ivp - target_surface_pressure = 101325 # target surface pressure - pressure_tolerance = 1e11 # tolerance surface pressure - max_iterations_pressure = 200 # max. iterations for the innermost loop - pressure_adjustment_factor = 1.1 # factor for adjusting the pressure in the innermost loop + EOSchoice = "Tabulated:iron/Tdep_silicate" # EOS choices: "Tabulated:iron/silicate", "Tabulated:iron/Tdep_silicate", "Tabulated:water" + coremassfrac = 0.325 # core mass fraction [non-dim.] + mantle_mass_fraction = 0 # mantle mass fraction [non-dim.] + weight_iron_frac = 0.325 # iron fraction in the planet [non-dim.] 
+ temperature_mode = "linear" # Input temperature profile choices: "isothermal", "linear", "prescribed" + surface_temperature = 3500 # Surface temperature [K], required for temperature_mode="isothermal" or "linear" + center_temperature = 6000 # Center temperature [K], required for temperature_mode="linear" + temperature_profile_file = "zalmoxis_ini_input_temp.txt" # filename with a prescribed temperature profile, required for temperature_mode="prescribed" + num_levels = 150 # number of Zalmoxis radius layers + max_iterations_outer = 100 # max. iterations for the outer loop + tolerance_outer = 3e-3 # tolerance for the outer loop + max_iterations_inner = 100 # max. iterations for the inner loop + tolerance_inner = 1e-4 # tolerance for the inner loop + relative_tolerance = 1e-5 # relative tolerance for solve_ivp + absolute_tolerance = 1e-6 # absolute tolerance for solve_ivp + maximum_step = 250000 # maximum integration step size [m] + adaptive_radial_fraction = 0.98 # radial fraction for transition from adaptive integration to fixed-step integration when using "Tabulated:iron/Tdep_silicate" EOS + max_center_pressure_guess = 0.99e12 # maximum central pressure guess based on "Tabulated:iron/Tdep_silicate" EOS limit [Pa] + target_surface_pressure = 101325 # target surface pressure [Pa] + pressure_tolerance = 1e9 # tolerance surface pressure [Pa] + max_iterations_pressure = 200 # max. iterations for the innermost loop + pressure_adjustment_factor = 1.1 # factor for adjusting the pressure in the innermost loop + verbose = false # detailed convergence info and warnings printing? + iteration_profiles_enabled = false # pressure and density profiles for each iteration logging? # Atmosphere - physics table [atmos_clim] @@ -232,6 +276,26 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" [escape.dummy] rate = 0.0 # Bulk unfractionated escape rate [kg s-1] + [escape.boreas] + fractionate = true # Include fractionation in outflow? 
+ efficiency = 0.1 # Escape efficiency factor + sigma_H = 1.89e-18 # H absorption cross-section in XUV [cm2] + sigma_O = 2.00e-18 # O absorption ^ + sigma_C = 2.50e-18 # C absorption ^ + sigma_N = 3.00e-18 # N absorption ^ + sigma_S = 6.00e-18 # S absorption ^ + kappa_H2 = 0.01 # H2 opacity in IR, grey [cm2 g-1] + kappa_H2O = 1.0 # H2O opacity ^ + kappa_O2 = 1.0 # O2 opacity ^ + kappa_CO2 = 1.0 # CO2 opacity ^ + kappa_CO = 1.0 # CO opacity ^ + kappa_CH4 = 1.0 # CH4 opacity ^ + kappa_N2 = 1.0 # N2 opacity ^ + kappa_NH3 = 1.0 # NH3 opacity ^ + kappa_H2S = 1.0 # H2S opacity ^ + kappa_SO2 = 1.0 # SO2 opacity ^ + kappa_S2 = 1.0 # S2 opacity ^ + # Interior - physics table [interior] grain_size = 0.1 # crystal settling grain size [m] @@ -240,10 +304,10 @@ author = "Harrison Nicholls, Tim Lichtenberg, Emma Postolec" tidal_heat = false # enable tidal heat production rheo_phi_loc = 0.4 # Centre of rheological transition rheo_phi_wid = 0.15 # Width of rheological transition - bulk_modulus = 260e9 # Bulk modulus [Pa] melting_dir = "Monteux-600" # Name of folder constaining melting curves lookup_dir = "1TPa-dK09-elec-free/MgSiO3_Wolf_Bower_2018_1TPa" # Name of folder with EOS tables, etc. 
+ module = "spider" # Which interior module to use [interior.spider] diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index e989ec6a3..7f7a80e4f 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -196,10 +196,9 @@ def load_phi_crit(grid_dir: str | Path): if not ref_file.exists(): raise FileNotFoundError(f"ref_config.toml not found in {grid_dir}") - with ref_file.open("rb") as f: + with ref_file.open("r", encoding="utf-8") as f: ref = toml.load(f) - # Find phi_crit value try: phi_crit = ref["params"]["stop"]["solid"]["phi_crit"] except KeyError: @@ -672,6 +671,7 @@ def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: di tested_params = {} for key, value in raw_params.items(): if isinstance(value, dict) and "values" in value: + print(key, value["values"]) # Only store the 'values' list tested_params[key] = value["values"] grid_params = tested_params @@ -743,11 +743,12 @@ def color_func(v): sns.ecdfplot( data=raw, log_scale=out_settings.get("log_scale", False), + stat="percent", color=color_func(val), linewidth=4, linestyle='-', ax=ax - ) + ) # Configure x-axis labels, ticks, grids if i == n_rows - 1: @@ -760,13 +761,15 @@ def color_func(v): # Configure y-axis (shared label added later) if j == 0: ax.set_ylabel("") - ticks = [0.0, 0.5, 1.0] + ticks = [0.0, 50, 100] ax.set_yticks(ticks) ax.tick_params(axis='y', labelsize=22) else: ax.set_ylabel("") ax.set_yticks(ticks) ax.tick_params(axis='y', labelleft=False) + ax.tick_params(axis='x', which='minor', direction='in', top=True, bottom=True, length=2) + ax.tick_params(axis='x', which='major', direction='inout', top=True, bottom=True, length=6) ax.grid(alpha=0.4) @@ -785,7 +788,7 @@ def color_func(v): ax.legend(handles=handles, fontsize=24,bbox_to_anchor=(1.01, 1), loc='upper left') # Add a single, shared y-axis label - fig.text(0.07, 0.5, 'Empirical cumulative fraction of grid simulations', va='center', 
rotation='vertical', fontsize=40) + fig.text(0.07, 0.5, 'Empirical cumulative distribution of grid simulations [%]', va='center', rotation='vertical', fontsize=40) # Save figure output_dir = grid_dir / "post_processing" / "grid_plots" From 99a3c83626988d5975ad39242e5e4498a2e85514 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 11:29:46 +0200 Subject: [PATCH 073/105] update the generate_csv funciton to avoid redundancy as suggested by copilot --- src/proteus/grid/post_processing.py | 175 +++++++--------------------- 1 file changed, 44 insertions(+), 131 deletions(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 7f7a80e4f..d401ef911 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -257,150 +257,69 @@ def extract_solidification_time(cases_data: list, grid_dir: str | Path): return solidification_times -def generate_summary_csv(cases_data: list, case_params: dict, grid_dir: str | Path, grid_name: str): +def generate_summary_csv( + cases_data: list, + case_params: dict, + grid_dir: str | Path, + grid_name: str, +): """ - Generate CSV file summarizing all simulation cases in the grid, including: - - Case status - - Values of tested grid parameters - - All extracted values from runtime_helpfile.csv (at last timestep) - - Solidification time - - Parameters - ---------- - cases_data : list - List of dictionaries containing simulation data. - case_params : dict - Dictionary mapping case index -> {parameter_name: value} - grid_dir : str or Path - Path to the grid directory containing ref_config.toml. - grid_name : str - Name of the grid. 
+ Generate CSV files summarizing simulation cases: + - All cases + - Completed cases only + - Running + Error cases only """ - # Compute solidification times - solidification_times = extract_solidification_time(cases_data, grid_dir) - - # Extract data for each case - summary_rows = [] - for case_index, case in enumerate(cases_data): - row = {} - - # Case status - row["case_number"] = case_index - row["status"] = case["status"] - - # Values of tested grid parameters for each case - params = case_params.get(case_index, {}) - for k, v in params.items(): - row[k] = v - - # Output values (at last timestep) - df = case["output_values"] - if df is not None: - for col in df.columns: - row[col] = df[col].iloc[-1] - - # Solidification time - row["solidification_time"] = solidification_times[case_index] - - summary_rows.append(row) - - # Create DataFrame and save it in the grid directory in post_processing/extracted_data/ - summary_df = pd.DataFrame(summary_rows) - output_dir = grid_dir / "post_processing" / "extracted_data" - output_dir.mkdir(parents=True, exist_ok=True) - output_file = output_dir / f"{grid_name}_final_extracted_data_all.csv" - summary_df.to_csv(output_file, sep="\t", index=False) + def include_case(status: str, mode: str) -> bool: + status = status.lower() + if mode == "all": + return True + elif mode == "completed": + return status.startswith("completed") + elif mode == "running_error": + return status.startswith("running") or status.startswith("error") + else: + raise ValueError(f"Unknown mode: {mode}") -def generate_completed_summary_csv(cases_data: list, case_params: dict, grid_dir: str | Path, grid_name: str): - """ - Same function as generate_summary_csv, but only include fully 'Completed' cases. 
- """ - # Compute solidification times + # Compute solidification times once solidification_times = extract_solidification_time(cases_data, grid_dir) - # Extract data for each fully completed case - summary_rows = [] - for case_index, case in enumerate(cases_data): - status = case.get("status", "").lower() - if not status.startswith("completed"): - continue # skip non-completed cases - - row = {} - - # Case status - row["case_number"] = case_index - row["status"] = case["status"] - - # Values of tested grid parameters for each case - params = case_params.get(case_index, {}) - for k, v in params.items(): - row[k] = v - - # Output values (at last timestep) - df = case["output_values"] - if df is not None: - for col in df.columns: - row[col] = df[col].iloc[-1] - - # Solidification time - row["solidification_time"] = solidification_times[case_index] - - summary_rows.append(row) - - # Create DataFrame and save - summary_df = pd.DataFrame(summary_rows) output_dir = grid_dir / "post_processing" / "extracted_data" output_dir.mkdir(parents=True, exist_ok=True) - # Updated CSV name to indicate only completed cases - output_file = output_dir / f"{grid_name}_final_extracted_data_completed.csv" - summary_df.to_csv(output_file, sep="\t", index=False) + modes = ["all", "completed", "running_error"] -def generate_running_error_summary_csv(cases_data: list, case_params: dict, grid_dir: str | Path, grid_name: str): - """ - Same function as generate_summary_csv, but only include 'Running' and 'Error' cases. 
- """ - # Compute solidification times - solidification_times = extract_solidification_time(cases_data, grid_dir) + for mode in modes: + summary_rows = [] - # Extract data for each running or error case - summary_rows = [] - for case_index, case in enumerate(cases_data): - status = case.get("status", "").lower() - if not (status.startswith("running") or status.startswith("error")): - continue # skip cases that are neither running nor error + for case_index, case in enumerate(cases_data): + status = case.get("status", "") - row = {} + if not include_case(status, mode): + continue - # Case status - row["case_number"] = case_index - row["status"] = case["status"] + row = { + "case_number": case_index, + "status": status, + } - # Values of tested grid parameters for each case - params = case_params.get(case_index, {}) - for k, v in params.items(): - row[k] = v + # Parameters + row.update(case_params.get(case_index, {})) - # Output values (at last timestep) - df = case["output_values"] - if df is not None: - for col in df.columns: - row[col] = df[col].iloc[-1] + # Output values + df = case.get("output_values") + if df is not None and not df.empty: + row.update(df.iloc[-1].to_dict()) - # Solidification time - row["solidification_time"] = solidification_times[case_index] + # Solidification time + row["solidification_time"] = solidification_times[case_index] - summary_rows.append(row) + summary_rows.append(row) - # Create DataFrame and save - summary_df = pd.DataFrame(summary_rows) - output_dir = grid_dir / "post_processing" / "extracted_data" - output_dir.mkdir(parents=True, exist_ok=True) + summary_df = pd.DataFrame(summary_rows) - # Updated CSV name to indicate only running/error cases - output_file = output_dir / f"{grid_name}_final_extracted_data_running_error.csv" - summary_df.to_csv(output_file, sep="\t", index=False) + output_file = output_dir / f"{grid_name}_final_extracted_data_{mode}.csv" + summary_df.to_csv(output_file, sep="\t", index=False) # 
--------------------------------------------------------- # Plotting functions @@ -831,12 +750,6 @@ def main(grid_analyse_toml_file: str | Path = None): generate_summary_csv( data, input_param_grid_per_case, grid_path, grid_name ) - generate_completed_summary_csv( - data, input_param_grid_per_case, grid_path, grid_name - ) - generate_running_error_summary_csv( - data, input_param_grid_per_case, grid_path, grid_name - ) else: # Check that CSVs exist for f in [summary_csv_all, summary_csv_completed, summary_csv_running_error]: From 08bac68df553f5abc0f8851fcc5fc60833b3f78e Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 11:32:28 +0200 Subject: [PATCH 074/105] update doc of group_output_by_parameter function as suggested by copilot --- src/proteus/grid/post_processing.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index d401ef911..81ef308ee 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -513,18 +513,18 @@ def load_ecdf_plot_settings(cfg): def group_output_by_parameter(df, grid_parameters, outputs): """ - Groups output values (like P_surf) by a specific grid parameter. + Groups output values (like P_surf) by one or more grid parameters. Parameters ---------- df : pd.DataFrame - DataFrame containing simulation results including value of the grid parameter and the corresponding extracted output. + DataFrame containing simulation results including values of the grid parameters and the corresponding extracted outputs. - grid_parameters : str - Column name of the grid parameter to group by (like 'escape.zephyrus.efficiency'). + grid_parameters : list of str + Column names of the grid parameters to group by (for example, ['escape.zephyrus.efficiency']). - outputs : str - Column name of the output to extract (like 'P_surf'). 
+ outputs : list of str + Column names of the outputs to extract (for example, ['P_surf']). Returns ------- From bf1b8fcbf7a9e0650f65acd610fe1a3355f13586 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 11:39:11 +0200 Subject: [PATCH 075/105] update doc of ecdf_grid_plot function --- src/proteus/grid/post_processing.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 81ef308ee..193bbf252 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -563,9 +563,6 @@ def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: di Parameters ---------- - grid_params : dict - Dictionary of tested grid parameters and their grid values (directly from copy.grid.toml) - grouped_data : dict Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. @@ -581,8 +578,11 @@ def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: di - "log_scale": bool, whether to plot the x-axis on log scale - "scale": float, a factor to multiply raw values by before plotting - plots_path : str - Path to the grid where to create "single_plots_ecdf" and save all .png plots + grid_dir : str or Path + Path to the grid directory (used for saving the plot and loading tested parameters). + + grid_name : str + Name of the grid (used for saving the plot). 
""" # Load tested grid parameters From 91ce43aa8a92a224cb0e9d2773e15f9e35b4ccc1 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 11:46:15 +0200 Subject: [PATCH 076/105] context manager in ecdf_grid_plot to close toml file for copilot issue --- src/proteus/grid/post_processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 193bbf252..d5a45b90e 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -586,7 +586,9 @@ def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: di """ # Load tested grid parameters - raw_params = toml.load(grid_dir / "copy.grid.toml") + #raw_params = toml.load(grid_dir / "copy.grid.toml") + with open(grid_dir / "copy.grid.toml", "r") as f: + raw_params = toml.load(f) tested_params = {} for key, value in raw_params.items(): if isinstance(value, dict) and "values" in value: From 03ea9d39b2687fc5c0998add60fcd0535d97acc5 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 11:49:27 +0200 Subject: [PATCH 077/105] update doc --- docs/How-to/usage.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/How-to/usage.md b/docs/How-to/usage.md index 11606d9db..47163c712 100644 --- a/docs/How-to/usage.md +++ b/docs/How-to/usage.md @@ -182,12 +182,12 @@ Before running the command, update the `example.grid_analyse.toml` file to match proteus grid-analyse input/ensembles/example.grid_analyse.toml ``` -Executing the command creates a `post_processing` folder inside your grid directory containing all post-processing outputs: +Executing the command creates a `post_processing` folder inside your grid directory containing all post-processing outputs: -- Extracted data: CSV files with simulation status, input parameters, and output values at the last time step are stored in: - `post_processing/extracted_data/` -- Plots: Status summaries and 
ECDF grid plots are saved in: - `post_processing/plots/` +- Extracted data: CSV files with simulation status, input parameters, and output values at the last time step are stored in: + `post_processing/extracted_data/` +- Plots: Status summaries and ECDF grid plots are saved in: + `post_processing/grid_plots/` ## Retrieval scheme (Bayesian optimisation) From df59de7989fc83a31810c04f16191c421bb359e2 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 11:51:33 +0200 Subject: [PATCH 078/105] clean extra docstring in post_processing.py --- src/proteus/grid/post_processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index d5a45b90e..645a512e7 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -176,7 +176,7 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): return case_params, tested_params def load_phi_crit(grid_dir: str | Path): - """" + """ Load the critical melt fraction (phi_crit) from the reference configuration file of the grid. Parameters @@ -207,7 +207,7 @@ def load_phi_crit(grid_dir: str | Path): return phi_crit def extract_solidification_time(cases_data: list, grid_dir: str | Path): - """" + """ Extract solidification time for each simulation of the grid for the condition Phi_global < phi_crit at last time step. 
From d97a5bf14c6c50362564f1598127a45e71384952 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 11:52:56 +0200 Subject: [PATCH 079/105] update docstring latex function --- src/proteus/grid/post_processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 645a512e7..8b5bf9516 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -551,7 +551,7 @@ def group_output_by_parameter(df, grid_parameters, outputs): def latex(label: str) -> str: """ - Wraps a label in dollar signs for LaTeX formatting if it contains 2 backslashes. + Wraps a label in dollar signs for LaTeX formatting if it contains a backslash. """ return f"${label}$" if "\\" in label else label From aefbc036228cfa3a4e471e809a3640291ba93129 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 11:58:25 +0200 Subject: [PATCH 080/105] remove grid_analyse_toml_file = None by default --- src/proteus/grid/post_processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 8b5bf9516..c2f78a9d5 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -721,7 +721,7 @@ def color_func(v): # --------------------------------------------------------- # main # --------------------------------------------------------- -def main(grid_analyse_toml_file: str | Path = None): +def main(grid_analyse_toml_file: str | Path): # Load configuration from grid_analyse.toml with open(grid_analyse_toml_file, "rb") as f: From 5877426af14bf3fe3796c6960bd7781152a8fdd7 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 11:59:27 +0200 Subject: [PATCH 081/105] typo in example.grid_analyse.toml --- input/ensembles/example.grid_analyse.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/input/ensembles/example.grid_analyse.toml b/input/ensembles/example.grid_analyse.toml index 0f1f64069..f8d2ee8ac 100644 --- a/input/ensembles/example.grid_analyse.toml +++ b/input/ensembles/example.grid_analyse.toml @@ -57,7 +57,7 @@ plot_ecdf = true # Generate ECDF grid plot for input parameters and o [output_variables.T_surf] label = "T_{\\rm surf}\\,[10^{3}\\,\\mathrm{K}]" log_scale = false - scale = 0.001 # convert K to 10^3 K for beyter readability on plot + scale = 0.001 # convert K to 10^3 K for better readability on plot [output_variables.P_surf] label = "P_{\\rm surf}\\,[\\mathrm{bar}]" From 9e879a9b5d6f5a588eda8680787cf6af0caf1c6f Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 12:14:22 +0200 Subject: [PATCH 082/105] update legend fO2 plot and update cli command to be consistent with PROTEUS command --- input/ensembles/example.grid_analyse.toml | 2 +- src/proteus/cli.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/input/ensembles/example.grid_analyse.toml b/input/ensembles/example.grid_analyse.toml index f8d2ee8ac..3e36edcf8 100644 --- a/input/ensembles/example.grid_analyse.toml +++ b/input/ensembles/example.grid_analyse.toml @@ -30,7 +30,7 @@ plot_ecdf = true # Generate ECDF grid plot for input parameters and o log_scale = true [input_parameters.outgas.fO2_shift_IW] - label = "\\log_{10} fO_2\\,[\\Delta IW]" + label = "\\Delta IW" log_scale = false diff --git a/src/proteus/cli.py b/src/proteus/cli.py index 35bfa7f14..f45af9cd8 100644 --- a/src/proteus/cli.py +++ b/src/proteus/cli.py @@ -550,15 +550,15 @@ def observe(config_path: Path): # ---------------- @click.command() -@click.argument("path_grid_analyse", type=str, required=True) +@config_option -def grid_analyse(path_grid_analyse: str): +def grid_analyse(config_path: Path): """Generate grid analysis plots and CSV summary files from a grid - path_grid_analyse : Path to the toml file containing grid analysis 
configuration + config_path : Path to the toml file containing grid analysis configuration """ from proteus.grid.post_processing import main - main(path_grid_analyse) + main(config_path) cli.add_command(grid_analyse) From 21b311378cadc7447c4996fa7f805b44d9c40c13 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 12:15:18 +0200 Subject: [PATCH 083/105] update doc with latest cli changes --- docs/How-to/usage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/How-to/usage.md b/docs/How-to/usage.md index 47163c712..a4c8c5a3a 100644 --- a/docs/How-to/usage.md +++ b/docs/How-to/usage.md @@ -179,7 +179,7 @@ Results from a PROTEUS grid can be post-processed using the `proteus grid-analys Before running the command, update the `example.grid_analyse.toml` file to match your grid. Specify the input parameters used in your simulations and select the output variables you want to visualize. To post-process a grid and generate ECDF plots for further analysis, run the following command: ``` -proteus grid-analyse input/ensembles/example.grid_analyse.toml +proteus grid-analyse --config input/ensembles/example.grid_analyse.toml ``` Executing the command creates a `post_processing` folder inside your grid directory containing all post-processing outputs: From 25b30bef1e4ea904f3f78330abb204ce724dfbaf Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 13:30:17 +0200 Subject: [PATCH 084/105] allow the user to choose the format of plot png or pdf --- src/proteus/grid/post_processing.py | 36 ++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index c2f78a9d5..0f9944a11 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -325,7 +325,7 @@ def include_case(status: str, mode: str) -> bool: # Plotting functions # --------------------------------------------------------- -def 
plot_grid_status(df: pd.DataFrame, grid_dir: str | Path, grid_name: str): +def plot_grid_status(df: pd.DataFrame, cfg: dict, grid_dir: str | Path, grid_name: str): """ Plot histogram summary of number of simulation statuses in the grid using the generated CSV file for all cases. @@ -335,12 +335,17 @@ def plot_grid_status(df: pd.DataFrame, grid_dir: str | Path, grid_name: str): df : pandas.DataFrame DataFrame loaded from grid_name_final_extracted_data_all.csv. + cfg : dict + Configuration dictionary containing plotting options. + grid_dir : Path Path to the grid directory. grid_name : str Name of the grid. """ + # Extract plot_format from cfg + plot_format = cfg.get("plot_format") if "status" not in df.columns: raise ValueError("CSV must contain a 'status' column") @@ -412,7 +417,7 @@ def plot_grid_status(df: pd.DataFrame, grid_dir: str | Path, grid_name: str): # Save output_dir = Path(grid_dir) / "post_processing" / "grid_plots" output_dir.mkdir(parents=True, exist_ok=True) - output_file = output_dir / f"summary_grid_statuses_{grid_name}.png" + output_file = output_dir / f"summary_grid_statuses_{grid_name}.{plot_format}" plt.savefig(output_file, dpi=300, bbox_inches='tight') plt.close() @@ -484,6 +489,10 @@ def load_ecdf_plot_settings(cfg): Whether to use a logarithmic x-axis. - "scale" : float Factor applied to raw output values before plotting. + + plot_format : str + Format for saving plots ("png" or "pdf"). 
+ """ # Load input parameter settings @@ -509,7 +518,9 @@ def load_ecdf_plot_settings(cfg): "scale": val.get("scale", 1.0), } - return param_settings, output_settings + plot_format = cfg.get("plot_format") + + return param_settings, output_settings, plot_format def group_output_by_parameter(df, grid_parameters, outputs): """ @@ -555,10 +566,10 @@ def latex(label: str) -> str: """ return f"${label}$" if "\\" in label else label -def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: dict, grid_dir: str | Path, grid_name: str): +def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: dict, plot_format: str, grid_dir: str | Path, grid_name: str): """ Creates ECDF grid plots where each row corresponds to one input parameter - and each column corresponds to one output. Saves the resulting figure as a PNG. + and each column corresponds to one output. Saves the resulting figure as a {plot_format}. Parameters ---------- @@ -578,6 +589,9 @@ def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: di - "log_scale": bool, whether to plot the x-axis on log scale - "scale": float, a factor to multiply raw values by before plotting + plot_format : str + Format for saving plots ("png" or "pdf"). + grid_dir : str or Path Path to the grid directory (used for saving the plot and loading tested parameters). 
@@ -586,13 +600,12 @@ def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: di """ # Load tested grid parameters - #raw_params = toml.load(grid_dir / "copy.grid.toml") with open(grid_dir / "copy.grid.toml", "r") as f: raw_params = toml.load(f) tested_params = {} for key, value in raw_params.items(): if isinstance(value, dict) and "values" in value: - print(key, value["values"]) + #print(key, value["values"]) # Only store the 'values' list tested_params[key] = value["values"] grid_params = tested_params @@ -714,7 +727,7 @@ def color_func(v): # Save figure output_dir = grid_dir / "post_processing" / "grid_plots" output_dir.mkdir(parents=True, exist_ok=True) - output_file = output_dir / f"ecdf_grid_plot_{grid_name}.png" + output_file = output_dir / f"ecdf_grid_plot_{grid_name}.{plot_format}" fig.savefig(output_file, dpi=300, bbox_inches='tight') plt.close(fig) @@ -764,7 +777,7 @@ def main(grid_analyse_toml_file: str | Path): # --- Plot grid status --- if cfg.get("plot_status", True): all_simulations_data_csv = pd.read_csv(summary_csv_all, sep="\t") - plot_grid_status(all_simulations_data_csv, grid_path, grid_name) + plot_grid_status(all_simulations_data_csv, cfg, grid_path, grid_name) print("Plot grid status summary is available.") # --- ECDF plots --- @@ -780,13 +793,14 @@ def main(grid_analyse_toml_file: str | Path): ) grouped_data.update(group) - param_settings_grid, output_settings_grid = load_ecdf_plot_settings(cfg) + param_settings_grid, output_settings_grid, plot_format = load_ecdf_plot_settings(cfg) ecdf_grid_plot( grouped_data, param_settings_grid, output_settings_grid, + plot_format, grid_path, - grid_name, + grid_name ) print("ECDF grid plot is available.") From 134833ce4d904a516ee4c2518453a444b7f129a2 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 13:36:35 +0200 Subject: [PATCH 085/105] remove printing statement --- src/proteus/grid/post_processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 0f9944a11..0a9a67f15 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -744,7 +744,7 @@ def main(grid_analyse_toml_file: str | Path): grid_path = Path(cfg["grid_path"]) grid_name = get_grid_name(grid_path) - print(grid_path) + #print(grid_path) print(f"Analyzing grid: {grid_name}") # Load grid data From 92348f6c1323735747483f05e6b76711feaf2e98 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 15:32:58 +0200 Subject: [PATCH 086/105] update grid path from same config file and add preset_labels --- input/ensembles/example.grid_test_ppgrid.toml | 110 ++++++++++++++++++ src/proteus/grid/post_processing.py | 3 +- src/proteus/utils/plot.py | 69 +++++++++++ 3 files changed, 181 insertions(+), 1 deletion(-) create mode 100644 input/ensembles/example.grid_test_ppgrid.toml diff --git a/input/ensembles/example.grid_test_ppgrid.toml b/input/ensembles/example.grid_test_ppgrid.toml new file mode 100644 index 000000000..38ce42616 --- /dev/null +++ b/input/ensembles/example.grid_test_ppgrid.toml @@ -0,0 +1,110 @@ +# Config file for running a grid of forward models + +# Path to output folder where grid will be saved (relative to PROTEUS output folder) +output = "scratch/grid_demo/" + +# Make `output` a symbolic link to this absolute location. To disable: set to empty string. 
+symlink = "" + +# Post-processing options +update_csv = true # Whether to update the summary CSV files before plotting +plot_format = "png" # Format for saving plots ("png" or "pdf") +plot_status = true # Generate status summary plot of the grid +plot_ecdf = true # Generate ECDF grid plot for input parameters tested in the grid and output_variables defined below +#output_variables = ["solidification_time", "Phi_global", "T_surf", "P_surf", "atm_kg_per_mol", "esc_rate_total", "rho_obs"] # List of output variables to include in ECDF plot + +# Path to base (reference) config file relative to PROTEUS root folder +ref_config = "input/demos/dummy.toml" + +# Use SLURM? +use_slurm = true + +# Execution limits +max_jobs = 10 # maximum number of concurrent tasks (e.g. 500 on Habrok) +max_days = 1 # maximum number of days to run (e.g. 1) +max_mem = 3 # maximum memory per CPU in GB (e.g. 3) + +# Now define grid axes... +# Each axis must be a new section (table) in this file. +# Each table corresponds to the name of the parameter to be varied. +# Each table name must be written in double quotes. 
+# See examples below + +# Planet mass set directly +["struct.mass_tot"] + method = "direct" + values = [1.0, 2.0] + +# Hydrogen inventory set by arange +["delivery.elements.H_ppmw"] + method = "arange" + start = 1 + stop = 100 + step = 50 + + +# Input parameters configuration for ECDF plot (1 row per input parameter) +[input_parameters] + + colormap = "viridis" # Default colormap for all input parameters + + [input_parameters.struct.mass_tot] + label = "Mass [M_\\oplus]" + log_scale = false + + [input_parameters.delivery.elements.H_ppmw] + label = "[H] [ppmw]" + log_scale = false + +# Output variables configuration for ECDF plot (1 column per output variable) +[output_variables] + + [output_variables.solidification_time] + label = "Solidification [yr]" # label of the output variable on x-axis + log_scale = true # whether to use log scale or not on x-axis + scale = 1.0 # scaling factor to apply to the output variable values + + [output_variables.Phi_global] + label = "Melt fraction [%]" + log_scale = false + scale = 100.0 # convert melt fraction to percentage + + [output_variables.T_surf] + label = "T_{\\rm surf}\\,[10^{3}\\,\\mathrm{K}]" + log_scale = false + scale = 0.001 # convert K to 10^3 K for better readability on plot + + [output_variables.P_surf] + label = "P_{\\rm surf}\\,[\\mathrm{bar}]" + log_scale = true + scale = 1.0 + + [output_variables.atm_kg_per_mol] + label = "MMW [g/mol]" + log_scale = false + scale = 1000.0 # convert kg/mol to g/mol + + [output_variables.esc_rate_total] + label = "Escape rate [g/s]" + log_scale = true + scale = 1000.0 + + [output_variables.bond_albedo] + label = "Bond albedo" + log_scale = false + scale = 1.0 + + [output_variables.rho_obs] + label = "\\rho_{\\rm obs}\\,[\\mathrm{g/cm^3}]" + log_scale = false + scale = 0.001 # convert kg/m^3 to g/cm^3 + + [output_variables.R_obs] + label = "R_{\\rm obs}\\,[R_\\oplus]" # in units of Earth radii + log_scale = false + scale = 1.56961231e-7 # convert m to Earth radii + + 
[output_variables.M_planet] + label = "M_{\\rm p}\\,[M_\\oplus]" # in units of Earth masses + log_scale = false + scale = 1.6744809e-25 # convert kg to Earth masses diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 0a9a67f15..e40d322e8 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -741,7 +741,8 @@ def main(grid_analyse_toml_file: str | Path): cfg = tomllib.load(f) # Get grid path and name - grid_path = Path(cfg["grid_path"]) + grid_path = Path('output/' + cfg["output"] + '/') + print(f"Grid path: {grid_path}") grid_name = get_grid_name(grid_path) #print(grid_path) diff --git a/src/proteus/utils/plot.py b/src/proteus/utils/plot.py index 583e7d42c..9982e89bc 100644 --- a/src/proteus/utils/plot.py +++ b/src/proteus/utils/plot.py @@ -86,6 +86,75 @@ 'nacljet': '#ee29f5', } +# Standard label for input and output variables +_preset_labels = { + ## Input parameters (from input.toml files) + # Orbit module + 'orbit.semimajoraxis': 'a [AU]', + 'orbit.eccentricity': 'e', + + # Structure module + 'struct.mass_tot': 'Mass [M_\\oplus]', + 'struct.radius_int': 'Radius [R_\\oplus]', + 'struct.corefrac': 'CRF', + + # Atmosphere module + 'atmos_clim.module': 'Atmospheric\ntreatment', + + # Escape module + 'escape.zephyrus.efficiency': '\\rm \\epsilon', + 'escape.zephyrus.Pxuv': 'P_{\\rm XUV}\\,[bar]', + + # Outgassing module + 'outgas.fO2_shift_IW': '\\Delta\\,IW', + + # Delivery module + 'delivery.elements.H_oceans': 'H [Earth oceans]', + 'delivery.elements.H_ppmw': 'H [ppmw]', + 'delivery.elements.H_kg': 'H [kg]', + 'delivery.elements.CH_ratio': 'C/H ratio', + 'delivery.elements.C_ppmw': 'C [ppmw]', + 'delivery.elements.C_kg': 'C [kg]', + 'delivery.elements.NH_ratio': 'N/H ratio', + 'delivery.elements.N_ppmw': 'N [ppmw]', + 'delivery.elements.N_kg': 'N [kg]', + 'delivery.elements.SH_ratio': 'S/H ratio', + 'delivery.elements.S_ppmw': 'S [ppmw]', + 'delivery.elements.S_kg': 'S [kg]', + + ## 
Output variables (from runtime_helpfile.csv) + # Model tracking + 'Time': 'Time [yr]', + 'solidification_time': 'Solidification time [yr]', # computed in post-processing script, not in runtime_helpfile.csv + + # Orbital parameters + 'semimajorax': 'a [m]', + 'eccentricity': 'e', + + # Planet structure + 'R_int': 'Radius [m]', + 'M_int': 'Interior Mass [kg]', + 'M_planet': 'Planet Mass [kg]', + + # Temperatures + 'T_surf': 'T_{\\rm surf}\\,[\\mathrm{K}]', + 'T_magma': 'T_{\\rm magma}\\,[\\mathrm{K}]', + 'T_eqm': 'T_{\\rm eqm}\\,[\\mathrm{K}]', + 'T_skin': 'T_{\\rm skin}\\,[\\mathrm{K}]', + + # Planet interior properties + 'Phi_global': 'Melt fraction', + + # Planet observational properties + 'R_obs': 'Transit radius [R_\\oplus]', + 'rho_obs': 'Transit bulk density [kg/m^3]', + + # Atmospheric composition from outgassing + 'M_atm': 'Atmosphere mass [kg]', + 'P_surf': 'P_{\\rm surf}\\,[bar]', + 'atm_kg_per_mol': 'Atmosphere mean molar mass [kg/mol]', +} + def _generate_colour(gas: str): """ From 36dcf4b61ce5887a83a74f48079254d332302b85 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 15:45:35 +0200 Subject: [PATCH 087/105] update the label for plotting --- input/ensembles/example.grid_test_ppgrid.toml | 1 - src/proteus/grid/post_processing.py | 25 +++++++++++++++++-- src/proteus/utils/plot.py | 8 +++--- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/input/ensembles/example.grid_test_ppgrid.toml b/input/ensembles/example.grid_test_ppgrid.toml index 38ce42616..908ba9774 100644 --- a/input/ensembles/example.grid_test_ppgrid.toml +++ b/input/ensembles/example.grid_test_ppgrid.toml @@ -53,7 +53,6 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 
3) log_scale = false [input_parameters.delivery.elements.H_ppmw] - label = "[H] [ppmw]" log_scale = false # Output variables configuration for ECDF plot (1 column per output variable) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index e40d322e8..7422a3813 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -11,6 +11,8 @@ import toml from matplotlib import cm +from proteus.utils.plot import _preset_labels + # --------------------------------------------------------- # Data loading, extraction, and CSV generation functions # --------------------------------------------------------- @@ -325,6 +327,25 @@ def include_case(status: str, mode: str) -> bool: # Plotting functions # --------------------------------------------------------- +def get_label(quant): + """" + Get label for a given quantity, using preset labels if available. + Parameters + ---------- + quant : str + Quantity for which to get label. + + Returns + ------- + str + Label for the quantity. 
+    """ + + if quant in _preset_labels: + return _preset_labels[quant] + else: + return quant + def plot_grid_status(df: pd.DataFrame, cfg: dict, grid_dir: str | Path, grid_name: str): """ Plot histogram summary of number of simulation statuses in @@ -505,7 +526,7 @@ def load_ecdf_plot_settings(cfg): param_settings = {} for key, val in flat_params.items(): param_settings[key] = { - "label": val.get("label", key), + "label": _preset_labels.get(key, val.get("label", key)), "colormap": default_cmap, "log_scale": val.get("log_scale", False), } @@ -513,7 +534,7 @@ output_settings = {} for key, val in cfg.get("output_variables", {}).items(): output_settings[key] = { - "label": val.get("label", key), + "label": _preset_labels.get(key, val.get("label", key)), "log_scale": val.get("log_scale", False), "scale": val.get("scale", 1.0), } diff --git a/src/proteus/utils/plot.py b/src/proteus/utils/plot.py index 9982e89bc..f1b205885 100644 --- a/src/proteus/utils/plot.py +++ b/src/proteus/utils/plot.py @@ -132,7 +132,7 @@ 'eccentricity': 'e', # Planet structure - 'R_int': 'Radius [m]', + 'R_int': 'Interior Radius [m]', 'M_int': 'Interior Mass [kg]', 'M_planet': 'Planet Mass [kg]', @@ -146,13 +146,13 @@ 'Phi_global': 'Melt fraction', # Planet observational properties - 'R_obs': 'Transit radius [R_\\oplus]', - 'rho_obs': 'Transit bulk density [kg/m^3]', + 'R_obs': 'R_{\\rm obs}\\,[R_\\oplus]', + 'rho_obs': '\\rho_{\\rm obs}\\,[\\mathrm{kg/m^3}]', # Atmospheric composition from outgassing 'M_atm': 'Atmosphere mass [kg]', 'P_surf': 'P_{\\rm surf}\\,[bar]', - 'atm_kg_per_mol': 'Atmosphere mean molar mass [kg/mol]', + 'atm_kg_per_mol': 'MMW [kg/mol]', } From 2651831ddb14c761c900751fb0b5994f91875e27 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Tue, 7 Apr 2026 16:22:37 +0200 Subject: [PATCH 088/105] trying to get input param from grid config file --- input/ensembles/example.grid_test_ppgrid.toml | 5 --- src/proteus/grid/post_processing.py | 35 
++++++++++++++++--- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/input/ensembles/example.grid_test_ppgrid.toml b/input/ensembles/example.grid_test_ppgrid.toml index 908ba9774..f69ab5e9d 100644 --- a/input/ensembles/example.grid_test_ppgrid.toml +++ b/input/ensembles/example.grid_test_ppgrid.toml @@ -48,12 +48,7 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 3) colormap = "viridis" # Default colormap for all input parameters - [input_parameters.struct.mass_tot] - label = "Mass [M_\\oplus]" - log_scale = false - [input_parameters.delivery.elements.H_ppmw] - log_scale = false # Output variables configuration for ECDF plot (1 column per output variable) [output_variables] diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 7422a3813..01cee12e6 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -145,13 +145,40 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): # 1. 
Load tested input parameters in the grid raw_params = toml.load(grid_dir / "copy.grid.toml") + print(raw_params) - # Keep only the parameters and their values (ignore 'method' keys) + # Keep only the parameters and their values tested_params = {} + for key, value in raw_params.items(): - if isinstance(value, dict) and "values" in value: - # Only store the 'values' list - tested_params[key] = value["values"] + if isinstance(value, dict) and "method" in value: # filter to only get tested parameters (those with a "method" key) + method = value["method"] + + if method == "direct": + tested_params[key] = value["values"] + + elif method == "linspace": + tested_params[key] = np.linspace( + value['start'], value['stop'], value['count'] + ) + + elif method == "logspace": + tested_params[key] = np.logspace( + np.log10(value['start']), + np.log10(value['stop']), + value['count'] + ) + + elif method == "arange": + arr = list(np.arange(value['start'], value['stop'], value['step'])) + # Ensure endpoint is included + if not np.isclose(arr[-1], value['stop']): + arr.append(value['stop']) + tested_params[key] = np.array(arr, dtype=float) + + else: + print(f"⚠️ Unknown method for {key}: {method}") + continue grid_param_paths = list(tested_params.keys()) From 3bfaa3637d0b09a2c5523e0486f969383420a257 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 12:25:04 +0200 Subject: [PATCH 089/105] update so input param are not duplicate for ecdf grid plot --- input/ensembles/example.grid_test_ppgrid.toml | 72 +--------- src/proteus/grid/post_processing.py | 130 ++++++++++-------- 2 files changed, 76 insertions(+), 126 deletions(-) diff --git a/input/ensembles/example.grid_test_ppgrid.toml b/input/ensembles/example.grid_test_ppgrid.toml index f69ab5e9d..8472de0ca 100644 --- a/input/ensembles/example.grid_test_ppgrid.toml +++ b/input/ensembles/example.grid_test_ppgrid.toml @@ -7,11 +7,12 @@ output = "scratch/grid_demo/" symlink = "" # Post-processing options -update_csv = true # 
Whether to update the summary CSV files before plotting -plot_format = "png" # Format for saving plots ("png" or "pdf") -plot_status = true # Generate status summary plot of the grid -plot_ecdf = true # Generate ECDF grid plot for input parameters tested in the grid and output_variables defined below -#output_variables = ["solidification_time", "Phi_global", "T_surf", "P_surf", "atm_kg_per_mol", "esc_rate_total", "rho_obs"] # List of output variables to include in ECDF plot +update_csv = false # Whether to update the summary CSV files before plotting +plot_format = "png" # Format for saving plots ("png" or "pdf") +plot_status = false # Generate status summary plot of the grid +plot_ecdf = true # Generate ECDF grid plot for input parameters tested in the grid and output_variables defined below +colormap = "viridis" # Colormap for ECDF plot (e.g. "viridis", "plasma", "inferno", "magma", etc.) +output_variables = ["solidification_time", "Phi_global", "T_surf", "P_surf", "atm_kg_per_mol", "esc_rate_total", "rho_obs"] # List of output variables to include in ECDF plot # Path to base (reference) config file relative to PROTEUS root folder ref_config = "input/demos/dummy.toml" @@ -41,64 +42,3 @@ max_mem = 3 # maximum memory per CPU in GB (e.g. 
3) start = 1 stop = 100 step = 50 - - -# Input parameters configuration for ECDF plot (1 row per input parameter) -[input_parameters] - - colormap = "viridis" # Default colormap for all input parameters - - - -# Output variables configuration for ECDF plot (1 column per output variable) -[output_variables] - - [output_variables.solidification_time] - label = "Solidification [yr]" # label of the output variable on x-axis - log_scale = true # whether to use log scale or not on x-axis - scale = 1.0 # scaling factor to apply to the output variable values - - [output_variables.Phi_global] - label = "Melt fraction [%]" - log_scale = false - scale = 100.0 # convert melt fraction to percentage - - [output_variables.T_surf] - label = "T_{\\rm surf}\\,[10^{3}\\,\\mathrm{K}]" - log_scale = false - scale = 0.001 # convert K to 10^3 K for better readability on plot - - [output_variables.P_surf] - label = "P_{\\rm surf}\\,[\\mathrm{bar}]" - log_scale = true - scale = 1.0 - - [output_variables.atm_kg_per_mol] - label = "MMW [g/mol]" - log_scale = false - scale = 1000.0 # convert kg/mol to g/mol - - [output_variables.esc_rate_total] - label = "Escape rate [g/s]" - log_scale = true - scale = 1000.0 - - [output_variables.bond_albedo] - label = "Bond albedo" - log_scale = false - scale = 1.0 - - [output_variables.rho_obs] - label = "\\rho_{\\rm obs}\\,[\\mathrm{g/cm^3}]" - log_scale = false - scale = 0.001 # convert kg/m^3 to g/cm^3 - - [output_variables.R_obs] - label = "R_{\\rm obs}\\,[R_\\oplus]" # in units of Earth radii - log_scale = false - scale = 1.56961231e-7 # convert m to Earth radii - - [output_variables.M_planet] - label = "M_{\\rm p}\\,[M_\\oplus]" # in units of Earth masses - log_scale = false - scale = 1.6744809e-25 # convert kg to Earth masses diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 01cee12e6..70c03292a 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -1,5 +1,6 @@ from 
__future__ import annotations +import time import tomllib from pathlib import Path @@ -13,10 +14,20 @@ from proteus.utils.plot import _preset_labels + +# Time function +def timed(func): + def wrapper(*args, **kwargs): + start = time.time() + result = func(*args, **kwargs) + print(f"{func.__name__} took {time.time() - start:.2f} s") + return result + return wrapper + # --------------------------------------------------------- # Data loading, extraction, and CSV generation functions # --------------------------------------------------------- - +@timed def get_grid_name(grid_path: str | Path) -> str: """ Returns the grid name (last part of the path) from the given grid path. @@ -35,7 +46,7 @@ def get_grid_name(grid_path: str | Path) -> str: if not grid_path.is_dir(): raise ValueError(f"{grid_path} is not a valid directory") return grid_path.name - +@timed def load_grid_cases(grid_dir: Path): """ Load information for each simulation of a PROTEUS grid. @@ -119,7 +130,7 @@ def load_grid_cases(grid_dir: Path): print('-----------------------------------------------------------') return combined_data - +@timed def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): """ Extract tested grid parameters per case using: @@ -145,7 +156,6 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): # 1. Load tested input parameters in the grid raw_params = toml.load(grid_dir / "copy.grid.toml") - print(raw_params) # Keep only the parameters and their values tested_params = {} @@ -203,7 +213,7 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): case_params[idx] = params_for_case return case_params, tested_params - +@timed def load_phi_crit(grid_dir: str | Path): """ Load the critical melt fraction (phi_crit) from the reference configuration file of the grid. 
@@ -234,7 +244,7 @@ def load_phi_crit(grid_dir: str | Path): raise KeyError("phi_crit not found in ref_config.toml") return phi_crit - +@timed def extract_solidification_time(cases_data: list, grid_dir: str | Path): """ Extract solidification time for each simulation of the grid for @@ -285,7 +295,7 @@ def extract_solidification_time(cases_data: list, grid_dir: str | Path): solidification_times.append(np.nan) return solidification_times - +@timed def generate_summary_csv( cases_data: list, case_params: dict, @@ -353,7 +363,7 @@ def include_case(status: str, mode: str) -> bool: # --------------------------------------------------------- # Plotting functions # --------------------------------------------------------- - +@timed def get_label(quant): """" Get label for a given quantity, using preset labels if available. @@ -369,10 +379,10 @@ def get_label(quant): """ if quant in _preset_labels: - return _preset_labels(quant) + return _preset_labels[quant] else: return quant - +@timed def plot_grid_status(df: pd.DataFrame, cfg: dict, grid_dir: str | Path, grid_name: str): """ Plot histogram summary of number of simulation statuses in @@ -468,7 +478,7 @@ def plot_grid_status(df: pd.DataFrame, cfg: dict, grid_dir: str | Path, grid_nam output_file = output_dir / f"summary_grid_statuses_{grid_name}.{plot_format}" plt.savefig(output_file, dpi=300, bbox_inches='tight') plt.close() - +@timed def flatten_input_parameters(d: dict, parent_key: str = "") -> dict: """ Flattens a nested input-parameter dictionary from a TOML configuration @@ -505,8 +515,8 @@ def flatten_input_parameters(d: dict, parent_key: str = "") -> dict: flat.update(flatten_input_parameters(v, new_key)) return flat - -def load_ecdf_plot_settings(cfg): +@timed +def load_ecdf_plot_settings(cfg, tested_params=None): """ Load ECDF plotting settings for both input parameters and output variables from a configuration dictionary loaded from TOML. 
@@ -516,6 +526,9 @@ def load_ecdf_plot_settings(cfg): cfg : dict Configuration dictionary loaded from a TOML file. + tested_params : dict, optional + Dictionary of tested grid parameters and their grid values (directly from copy.grid.toml). + Returns ------- param_settings : dict @@ -543,33 +556,43 @@ def load_ecdf_plot_settings(cfg): """ - # Load input parameter settings - raw_params = cfg["input_parameters"] - default_cmap = getattr(cm, raw_params.get("colormap", "viridis")) + if tested_params is None or len(tested_params) == 0: + raise ValueError("No tested parameters found for ECDF plotting") - # Flatten input parameters dictionary - flat_params = flatten_input_parameters(raw_params) + # Optional colormap from config + cmap_name = cfg.get("input_parameters", {}).get("colormap", "viridis") + default_cmap = getattr(cm, cmap_name, cm.viridis) - param_settings = {} - for key, val in flat_params.items(): - param_settings[key] = { - "label": _preset_labels.get(key, val.get("label", key)), + # Build parameter settings from config + param_settings = { + key: { + "label": _preset_labels.get(key, key), "colormap": default_cmap, - "log_scale": val.get("log_scale", False), + "log_scale": False, } + for key in tested_params + } + # Build output settings from config output_settings = {} - for key, val in cfg.get("output_variables", {}).items(): + output_list = cfg.get("output_variables", []) + + for key in output_list: output_settings[key] = { - "label": _preset_labels.get(key, val.get("label", key)), - "log_scale": val.get("log_scale", False), - "scale": val.get("scale", 1.0), + "label": _preset_labels.get(key, key), + "log_scale": False, # default + "scale": 1.0, # default } + # Extract plot format plot_format = cfg.get("plot_format") return param_settings, output_settings, plot_format - +@timed +def clean_series(s): + '''Cleans a pandas Series by replacing inf values with NaN and dropping NaN values.''' + return s.replace([np.inf, -np.inf], np.nan).dropna().loc[lambda x: 
x > 0] +@timed def group_output_by_parameter(df, grid_parameters, outputs): """ Groups output values (like P_surf) by one or more grid parameters. @@ -598,23 +621,21 @@ def group_output_by_parameter(df, grid_parameters, outputs): value_dict = {} for param_value in df[param].dropna().unique(): subset = df[df[param] == param_value] - output_values = subset[output].replace([np.inf, -np.inf], np.nan) # Replace inf with NaN - output_values = output_values.dropna() # Remove NaN values - output_values = output_values[output_values > 0] # Keep only positive values + output_values = clean_series(subset[output]) value_dict[param_value] = output_values grouped[key_name] = value_dict return grouped - +@timed def latex(label: str) -> str: """ Wraps a label in dollar signs for LaTeX formatting if it contains a backslash. """ return f"${label}$" if "\\" in label else label - -def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: dict, plot_format: str, grid_dir: str | Path, grid_name: str): +@timed +def ecdf_grid_plot(tested_params: dict, grouped_data: dict, param_settings: dict, output_settings: dict, plot_format: str, grid_dir: str | Path, grid_name: str): """ Creates ECDF grid plots where each row corresponds to one input parameter and each column corresponds to one output. Saves the resulting figure as a {plot_format}. @@ -622,6 +643,9 @@ def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: di Parameters ---------- + tested_params : dict + Dictionary of tested grid parameters and their grid values (directly from copy.grid.toml). + grouped_data : dict Dictionary where each key is of the form '[output]_per_[parameter]', and each value is a dict {param_value: [output_values]}. 
@@ -648,14 +672,6 @@ def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: di """ # Load tested grid parameters - with open(grid_dir / "copy.grid.toml", "r") as f: - raw_params = toml.load(f) - tested_params = {} - for key, value in raw_params.items(): - if isinstance(value, dict) and "values" in value: - #print(key, value["values"]) - # Only store the 'values' list - tested_params[key] = value["values"] grid_params = tested_params # List of parameter names (rows) and output names (columns) @@ -670,7 +686,7 @@ def ecdf_grid_plot(grouped_data: dict, param_settings: dict, output_settings: di # Loop through parameters (rows) and outputs (columns) for i, param_name in enumerate(param_names): tested_param = grid_params.get(param_name, []) - if not tested_param: + if tested_param is None or len(tested_param) == 0: print(f"⚠️ Skipping {param_name} — no tested values found in grid_params") continue settings = param_settings[param_name] @@ -712,9 +728,7 @@ def color_func(v): ha='left', color='black', bbox=dict(facecolor='white', edgecolor='silver', boxstyle='round,pad=0.2', alpha=0.8) - ) - - # Plot one ECDF per tested parameter value + ) # Plot one ECDF per tested parameter value for val in tested_param: data_key = f"{output_name}_per_{param_name}" if val not in grouped_data.get(data_key, {}): @@ -782,6 +796,7 @@ def color_func(v): # --------------------------------------------------------- # main # --------------------------------------------------------- +@timed def main(grid_analyse_toml_file: str | Path): # Load configuration from grid_analyse.toml @@ -793,9 +808,6 @@ def main(grid_analyse_toml_file: str | Path): print(f"Grid path: {grid_path}") grid_name = get_grid_name(grid_path) - #print(grid_path) - print(f"Analyzing grid: {grid_name}") - # Load grid data data = load_grid_cases(grid_path) input_param_grid_per_case, tested_params_grid = get_tested_grid_parameters( @@ -832,18 +844,16 @@ def main(grid_analyse_toml_file: str | Path): # --- ECDF 
plots --- if cfg.get("plot_ecdf", True): completed_simulations_data_csv = pd.read_csv(summary_csv_completed, sep="\t") - columns_output = list(cfg["output_variables"].keys()) - grouped_data = {} - for col in columns_output: - group = group_output_by_parameter( - completed_simulations_data_csv, - tested_params_grid, - [col], - ) - grouped_data.update(group) - - param_settings_grid, output_settings_grid, plot_format = load_ecdf_plot_settings(cfg) + columns_output = cfg["output_variables"] + grouped_data = group_output_by_parameter( + completed_simulations_data_csv, + list(tested_params_grid.keys()), + columns_output, + ) + + param_settings_grid, output_settings_grid, plot_format = load_ecdf_plot_settings(cfg, tested_params_grid) ecdf_grid_plot( + tested_params_grid, grouped_data, param_settings_grid, output_settings_grid, From 875dee92476738bc961a718bd56d85184782db1b Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 16:24:18 +0200 Subject: [PATCH 090/105] update scale log scale preset and update post_processing so it works with all suggestions from last PR --- input/ensembles/example.grid_test_ppgrid.toml | 2 +- src/proteus/grid/post_processing.py | 235 ++++++++++++------ src/proteus/utils/plot.py | 165 +++++++++++- 3 files changed, 316 insertions(+), 86 deletions(-) diff --git a/input/ensembles/example.grid_test_ppgrid.toml b/input/ensembles/example.grid_test_ppgrid.toml index 8472de0ca..c3e981160 100644 --- a/input/ensembles/example.grid_test_ppgrid.toml +++ b/input/ensembles/example.grid_test_ppgrid.toml @@ -12,7 +12,7 @@ plot_format = "png" # Format for saving plots ("png" or "pdf") plot_status = false # Generate status summary plot of the grid plot_ecdf = true # Generate ECDF grid plot for input parameters tested in the grid and output_variables defined below colormap = "viridis" # Colormap for ECDF plot (e.g. "viridis", "plasma", "inferno", "magma", etc.) 
-output_variables = ["solidification_time", "Phi_global", "T_surf", "P_surf", "atm_kg_per_mol", "esc_rate_total", "rho_obs"] # List of output variables to include in ECDF plot +output_variables = ["solidification_time", "Phi_global", "T_surf", "P_surf", "atm_kg_per_mol", "esc_rate_total", "rho_obs", "H2_bar"] # List of output variables to include in ECDF plot # Path to base (reference) config file relative to PROTEUS root folder ref_config = "input/demos/dummy.toml" diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 70c03292a..55cce3a8d 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -1,6 +1,5 @@ from __future__ import annotations -import time import tomllib from pathlib import Path @@ -12,22 +11,12 @@ import toml from matplotlib import cm -from proteus.utils.plot import _preset_labels - - -# Time function -def timed(func): - def wrapper(*args, **kwargs): - start = time.time() - result = func(*args, **kwargs) - print(f"{func.__name__} took {time.time() - start:.2f} s") - return result - return wrapper +from proteus.utils.plot import _preset_labels, _preset_log_scales, _preset_scales # --------------------------------------------------------- # Data loading, extraction, and CSV generation functions # --------------------------------------------------------- -@timed + def get_grid_name(grid_path: str | Path) -> str: """ Returns the grid name (last part of the path) from the given grid path. @@ -46,7 +35,7 @@ def get_grid_name(grid_path: str | Path) -> str: if not grid_path.is_dir(): raise ValueError(f"{grid_path} is not a valid directory") return grid_path.name -@timed + def load_grid_cases(grid_dir: Path): """ Load information for each simulation of a PROTEUS grid. 
@@ -130,7 +119,7 @@ def load_grid_cases(grid_dir: Path): print('-----------------------------------------------------------') return combined_data -@timed + def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): """ Extract tested grid parameters per case using: @@ -195,25 +184,26 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): # 2.Extract those parameters from loaded cases for each case of the grid case_params = {} - for idx, case in enumerate(cases_data): - params_for_case = {} - init_params = case["init_parameters"] + if cases_data: + for idx, case in enumerate(cases_data): + params_for_case = {} + init_params = case["init_parameters"] - for path in grid_param_paths: - keys = path.split(".") - val = init_params + for path in grid_param_paths: + keys = path.split(".") + val = init_params - try: - for k in keys: - val = val[k] - params_for_case[path] = val - except (KeyError, TypeError): - params_for_case[path] = None + try: + for k in keys: + val = val[k] + params_for_case[path] = val + except (KeyError, TypeError): + params_for_case[path] = None - case_params[idx] = params_for_case + case_params[idx] = params_for_case return case_params, tested_params -@timed + def load_phi_crit(grid_dir: str | Path): """ Load the critical melt fraction (phi_crit) from the reference configuration file of the grid. @@ -244,7 +234,7 @@ def load_phi_crit(grid_dir: str | Path): raise KeyError("phi_crit not found in ref_config.toml") return phi_crit -@timed + def extract_solidification_time(cases_data: list, grid_dir: str | Path): """ Extract solidification time for each simulation of the grid for @@ -295,7 +285,43 @@ def extract_solidification_time(cases_data: list, grid_dir: str | Path): solidification_times.append(np.nan) return solidification_times -@timed + +def validate_output_variables(df: pd.DataFrame, requested_outputs: list): + """ + Check that requested output variables exist in the DataFrame. 
+ + Parameters + ---------- + df : pd.DataFrame + DataFrame loaded from runtime_helpfile.csv (or summary CSV). + + requested_outputs : list + List of output variable names from config. + + Returns + ------- + valid_outputs : list + Outputs that exist in the DataFrame. + """ + + available = list(df.columns) + valid_outputs = [] + + for var in requested_outputs: + if var not in available: + # Find index of 'Time' column + try: + idx = available.index("Time") + except ValueError: + idx = 0 # fallback if Time is not in columns + + print(f"WARNING: Output variable '{var}' not found in data.") + print(f"Available columns include: {available[idx:]}") # show all columns starting from 'Time' + else: + valid_outputs.append(var) + + return valid_outputs + def generate_summary_csv( cases_data: list, case_params: dict, @@ -363,26 +389,66 @@ def include_case(status: str, mode: str) -> bool: # --------------------------------------------------------- # Plotting functions # --------------------------------------------------------- -@timed + def get_label(quant): - """" - Get label for a given quantity, using preset labels if available. - Parameters - ---------- - quant : str - Quantity for which to get label. - - Returns - ------- - str - Label for the quantity. - """ - - if quant in _preset_labels: - return _preset_labels[quant] - else: - return quant -@timed + """ + Get label for a given quantity, using preset labels if available. + If not found in _preset_labels, use the last part of the dot-separated path. + + Parameters + ---------- + quant : str + Quantity for which to get label. + + Returns + ------- + str + Label for the quantity. + """ + if quant in _preset_labels: + return _preset_labels[quant] + else: + # Take only the last part after the last dot + return quant.split('.')[-1] + +def get_scale(quant): + """ + Get scale factor for a given quantity, using preset scales if available. + Parameters + ---------- + quant : str + Quantity for which to get scale factor. 
+ + Returns + ------- + float + Scale factor for the quantity. + """ + + if quant in _preset_scales: + return _preset_scales[quant] + else: + return 1.0 + +def get_log_scale(quant): + """ + Get log scale flag for a given quantity, using preset log scales if available. + Parameters + ---------- + quant : str + Quantity for which to get log scale flag. + + Returns + ------- + bool + Log scale flag for the quantity. + """ + + if quant in _preset_log_scales: + return _preset_log_scales[quant] + else: + return False + def plot_grid_status(df: pd.DataFrame, cfg: dict, grid_dir: str | Path, grid_name: str): """ Plot histogram summary of number of simulation statuses in @@ -478,7 +544,7 @@ def plot_grid_status(df: pd.DataFrame, cfg: dict, grid_dir: str | Path, grid_nam output_file = output_dir / f"summary_grid_statuses_{grid_name}.{plot_format}" plt.savefig(output_file, dpi=300, bbox_inches='tight') plt.close() -@timed + def flatten_input_parameters(d: dict, parent_key: str = "") -> dict: """ Flattens a nested input-parameter dictionary from a TOML configuration @@ -515,7 +581,7 @@ def flatten_input_parameters(d: dict, parent_key: str = "") -> dict: flat.update(flatten_input_parameters(v, new_key)) return flat -@timed + def load_ecdf_plot_settings(cfg, tested_params=None): """ Load ECDF plotting settings for both input parameters and output variables @@ -566,9 +632,9 @@ def load_ecdf_plot_settings(cfg, tested_params=None): # Build parameter settings from config param_settings = { key: { - "label": _preset_labels.get(key, key), + "label": get_label(key), "colormap": default_cmap, - "log_scale": False, + "log_scale": get_log_scale(key) } for key in tested_params } @@ -579,20 +645,20 @@ def load_ecdf_plot_settings(cfg, tested_params=None): for key in output_list: output_settings[key] = { - "label": _preset_labels.get(key, key), - "log_scale": False, # default - "scale": 1.0, # default + "label": get_label(key), + "log_scale": get_log_scale(key), + "scale": get_scale(key), } # 
Extract plot format plot_format = cfg.get("plot_format") return param_settings, output_settings, plot_format -@timed + def clean_series(s): '''Cleans a pandas Series by replacing inf values with NaN and dropping NaN values.''' return s.replace([np.inf, -np.inf], np.nan).dropna().loc[lambda x: x > 0] -@timed + def group_output_by_parameter(df, grid_parameters, outputs): """ Groups output values (like P_surf) by one or more grid parameters. @@ -621,20 +687,20 @@ def group_output_by_parameter(df, grid_parameters, outputs): value_dict = {} for param_value in df[param].dropna().unique(): subset = df[df[param] == param_value] - output_values = clean_series(subset[output]) + output_values = clean_series(subset[output]) * get_scale(output) value_dict[param_value] = output_values grouped[key_name] = value_dict return grouped -@timed + def latex(label: str) -> str: """ Wraps a label in dollar signs for LaTeX formatting if it contains a backslash. """ return f"${label}$" if "\\" in label else label -@timed + def ecdf_grid_plot(tested_params: dict, grouped_data: dict, param_settings: dict, output_settings: dict, plot_format: str, grid_dir: str | Path, grid_name: str): """ Creates ECDF grid plots where each row corresponds to one input parameter @@ -687,7 +753,7 @@ def ecdf_grid_plot(tested_params: dict, grouped_data: dict, param_settings: dict for i, param_name in enumerate(param_names): tested_param = grid_params.get(param_name, []) if tested_param is None or len(tested_param) == 0: - print(f"⚠️ Skipping {param_name} — no tested values found in grid_params") + print(f"Skipping {param_name} — no tested values found in grid_params") continue settings = param_settings[param_name] @@ -728,23 +794,29 @@ def color_func(v): ha='left', color='black', bbox=dict(facecolor='white', edgecolor='silver', boxstyle='round,pad=0.2', alpha=0.8) - ) # Plot one ECDF per tested parameter value + ) + + # Plot one ECDF per tested parameter value for val in tested_param: data_key = 
f"{output_name}_per_{param_name}" - if val not in grouped_data.get(data_key, {}): + # if val not in grouped_data.get(data_key, {}): + # continue + # raw = np.array(grouped_data[data_key][val]) * out_settings["scale"] + data_dict = grouped_data.get(data_key, {}) + if val not in data_dict: continue - raw = np.array(grouped_data[data_key][val]) * out_settings.get("scale", 1.0) + raw = np.array(data_dict[val]) #* out_settings["scale"] # Plot ECDF sns.ecdfplot( data=raw, - log_scale=out_settings.get("log_scale", False), + #log_scale=out_settings["log_scale"], stat="percent", color=color_func(val), linewidth=4, linestyle='-', ax=ax - ) + ) # Configure x-axis labels, ticks, grids if i == n_rows - 1: @@ -769,6 +841,10 @@ def color_func(v): ax.grid(alpha=0.4) + # Configure log scale for x-axis if needed + if out_settings["log_scale"]: + ax.set_xscale("log") + # After plotting all outputs for this parameter (row), add colorbar or legend if colorbar_needed: # colorbar for numeric parameters sm = mpl.cm.ScalarMappable(cmap=settings["colormap"], norm=norm) @@ -796,7 +872,7 @@ def color_func(v): # --------------------------------------------------------- # main # --------------------------------------------------------- -@timed + def main(grid_analyse_toml_file: str | Path): # Load configuration from grid_analyse.toml @@ -808,11 +884,6 @@ def main(grid_analyse_toml_file: str | Path): print(f"Grid path: {grid_path}") grid_name = get_grid_name(grid_path) - # Load grid data - data = load_grid_cases(grid_path) - input_param_grid_per_case, tested_params_grid = get_tested_grid_parameters( - data, grid_path - ) # --- Summary CSVs --- update_csv = cfg.get("update_csv", True) @@ -823,6 +894,13 @@ def main(grid_analyse_toml_file: str | Path): summary_csv_running_error = summary_dir / f"{grid_name}_final_extracted_data_running_error.csv" if update_csv: + # Load grid data + data = load_grid_cases(grid_path) + input_param_grid_per_case, tested_params_grid = get_tested_grid_parameters( + 
data, grid_path + ) + + # Write CSV generate_summary_csv( data, input_param_grid_per_case, grid_path, grid_name ) @@ -834,6 +912,8 @@ def main(grid_analyse_toml_file: str | Path): f"{f.name} not found in {summary_dir}, " "but update_csv is set to False. Please set update_csv to True to generate it." ) + # Only load tested parameters from grid config + _, tested_params_grid = get_tested_grid_parameters([], grid_path) # --- Plot grid status --- if cfg.get("plot_status", True): @@ -844,7 +924,12 @@ def main(grid_analyse_toml_file: str | Path): # --- ECDF plots --- if cfg.get("plot_ecdf", True): completed_simulations_data_csv = pd.read_csv(summary_csv_completed, sep="\t") - columns_output = cfg["output_variables"] + columns_output = validate_output_variables( + completed_simulations_data_csv, + cfg["output_variables"] + ) + if len(columns_output) == 0: + raise ValueError("No valid output variables found. Check your config file.") grouped_data = group_output_by_parameter( completed_simulations_data_csv, list(tested_params_grid.keys()), diff --git a/src/proteus/utils/plot.py b/src/proteus/utils/plot.py index f1b205885..cb265af2f 100644 --- a/src/proteus/utils/plot.py +++ b/src/proteus/utils/plot.py @@ -10,6 +10,7 @@ import numpy as np from proteus.utils.archive import archive_exists +from proteus.utils.constants import M_earth, R_earth from proteus.utils.helper import mol_to_ele log = logging.getLogger('fwl.' 
+ __name__) @@ -94,8 +95,8 @@ 'orbit.eccentricity': 'e', # Structure module - 'struct.mass_tot': 'Mass [M_\\oplus]', - 'struct.radius_int': 'Radius [R_\\oplus]', + 'struct.mass_tot': 'M_{\\mathrm{tot}} [M_\\oplus]', + 'struct.radius_int': 'R_{\\mathrm{int}} [R_\\oplus]', 'struct.corefrac': 'CRF', # Atmosphere module @@ -106,7 +107,7 @@ 'escape.zephyrus.Pxuv': 'P_{\\rm XUV}\\,[bar]', # Outgassing module - 'outgas.fO2_shift_IW': '\\Delta\\,IW', + 'outgas.fO2_shift_IW': '\\Delta\\,\\rm IW', # Delivery module 'delivery.elements.H_oceans': 'H [Earth oceans]', @@ -132,9 +133,9 @@ 'eccentricity': 'e', # Planet structure - 'R_int': 'Interior Radius [m]', - 'M_int': 'Interior Mass [kg]', - 'M_planet': 'Planet Mass [kg]', + 'R_int': 'R_{\\mathrm{int}} [R_\\oplus]', + 'M_int': 'M_{\\mathrm{int}} [M_\\oplus]', + 'M_planet': 'M_{\\mathrm{planet}} [M_\\oplus]', # Temperatures 'T_surf': 'T_{\\rm surf}\\,[\\mathrm{K}]', @@ -143,18 +144,162 @@ 'T_skin': 'T_{\\rm skin}\\,[\\mathrm{K}]', # Planet interior properties - 'Phi_global': 'Melt fraction', + 'Phi_global': 'Melt fraction [%]', # Planet observational properties 'R_obs': 'R_{\\rm obs}\\,[R_\\oplus]', - 'rho_obs': '\\rho_{\\rm obs}\\,[\\mathrm{kg/m^3}]', + 'rho_obs': '\\rho_{\\rm obs}\\,[\\mathrm{g/m^3}]', # Atmospheric composition from outgassing 'M_atm': 'Atmosphere mass [kg]', - 'P_surf': 'P_{\\rm surf}\\,[bar]', - 'atm_kg_per_mol': 'MMW [kg/mol]', + 'P_surf': 'P_{\\rm surf}\\,[\\mathrm{bar}]', + 'atm_kg_per_mol': 'MMW [g/mol]', + + # Atmospheric escape + 'esc_rate_total': 'Escape rate [g/s]', +} + +_preset_scales = { + ## Input parameters (from input.toml files) + # Orbit module + 'orbit.semimajoraxis': 1.0, + 'orbit.eccentricity': 1.0, + + # Structure module + 'struct.mass_tot': 1.0, + 'struct.radius_int': 1.0, + 'struct.corefrac': 1.0, + + # Atmosphere module + 'atmos_clim.module': 1.0, + + # Escape module + 'escape.zephyrus.efficiency': 1.0, + 'escape.zephyrus.Pxuv': 1.0, + + # Outgassing module + 'outgas.fO2_shift_IW': 
1.0, + + # Delivery module + 'delivery.elements.H_oceans': 1.0, + 'delivery.elements.H_ppmw': 1.0, + 'delivery.elements.H_kg': 1.0, + 'delivery.elements.CH_ratio': 1.0, + 'delivery.elements.C_ppmw': 1.0, + 'delivery.elements.C_kg': 1.0, + 'delivery.elements.NH_ratio': 1.0, + 'delivery.elements.N_ppmw': 1.0, + 'delivery.elements.N_kg': 1.0, + 'delivery.elements.SH_ratio': 1.0, + 'delivery.elements.S_ppmw': 1.0, + 'delivery.elements.S_kg': 1.0, + + ## Output variables (from runtime_helpfile.csv) + # Model tracking + 'Time': 1.0, + 'solidification_time': 1.0, # computed in post-processing script, not in runtime_helpfile.csv + + # Orbital parameters + 'semimajorax': 1.0, + 'eccentricity': 1.0, + + # Planet structure + 'R_int': 1.0 / R_earth, + 'M_int': 1.0 / M_earth, + 'M_planet': 1.0 / M_earth, + + # Temperatures + 'T_surf': 1.0, + 'T_magma': 1.0, + 'T_eqm': 1.0, + 'T_skin': 1.0, + + # Planet interior properties + 'Phi_global': 100.0, + + # Planet observational properties + 'R_obs': 1.0 / R_earth, + 'rho_obs': 0.001, + + # Atmospheric composition from outgassing + 'M_atm': 1.0, + 'P_surf': 1.0, + 'atm_kg_per_mol': 1000.0, + + # Atmospheric escape + 'esc_rate_total': 1000.0, } +_preset_log_scales = { + ## Input parameters (from input.toml files) + # Orbit module + 'orbit.semimajoraxis': False, + 'orbit.eccentricity': False, + + # Structure module + 'struct.mass_tot': False, + 'struct.radius_int': False, + 'struct.corefrac': False, + + # Atmosphere module + 'atmos_clim.module': False, + + # Escape module + 'escape.zephyrus.efficiency': True, + 'escape.zephyrus.Pxuv': True, + + # Outgassing module + 'outgas.fO2_shift_IW': False, + + # Delivery module + 'delivery.elements.H_oceans': True, + 'delivery.elements.H_ppmw': True, + 'delivery.elements.H_kg': True, + 'delivery.elements.CH_ratio': True, + 'delivery.elements.C_ppmw': True, + 'delivery.elements.C_kg': True, + 'delivery.elements.NH_ratio': True, + 'delivery.elements.N_ppmw': True, + 'delivery.elements.N_kg': True, + 
'delivery.elements.SH_ratio': True, + 'delivery.elements.S_ppmw': True, + 'delivery.elements.S_kg': True, + + ## Output variables (from runtime_helpfile.csv) + # Model tracking + 'Time': True, + 'solidification_time': True, # computed in post-processing script, not in runtime_helpfile.csv + + # Orbital parameters + 'semimajorax': False, + 'eccentricity': False, + + # Planet structure + 'R_int': False, + 'M_int': False, + 'M_planet': False, + + # Temperatures + 'T_surf': False, + 'T_magma': False, + 'T_eqm': False, + 'T_skin': False, + + # Planet interior properties + 'Phi_global': False, + + # Planet observational properties + 'R_obs': False, + 'rho_obs': False, + + # Atmospheric composition from outgassing + 'M_atm': False, + 'P_surf': True, + 'atm_kg_per_mol': False, + + # Atmospheric escape + 'esc_rate_total': True, +} def _generate_colour(gas: str): """ From a8196a17910e5661f0b8a02513e7839a5755adf9 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 19:32:17 +0200 Subject: [PATCH 091/105] clean test example.toml files, update docs and create succesfull pytest --- docs/How-to/usage.md | 30 +++++-- input/ensembles/example.grid.toml | 10 +++ input/ensembles/example.grid_analyse.toml | 85 ------------------- input/ensembles/example.grid_test_ppgrid.toml | 44 ---------- tests/grid/dummy.grid.toml | 8 ++ tests/grid/test_grid.py | 16 ++++ 6 files changed, 57 insertions(+), 136 deletions(-) delete mode 100644 input/ensembles/example.grid_analyse.toml delete mode 100644 input/ensembles/example.grid_test_ppgrid.toml diff --git a/docs/How-to/usage.md b/docs/How-to/usage.md index e013ac1a6..908921df6 100644 --- a/docs/How-to/usage.md +++ b/docs/How-to/usage.md @@ -174,21 +174,37 @@ proteus grid-pack -o output/grid_demo/ ``` ## Postprocessing of grid results -Results from a PROTEUS grid can be post-processed using the `proteus grid-analyse` command. This generates ECDF plots that summarize the last time step of all simulation cases in the grid. 
(For more details on ECDF plots, see the [Seaborn `ecdfplot` documentation](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html).) +Results from a PROTEUS grid can be post-processed using the `proteus grid-analyse` command. Running the analysis generates summary CSV files containing all tested input parameters and the final time-step outputs for each simulation, along with a status overview plot showing the distribution of simulation outcomes (e.g., completed, running, error) and empirical cumulative distribution function (ECDF) plots that summarize selected output variables across all completed simulations. (For more details on ECDF plots, see the [Seaborn `ecdfplot` documentation](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html).) -Before running the command, update the `example.grid_analyse.toml` file to match your grid. Specify the input parameters used in your simulations and select the output variables you want to visualize. To post-process a grid and generate ECDF plots for further analysis, run the following command: +Before running the command, configure the post-processing options in your grid configuration file: `input/ensembles/example.grid.toml`. The following options can be set: + +- `update_csv` — Generate or update the CSV summary files +- `plot_format` — Output format for plots (e.g., `png`, `pdf`) +- `plot_status` — Enable/disable the status summary plot +- `plot_ecdf` — Enable/disable ECDF plot generation +- `output_variables` — List of variables to include in ECDF plots + +> **Note:** +> The variables specified in `output_variables` must match column names in `runtime_helpfile.csv`. +> The `solidification_time` is the only variable computed during post-processing and is not directly stored in the simulation output. 
+ +--- + +To post-process a grid and generate ECDF plots for further analysis, run the following command: ``` proteus grid-analyse --config input/ensembles/example.grid_analyse.toml ``` -Executing the command creates a `post_processing` folder inside your grid directory containing all post-processing outputs: -- Extracted data: CSV files with simulation status, input parameters, and output values at the last time step are stored in: - `post_processing/extracted_data/` -- Plots: Status summaries and ECDF grid plots are saved in: - `post_processing/grid_plots/` +After execution, a `post_processing/` directory is created inside the grid folder with the following structure: + +- `extracted_data/` This directory contains three CSV files: + - `{grid_name}_final_extracted_data_all.csv` which includes every run in the grid + - `{grid_name}_final_extracted_data_completed.csv` which contains only successful runs (used for ECDF plots) + - `{grid_name}_final_extracted_data_running_error.csv` for only failed simulations with status `Running` or `Error`. +- `grid_plots/` This directory contains a status summary plot and an ECDF plot ## Retrieval scheme (Bayesian optimisation) diff --git a/input/ensembles/example.grid.toml b/input/ensembles/example.grid.toml index 157b82a58..f6c738004 100644 --- a/input/ensembles/example.grid.toml +++ b/input/ensembles/example.grid.toml @@ -6,6 +6,16 @@ output = "grid_demo/" # Make `output` a symbolic link to this absolute location. To disable: set to empty string. symlink = "" +# Post-processing options +update_csv = true # Whether to update the summary CSV files before plotting +plot_format = "png" # Format for saving plots ("png" or "pdf") +plot_status = true # Generate status summary plot of the grid +plot_ecdf = true # Generate ECDF grid plot for input parameters tested in the grid and output_variables defined below +colormap = "viridis" # Colormap for ECDF plot (e.g. 
"viridis", "managua") +output_variables = ["solidification_time", "Phi_global", + "T_surf", "P_surf", "atm_kg_per_mol", + "esc_rate_total"] # List of output variables to include in ECDF plot (name must match variable name in runtime_helpfile.csv) + # Path to base (reference) config file relative to PROTEUS root folder ref_config = "input/demos/dummy.toml" diff --git a/input/ensembles/example.grid_analyse.toml b/input/ensembles/example.grid_analyse.toml deleted file mode 100644 index 3e36edcf8..000000000 --- a/input/ensembles/example.grid_analyse.toml +++ /dev/null @@ -1,85 +0,0 @@ -# Config file for grid post-processing analysis and plotting - -# Path to grid folder -grid_path = "/projects/p315557/Paper_1/DATA/Grids/escape_grid_1Msun/" - -# Post-processing options -update_csv = true # Whether to update the summary CSV file before plotting -plot_status = true # Generate status summary plots of the grid -plot_ecdf = true # Generate ECDF grid plot for input parameters and output - -# Input parameters configuration for ECDF plot (1 row per input parameter) -[input_parameters] - - colormap = "viridis" # Default colormap for all input parameters - - [input_parameters.atmos_clim.module] - label = "Atmosphere module" # label of the input parameter - log_scale = false # whether to use log scale or not for colorbar depending on parameters range - - [input_parameters.orbit.semimajoraxis] - label = "a [AU]" - log_scale = false - - [input_parameters.escape.zephyrus.efficiency] - label = "\\rm \\epsilon" # TOML file cannot read Latex command, so use double backslash - log_scale = false - - [input_parameters.escape.zephyrus.Pxuv] - label = "P_{\\rm XUV}\\,[\\mathrm{bar}]" - log_scale = true - - [input_parameters.outgas.fO2_shift_IW] - label = "\\Delta IW" - log_scale = false - - [input_parameters.delivery.elements.CH_ratio] - label = "C/H ratio" - log_scale = false - - [input_parameters.delivery.elements.H_oceans] - label = "[H] [oceans]" - log_scale = false - -# Output variables 
configuration for ECDF plot (1 column per output variable) -[output_variables] - - [output_variables.solidification_time] - label = "Solidification [yr]" # label of the output variable on x-axis - log_scale = true # whether to use log scale or not on x-axis - scale = 1.0 # scaling factor to apply to the output variable values - - [output_variables.Phi_global] - label = "Melt fraction [%]" - log_scale = false - scale = 100.0 # convert melt fraction to percentage - - [output_variables.T_surf] - label = "T_{\\rm surf}\\,[10^{3}\\,\\mathrm{K}]" - log_scale = false - scale = 0.001 # convert K to 10^3 K for better readability on plot - - [output_variables.P_surf] - label = "P_{\\rm surf}\\,[\\mathrm{bar}]" - log_scale = true - scale = 1.0 - - [output_variables.atm_kg_per_mol] - label = "MMW [g/mol]" - log_scale = false - scale = 1000.0 # convert kg/mol to g/mol - - [output_variables.esc_rate_total] - label = "Escape rate [kg/s]" - log_scale = true - scale = 1.0 - - [output_variables.H2O_kg_atm] - label = "\\ H_{2}O_\\mathrm{atm} [kg]" - log_scale = true - scale = 1.0 - - [output_variables.C_kg_atm] - label = "C_{\\rm atm} [kg]" - log_scale = true - scale = 1.0 diff --git a/input/ensembles/example.grid_test_ppgrid.toml b/input/ensembles/example.grid_test_ppgrid.toml deleted file mode 100644 index c3e981160..000000000 --- a/input/ensembles/example.grid_test_ppgrid.toml +++ /dev/null @@ -1,44 +0,0 @@ -# Config file for running a grid of forward models - -# Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "scratch/grid_demo/" - -# Make `output` a symbolic link to this absolute location. To disable: set to empty string. 
-symlink = "" - -# Post-processing options -update_csv = false # Whether to update the summary CSV files before plotting -plot_format = "png" # Format for saving plots ("png" or "pdf") -plot_status = false # Generate status summary plot of the grid -plot_ecdf = true # Generate ECDF grid plot for input parameters tested in the grid and output_variables defined below -colormap = "viridis" # Colormap for ECDF plot (e.g. "viridis", "plasma", "inferno", "magma", etc.) -output_variables = ["solidification_time", "Phi_global", "T_surf", "P_surf", "atm_kg_per_mol", "esc_rate_total", "rho_obs", "H2_bar"] # List of output variables to include in ECDF plot - -# Path to base (reference) config file relative to PROTEUS root folder -ref_config = "input/demos/dummy.toml" - -# Use SLURM? -use_slurm = true - -# Execution limits -max_jobs = 10 # maximum number of concurrent tasks (e.g. 500 on Habrok) -max_days = 1 # maximum number of days to run (e.g. 1) -max_mem = 3 # maximum memory per CPU in GB (e.g. 3) - -# Now define grid axes... -# Each axis must be a new section (table) in this file. -# Each table corresponds to the name of the parameter to be varied. -# Each table name must be written in double quotes. -# See examples below - -# Planet mass set directly -["struct.mass_tot"] - method = "direct" - values = [1.0, 2.0] - -# Hydrogen inventory set by arange -["delivery.elements.H_ppmw"] - method = "arange" - start = 1 - stop = 100 - step = 50 diff --git a/tests/grid/dummy.grid.toml b/tests/grid/dummy.grid.toml index 171511211..52c24b06a 100644 --- a/tests/grid/dummy.grid.toml +++ b/tests/grid/dummy.grid.toml @@ -6,6 +6,14 @@ output = "dummy_grid" # Make `output` a symbolic link to this absolute location. To disable: set to empty string. 
symlink = "" +# Post-processing options +update_csv = true # Whether to update the summary CSV files before plotting +plot_format = "png" # Format for saving plots ("png" or "pdf") +plot_status = true # Generate status summary plot of the grid +plot_ecdf = true # Generate ECDF grid plot for input parameters tested in the grid and output_variables defined below +colormap = "viridis" # Colormap for ECDF plot +output_variables = ["solidification_time", "Phi_global", "T_surf", "P_surf", "atm_kg_per_mol", "esc_rate_total", "rho_obs", "H2_bar"] # List of output variables to include in ECDF plot + # Path to base (reference) config file relative to PROTEUS root folder ref_config = "tests/grid/base.toml" diff --git a/tests/grid/test_grid.py b/tests/grid/test_grid.py index 18ac797be..aaa987c3f 100644 --- a/tests/grid/test_grid.py +++ b/tests/grid/test_grid.py @@ -9,9 +9,11 @@ from proteus.grid.manage import grid_from_config from proteus.grid.pack import pack as gpack +from proteus.grid.post_processing import main as gpostprocess from proteus.grid.summarise import summarise as gsummarise OUT_DIR = PROTEUS_ROOT / 'output' / 'dummy_grid' +GRID_NAME = 'dummy_grid' GRID_CONFIG = PROTEUS_ROOT / 'tests' / 'grid' / 'dummy.grid.toml' BASE_CONFIG = PROTEUS_ROOT / 'tests' / 'grid' / 'base.toml' @@ -74,3 +76,17 @@ def test_grid_pack(grid_run): # check zip exists assert os.path.isfile(OUT_DIR / 'pack.zip') + +@pytest.mark.integration +def test_grid_post_process(grid_run): + # Test running grid-post-process command + gpostprocess(GRID_CONFIG) + + # check post-processed summary CSV file exists + assert os.path.isfile(OUT_DIR / 'post_processing' / 'extracted_data' / f'{GRID_NAME}_final_extracted_data_all.csv') + + # check that status summary plot was generated + assert os.path.isfile(OUT_DIR / 'post_processing' / 'grid_plots' / f'summary_grid_statuses_{GRID_NAME}.png') + + # check that ECDF plot was generated + assert os.path.isfile(OUT_DIR / 'post_processing' / 'grid_plots' / 
f'ecdf_grid_plot_{GRID_NAME}.png') From e9f2f326fcf304352c57682d795e38dfbf6cba90 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 19:51:15 +0200 Subject: [PATCH 092/105] ruff changes to pass tests --- src/proteus/cli.py | 3 +- src/proteus/grid/post_processing.py | 385 ++++++++++++++++------------ src/proteus/observe/platon.py | 2 - src/proteus/utils/plot.py | 46 +--- tests/grid/test_grid.py | 16 +- 5 files changed, 238 insertions(+), 214 deletions(-) diff --git a/src/proteus/cli.py b/src/proteus/cli.py index 05049ecac..aa0c5559c 100644 --- a/src/proteus/cli.py +++ b/src/proteus/cli.py @@ -558,9 +558,9 @@ def observe(config_path: Path): # 'grid_analyse' postprocessing commands # ---------------- + @click.command() @config_option - def grid_analyse(config_path: Path): """Generate grid analysis plots and CSV summary files from a grid config_path : Path to the toml file containing grid analysis configuration @@ -569,6 +569,7 @@ def grid_analyse(config_path: Path): main(config_path) + cli.add_command(grid_analyse) # ---------------- diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 55cce3a8d..34a63ef71 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -17,6 +17,7 @@ # Data loading, extraction, and CSV generation functions # --------------------------------------------------------- + def get_grid_name(grid_path: str | Path) -> str: """ Returns the grid name (last part of the path) from the given grid path. @@ -33,9 +34,10 @@ def get_grid_name(grid_path: str | Path) -> str: """ grid_path = Path(grid_path) if not grid_path.is_dir(): - raise ValueError(f"{grid_path} is not a valid directory") + raise ValueError(f'{grid_path} is not a valid directory') return grid_path.name + def load_grid_cases(grid_dir: Path): """ Load information for each simulation of a PROTEUS grid. 
@@ -74,7 +76,7 @@ def load_grid_cases(grid_dir: Path): try: init_params = toml.load(init_file) except Exception as e: - print(f"Error reading init file in {case.name}: {e}") + print(f'Error reading init file in {case.name}: {e}') # Read runtime_helpfile.csv df = None @@ -82,13 +84,17 @@ def load_grid_cases(grid_dir: Path): try: df = pd.read_csv(runtime_file, sep='\t') except Exception as e: - print(f"WARNING : Error reading runtime_helpfile.csv for {case.name}: {e}") + print(f'WARNING : Error reading runtime_helpfile.csv for {case.name}: {e}') # Read status file status = 'Unknown' if status_file.exists(): try: - raw_lines = [ln.strip() for ln in status_file.read_text(encoding='utf-8').splitlines() if ln.strip()] + raw_lines = [ + ln.strip() + for ln in status_file.read_text(encoding='utf-8').splitlines() + if ln.strip() + ] if len(raw_lines) >= 2: status = raw_lines[1] elif raw_lines: @@ -96,30 +102,29 @@ def load_grid_cases(grid_dir: Path): else: status = 'Empty' except Exception as e: - print(f"WARNING : Error reading status file in {case.name}: {e}") + print(f'WARNING : Error reading status file in {case.name}: {e}') else: - print(f"WARNING : Missing status file in {case.name}") + print(f'WARNING : Missing status file in {case.name}') # Combine all info about simulations into a list of dictionaries - combined_data.append({ - 'init_parameters': init_params, - 'output_values' : df, - 'status' : status - }) + combined_data.append( + {'init_parameters': init_params, 'output_values': df, 'status': status} + ) # Print summary of statuses statuses = [c['status'] for c in combined_data] status_counts = pd.Series(statuses).value_counts().sort_values(ascending=False) print('-----------------------------------------------------------') - print(f"Total number of simulations: {len(statuses)}") + print(f'Total number of simulations: {len(statuses)}') print('-----------------------------------------------------------') - print("Number of simulations per status:") + 
print('Number of simulations per status:') for st, count in status_counts.items(): - print(f" - {st:<45} : {count}") + print(f' - {st:<45} : {count}') print('-----------------------------------------------------------') return combined_data + def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): """ Extract tested grid parameters per case using: @@ -144,31 +149,29 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): grid_dir = Path(grid_dir) # 1. Load tested input parameters in the grid - raw_params = toml.load(grid_dir / "copy.grid.toml") + raw_params = toml.load(grid_dir / 'copy.grid.toml') # Keep only the parameters and their values tested_params = {} for key, value in raw_params.items(): - if isinstance(value, dict) and "method" in value: # filter to only get tested parameters (those with a "method" key) - method = value["method"] + if ( + isinstance(value, dict) and 'method' in value + ): # filter to only get tested parameters (those with a "method" key) + method = value['method'] - if method == "direct": - tested_params[key] = value["values"] + if method == 'direct': + tested_params[key] = value['values'] - elif method == "linspace": - tested_params[key] = np.linspace( - value['start'], value['stop'], value['count'] - ) + elif method == 'linspace': + tested_params[key] = np.linspace(value['start'], value['stop'], value['count']) - elif method == "logspace": + elif method == 'logspace': tested_params[key] = np.logspace( - np.log10(value['start']), - np.log10(value['stop']), - value['count'] + np.log10(value['start']), np.log10(value['stop']), value['count'] ) - elif method == "arange": + elif method == 'arange': arr = list(np.arange(value['start'], value['stop'], value['step'])) # Ensure endpoint is included if not np.isclose(arr[-1], value['stop']): @@ -176,7 +179,7 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): tested_params[key] = np.array(arr, dtype=float) else: - print(f"⚠️ Unknown method 
for {key}: {method}") + print(f'⚠️ Unknown method for {key}: {method}') continue grid_param_paths = list(tested_params.keys()) @@ -187,10 +190,10 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): if cases_data: for idx, case in enumerate(cases_data): params_for_case = {} - init_params = case["init_parameters"] + init_params = case['init_parameters'] for path in grid_param_paths: - keys = path.split(".") + keys = path.split('.') val = init_params try: @@ -204,6 +207,7 @@ def get_tested_grid_parameters(cases_data: list, grid_dir: str | Path): return case_params, tested_params + def load_phi_crit(grid_dir: str | Path): """ Load the critical melt fraction (phi_crit) from the reference configuration file of the grid. @@ -220,21 +224,22 @@ def load_phi_crit(grid_dir: str | Path): """ grid_dir = Path(grid_dir) - ref_file = grid_dir / "ref_config.toml" + ref_file = grid_dir / 'ref_config.toml' if not ref_file.exists(): - raise FileNotFoundError(f"ref_config.toml not found in {grid_dir}") + raise FileNotFoundError(f'ref_config.toml not found in {grid_dir}') - with ref_file.open("r", encoding="utf-8") as f: + with ref_file.open('r', encoding='utf-8') as f: ref = toml.load(f) try: - phi_crit = ref["params"]["stop"]["solid"]["phi_crit"] + phi_crit = ref['params']['stop']['solid']['phi_crit'] except KeyError: - raise KeyError("phi_crit not found in ref_config.toml") + raise KeyError('phi_crit not found in ref_config.toml') return phi_crit + def extract_solidification_time(cases_data: list, grid_dir: str | Path): """ Extract solidification time for each simulation of the grid for @@ -275,17 +280,18 @@ def extract_solidification_time(cases_data: list, grid_dir: str | Path): idx = condition.idxmax() solidification_times.append(df.loc[idx, 'Time']) else: - solidification_times.append(np.nan) # if planet is not solidified, append NaN + solidification_times.append(np.nan) # if planet is not solidified, append NaN else: if not columns_printed: - 
print("Warning: Missing Phi_global or Time column.") - print("Columns available:", df.columns.tolist()) + print('Warning: Missing Phi_global or Time column.') + print('Columns available:', df.columns.tolist()) columns_printed = True solidification_times.append(np.nan) return solidification_times + def validate_output_variables(df: pd.DataFrame, requested_outputs: list): """ Check that requested output variables exist in the DataFrame. @@ -311,17 +317,20 @@ def validate_output_variables(df: pd.DataFrame, requested_outputs: list): if var not in available: # Find index of 'Time' column try: - idx = available.index("Time") + idx = available.index('Time') except ValueError: idx = 0 # fallback if Time is not in columns print(f"WARNING: Output variable '{var}' not found in data.") - print(f"Available columns include: {available[idx:]}") # show all columns starting from 'Time' + print( + f'Available columns include: {available[idx:]}' + ) # show all columns starting from 'Time' else: valid_outputs.append(var) return valid_outputs + def generate_summary_csv( cases_data: list, case_params: dict, @@ -337,59 +346,61 @@ def generate_summary_csv( def include_case(status: str, mode: str) -> bool: status = status.lower() - if mode == "all": + if mode == 'all': return True - elif mode == "completed": - return status.startswith("completed") - elif mode == "running_error": - return status.startswith("running") or status.startswith("error") + elif mode == 'completed': + return status.startswith('completed') + elif mode == 'running_error': + return status.startswith('running') or status.startswith('error') else: - raise ValueError(f"Unknown mode: {mode}") + raise ValueError(f'Unknown mode: {mode}') # Compute solidification times once solidification_times = extract_solidification_time(cases_data, grid_dir) - output_dir = grid_dir / "post_processing" / "extracted_data" + output_dir = grid_dir / 'post_processing' / 'extracted_data' output_dir.mkdir(parents=True, exist_ok=True) - modes = 
["all", "completed", "running_error"] + modes = ['all', 'completed', 'running_error'] for mode in modes: summary_rows = [] for case_index, case in enumerate(cases_data): - status = case.get("status", "") + status = case.get('status', '') if not include_case(status, mode): continue row = { - "case_number": case_index, - "status": status, + 'case_number': case_index, + 'status': status, } # Parameters row.update(case_params.get(case_index, {})) # Output values - df = case.get("output_values") + df = case.get('output_values') if df is not None and not df.empty: row.update(df.iloc[-1].to_dict()) # Solidification time - row["solidification_time"] = solidification_times[case_index] + row['solidification_time'] = solidification_times[case_index] summary_rows.append(row) summary_df = pd.DataFrame(summary_rows) - output_file = output_dir / f"{grid_name}_final_extracted_data_{mode}.csv" - summary_df.to_csv(output_file, sep="\t", index=False) + output_file = output_dir / f'{grid_name}_final_extracted_data_{mode}.csv' + summary_df.to_csv(output_file, sep='\t', index=False) + # --------------------------------------------------------- # Plotting functions # --------------------------------------------------------- + def get_label(quant): """ Get label for a given quantity, using preset labels if available. @@ -411,6 +422,7 @@ def get_label(quant): # Take only the last part after the last dot return quant.split('.')[-1] + def get_scale(quant): """ Get scale factor for a given quantity, using preset scales if available. @@ -430,6 +442,7 @@ def get_scale(quant): else: return 1.0 + def get_log_scale(quant): """ Get log scale flag for a given quantity, using preset log scales if available. 
@@ -449,6 +462,7 @@ def get_log_scale(quant): else: return False + def plot_grid_status(df: pd.DataFrame, cfg: dict, grid_dir: str | Path, grid_name: str): """ Plot histogram summary of number of simulation statuses in @@ -469,37 +483,34 @@ def plot_grid_status(df: pd.DataFrame, cfg: dict, grid_dir: str | Path, grid_nam Name of the grid. """ # Extract plot_format from cfg - plot_format = cfg.get("plot_format") + plot_format = cfg.get('plot_format') - if "status" not in df.columns: + if 'status' not in df.columns: raise ValueError("CSV must contain a 'status' column") # Clean and count statuses - statuses = df["status"].astype(str) + statuses = df['status'].astype(str) status_counts = statuses.value_counts().sort_values(ascending=False) total_simulations = len(df) # Format status labels for better readability - formatted_status_keys = [s.replace(" (", " \n (") for s in status_counts.index] - palette = sns.color_palette("Accent", len(status_counts)) + formatted_status_keys = [s.replace(' (', ' \n (') for s in status_counts.index] + palette = sns.color_palette('Accent', len(status_counts)) palette = dict(zip(formatted_status_keys, palette)) # Prepare DataFrame for plotting - plot_df = pd.DataFrame({ - "Status": formatted_status_keys, - "Count": status_counts.values - }) + plot_df = pd.DataFrame({'Status': formatted_status_keys, 'Count': status_counts.values}) # Plot histogram plt.figure(figsize=(11, 7)) ax = sns.barplot( data=plot_df, - x="Status", - y="Count", - hue="Status", + x='Status', + y='Count', + hue='Status', palette=palette, dodge=False, - edgecolor="black" + edgecolor='black', ) # Remove legend @@ -513,39 +524,40 @@ def plot_grid_status(df: pd.DataFrame, cfg: dict, grid_dir: str | Path, grid_nam ax.text( i, count + offset, - f"{count} ({percentage:.1f}%)", - ha="center", - va="bottom", - fontsize=14 + f'{count} ({percentage:.1f}%)', + ha='center', + va='bottom', + fontsize=14, ) # Add total number of simulations text ax.text( 0.97, 0.94, - f"Total number 
of simulations : {total_simulations}", + f'Total number of simulations : {total_simulations}', transform=ax.transAxes, - ha="right", - va="top", - fontsize=16 + ha='right', + va='top', + fontsize=16, ) # Formatting - ax.grid(alpha=0.2, axis="y") - ax.set_title(f"Simulation status summary for grid {grid_name}", fontsize=16) - ax.set_xlabel("Simulation status", fontsize=16) - ax.set_ylabel("Number of simulations", fontsize=16) - ax.tick_params(axis="x", labelsize=14) - ax.tick_params(axis="y", labelsize=14) + ax.grid(alpha=0.2, axis='y') + ax.set_title(f'Simulation status summary for grid {grid_name}', fontsize=16) + ax.set_xlabel('Simulation status', fontsize=16) + ax.set_ylabel('Number of simulations', fontsize=16) + ax.tick_params(axis='x', labelsize=14) + ax.tick_params(axis='y', labelsize=14) # Save - output_dir = Path(grid_dir) / "post_processing" / "grid_plots" + output_dir = Path(grid_dir) / 'post_processing' / 'grid_plots' output_dir.mkdir(parents=True, exist_ok=True) - output_file = output_dir / f"summary_grid_statuses_{grid_name}.{plot_format}" + output_file = output_dir / f'summary_grid_statuses_{grid_name}.{plot_format}' plt.savefig(output_file, dpi=300, bbox_inches='tight') plt.close() -def flatten_input_parameters(d: dict, parent_key: str = "") -> dict: + +def flatten_input_parameters(d: dict, parent_key: str = '') -> dict: """ Flattens a nested input-parameter dictionary from a TOML configuration into a flat mapping of dot-separated parameter paths to their plotting @@ -568,12 +580,12 @@ def flatten_input_parameters(d: dict, parent_key: str = "") -> dict: flat = {} for k, v in d.items(): - if k == "colormap": + if k == 'colormap': continue - new_key = f"{parent_key}.{k}" if parent_key else k + new_key = f'{parent_key}.{k}' if parent_key else k - if isinstance(v, dict) and "label" in v: + if isinstance(v, dict) and 'label' in v: # Leaf parameter block flat[new_key] = v elif isinstance(v, dict): @@ -582,6 +594,7 @@ def flatten_input_parameters(d: dict, 
parent_key: str = "") -> dict: return flat + def load_ecdf_plot_settings(cfg, tested_params=None): """ Load ECDF plotting settings for both input parameters and output variables @@ -623,42 +636,44 @@ def load_ecdf_plot_settings(cfg, tested_params=None): """ if tested_params is None or len(tested_params) == 0: - raise ValueError("No tested parameters found for ECDF plotting") + raise ValueError('No tested parameters found for ECDF plotting') # Optional colormap from config - cmap_name = cfg.get("input_parameters", {}).get("colormap", "viridis") + cmap_name = cfg.get('input_parameters', {}).get('colormap', 'viridis') default_cmap = getattr(cm, cmap_name, cm.viridis) # Build parameter settings from config param_settings = { key: { - "label": get_label(key), - "colormap": default_cmap, - "log_scale": get_log_scale(key) + 'label': get_label(key), + 'colormap': default_cmap, + 'log_scale': get_log_scale(key), } for key in tested_params } # Build output settings from config output_settings = {} - output_list = cfg.get("output_variables", []) + output_list = cfg.get('output_variables', []) for key in output_list: output_settings[key] = { - "label": get_label(key), - "log_scale": get_log_scale(key), - "scale": get_scale(key), + 'label': get_label(key), + 'log_scale': get_log_scale(key), + 'scale': get_scale(key), } # Extract plot format - plot_format = cfg.get("plot_format") + plot_format = cfg.get('plot_format') return param_settings, output_settings, plot_format + def clean_series(s): - '''Cleans a pandas Series by replacing inf values with NaN and dropping NaN values.''' + """Cleans a pandas Series by replacing inf values with NaN and dropping NaN values.""" return s.replace([np.inf, -np.inf], np.nan).dropna().loc[lambda x: x > 0] + def group_output_by_parameter(df, grid_parameters, outputs): """ Groups output values (like P_surf) by one or more grid parameters. 
@@ -683,7 +698,7 @@ def group_output_by_parameter(df, grid_parameters, outputs): for param in grid_parameters: for output in outputs: - key_name = f"{output}_per_{param}" + key_name = f'{output}_per_{param}' value_dict = {} for param_value in df[param].dropna().unique(): subset = df[df[param] == param_value] @@ -695,13 +710,23 @@ def group_output_by_parameter(df, grid_parameters, outputs): return grouped + def latex(label: str) -> str: """ Wraps a label in dollar signs for LaTeX formatting if it contains a backslash. """ - return f"${label}$" if "\\" in label else label + return f'${label}$' if '\\' in label else label -def ecdf_grid_plot(tested_params: dict, grouped_data: dict, param_settings: dict, output_settings: dict, plot_format: str, grid_dir: str | Path, grid_name: str): + +def ecdf_grid_plot( + tested_params: dict, + grouped_data: dict, + param_settings: dict, + output_settings: dict, + plot_format: str, + grid_dir: str | Path, + grid_name: str, +): """ Creates ECDF grid plots where each row corresponds to one input parameter and each column corresponds to one output. Saves the resulting figure as a {plot_format}. 
@@ -742,18 +767,24 @@ def ecdf_grid_plot(tested_params: dict, grouped_data: dict, param_settings: dict # List of parameter names (rows) and output names (columns) param_names = list(param_settings.keys()) - out_names = list(output_settings.keys()) + out_names = list(output_settings.keys()) # Create subplot grid: rows = input parameters, columns = outputs variables n_rows = len(param_names) n_cols = len(out_names) - fig, axes = plt.subplots(n_rows, n_cols, figsize=(4 * n_cols, 2.75 * n_rows), squeeze=False, gridspec_kw={'wspace': 0.1, 'hspace': 0.2}) + fig, axes = plt.subplots( + n_rows, + n_cols, + figsize=(4 * n_cols, 2.75 * n_rows), + squeeze=False, + gridspec_kw={'wspace': 0.1, 'hspace': 0.2}, + ) # Loop through parameters (rows) and outputs (columns) for i, param_name in enumerate(param_names): tested_param = grid_params.get(param_name, []) if tested_param is None or len(tested_param) == 0: - print(f"Skipping {param_name} — no tested values found in grid_params") + print(f'Skipping {param_name} — no tested values found in grid_params') continue settings = param_settings[param_name] @@ -763,19 +794,23 @@ def ecdf_grid_plot(tested_params: dict, grouped_data: dict, param_settings: dict vmin, vmax = min(tested_param), max(tested_param) if vmin == vmax: vmin, vmax = vmin - 1e-9, vmax + 1e-9 - if settings.get("log_scale", False): + if settings.get('log_scale', False): norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax) else: norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) + def color_func(v): - return settings["colormap"](norm(v)) + return settings['colormap'](norm(v)) + colorbar_needed = True else: unique_vals = sorted(set(tested_param)) - cmap = mpl.colormaps.get_cmap(settings["colormap"]).resampled(len(unique_vals)) + cmap = mpl.colormaps.get_cmap(settings['colormap']).resampled(len(unique_vals)) color_map = {val: cmap(j) for j, val in enumerate(unique_vals)} + def color_func(v): return color_map[v] + colorbar_needed = False for j, output_name in 
enumerate(out_names): @@ -785,7 +820,8 @@ def color_func(v): # Add panel number in upper-left corner panel_number = i * n_cols + j + 1 ax.text( - 0.03, 0.95, + 0.03, + 0.95, str(panel_number), transform=ax.transAxes, fontsize=18, @@ -793,34 +829,36 @@ def color_func(v): va='top', ha='left', color='black', - bbox=dict(facecolor='white', edgecolor='silver', boxstyle='round,pad=0.2', alpha=0.8) + bbox=dict( + facecolor='white', edgecolor='silver', boxstyle='round,pad=0.2', alpha=0.8 + ), ) # Plot one ECDF per tested parameter value for val in tested_param: - data_key = f"{output_name}_per_{param_name}" + data_key = f'{output_name}_per_{param_name}' # if val not in grouped_data.get(data_key, {}): # continue # raw = np.array(grouped_data[data_key][val]) * out_settings["scale"] data_dict = grouped_data.get(data_key, {}) if val not in data_dict: continue - raw = np.array(data_dict[val]) #* out_settings["scale"] + raw = np.array(data_dict[val]) # * out_settings["scale"] # Plot ECDF sns.ecdfplot( data=raw, - #log_scale=out_settings["log_scale"], - stat="percent", + # log_scale=out_settings["log_scale"], + stat='percent', color=color_func(val), linewidth=4, linestyle='-', - ax=ax - ) + ax=ax, + ) # Configure x-axis labels, ticks, grids if i == n_rows - 1: - ax.set_xlabel(latex(out_settings["label"]), fontsize=22) + ax.set_xlabel(latex(out_settings['label']), fontsize=22) ax.xaxis.set_label_coords(0.5, -0.3) ax.tick_params(axis='x', labelsize=22) else: @@ -828,70 +866,86 @@ def color_func(v): # Configure y-axis (shared label added later) if j == 0: - ax.set_ylabel("") + ax.set_ylabel('') ticks = [0.0, 50, 100] ax.set_yticks(ticks) ax.tick_params(axis='y', labelsize=22) else: - ax.set_ylabel("") + ax.set_ylabel('') ax.set_yticks(ticks) ax.tick_params(axis='y', labelleft=False) - ax.tick_params(axis='x', which='minor', direction='in', top=True, bottom=True, length=2) - ax.tick_params(axis='x', which='major', direction='inout', top=True, bottom=True, length=6) + ax.tick_params( 
+ axis='x', which='minor', direction='in', top=True, bottom=True, length=2 + ) + ax.tick_params( + axis='x', which='major', direction='inout', top=True, bottom=True, length=6 + ) ax.grid(alpha=0.4) # Configure log scale for x-axis if needed - if out_settings["log_scale"]: - ax.set_xscale("log") + if out_settings['log_scale']: + ax.set_xscale('log') # After plotting all outputs for this parameter (row), add colorbar or legend - if colorbar_needed: # colorbar for numeric parameters - sm = mpl.cm.ScalarMappable(cmap=settings["colormap"], norm=norm) - rightmost_ax = axes[i, -1] # Get the rightmost axis in the current row - cbar = fig.colorbar(sm,ax=rightmost_ax,pad=0.03,aspect=10) - cbar.set_label(latex(settings["label"]), fontsize=24) + if colorbar_needed: # colorbar for numeric parameters + sm = mpl.cm.ScalarMappable(cmap=settings['colormap'], norm=norm) + rightmost_ax = axes[i, -1] # Get the rightmost axis in the current row + cbar = fig.colorbar(sm, ax=rightmost_ax, pad=0.03, aspect=10) + cbar.set_label(latex(settings['label']), fontsize=24) cbar.ax.yaxis.set_label_coords(6, 0.5) ticks = sorted(set(tested_param)) cbar.set_ticks(ticks) cbar.ax.tick_params(labelsize=22) - else: # legend for string parameters - handles = [mpl.lines.Line2D([0], [0], color=color_map[val], lw=4, label=str(val)) for val in unique_vals] - ax.legend(handles=handles, fontsize=24,bbox_to_anchor=(1.01, 1), loc='upper left') + else: # legend for string parameters + handles = [ + mpl.lines.Line2D([0], [0], color=color_map[val], lw=4, label=str(val)) + for val in unique_vals + ] + ax.legend(handles=handles, fontsize=24, bbox_to_anchor=(1.01, 1), loc='upper left') # Add a single, shared y-axis label - fig.text(0.07, 0.5, 'Empirical cumulative distribution of grid simulations [%]', va='center', rotation='vertical', fontsize=40) + fig.text( + 0.07, + 0.5, + 'Empirical cumulative distribution of grid simulations [%]', + va='center', + rotation='vertical', + fontsize=40, + ) # Save figure - output_dir 
= grid_dir / "post_processing" / "grid_plots" + output_dir = grid_dir / 'post_processing' / 'grid_plots' output_dir.mkdir(parents=True, exist_ok=True) - output_file = output_dir / f"ecdf_grid_plot_{grid_name}.{plot_format}" + output_file = output_dir / f'ecdf_grid_plot_{grid_name}.{plot_format}' fig.savefig(output_file, dpi=300, bbox_inches='tight') plt.close(fig) + # --------------------------------------------------------- # main # --------------------------------------------------------- -def main(grid_analyse_toml_file: str | Path): +def main(grid_analyse_toml_file: str | Path): # Load configuration from grid_analyse.toml - with open(grid_analyse_toml_file, "rb") as f: + with open(grid_analyse_toml_file, 'rb') as f: cfg = tomllib.load(f) # Get grid path and name - grid_path = Path('output/' + cfg["output"] + '/') - print(f"Grid path: {grid_path}") + grid_path = Path('output/' + cfg['output'] + '/') + print(f'Grid path: {grid_path}') grid_name = get_grid_name(grid_path) - # --- Summary CSVs --- - update_csv = cfg.get("update_csv", True) + update_csv = cfg.get('update_csv', True) - summary_dir = grid_path / "post_processing" / "extracted_data" - summary_csv_all = summary_dir / f"{grid_name}_final_extracted_data_all.csv" - summary_csv_completed = summary_dir / f"{grid_name}_final_extracted_data_completed.csv" - summary_csv_running_error = summary_dir / f"{grid_name}_final_extracted_data_running_error.csv" + summary_dir = grid_path / 'post_processing' / 'extracted_data' + summary_csv_all = summary_dir / f'{grid_name}_final_extracted_data_all.csv' + summary_csv_completed = summary_dir / f'{grid_name}_final_extracted_data_completed.csv' + summary_csv_running_error = ( + summary_dir / f'{grid_name}_final_extracted_data_running_error.csv' + ) if update_csv: # Load grid data @@ -901,42 +955,41 @@ def main(grid_analyse_toml_file: str | Path): ) # Write CSV - generate_summary_csv( - data, input_param_grid_per_case, grid_path, grid_name - ) + generate_summary_csv(data, 
input_param_grid_per_case, grid_path, grid_name) else: # Check that CSVs exist for f in [summary_csv_all, summary_csv_completed, summary_csv_running_error]: if not f.exists(): raise FileNotFoundError( - f"{f.name} not found in {summary_dir}, " - "but update_csv is set to False. Please set update_csv to True to generate it." + f'{f.name} not found in {summary_dir}, ' + 'but update_csv is set to False. Please set update_csv to True to generate it.' ) # Only load tested parameters from grid config _, tested_params_grid = get_tested_grid_parameters([], grid_path) # --- Plot grid status --- - if cfg.get("plot_status", True): - all_simulations_data_csv = pd.read_csv(summary_csv_all, sep="\t") + if cfg.get('plot_status', True): + all_simulations_data_csv = pd.read_csv(summary_csv_all, sep='\t') plot_grid_status(all_simulations_data_csv, cfg, grid_path, grid_name) - print("Plot grid status summary is available.") + print('Plot grid status summary is available.') # --- ECDF plots --- - if cfg.get("plot_ecdf", True): - completed_simulations_data_csv = pd.read_csv(summary_csv_completed, sep="\t") + if cfg.get('plot_ecdf', True): + completed_simulations_data_csv = pd.read_csv(summary_csv_completed, sep='\t') columns_output = validate_output_variables( - completed_simulations_data_csv, - cfg["output_variables"] + completed_simulations_data_csv, cfg['output_variables'] ) if len(columns_output) == 0: - raise ValueError("No valid output variables found. Check your config file.") + raise ValueError('No valid output variables found. 
Check your config file.') grouped_data = group_output_by_parameter( - completed_simulations_data_csv, - list(tested_params_grid.keys()), - columns_output, - ) + completed_simulations_data_csv, + list(tested_params_grid.keys()), + columns_output, + ) - param_settings_grid, output_settings_grid, plot_format = load_ecdf_plot_settings(cfg, tested_params_grid) + param_settings_grid, output_settings_grid, plot_format = load_ecdf_plot_settings( + cfg, tested_params_grid + ) ecdf_grid_plot( tested_params_grid, grouped_data, @@ -944,10 +997,10 @@ def main(grid_analyse_toml_file: str | Path): output_settings_grid, plot_format, grid_path, - grid_name + grid_name, ) - print("ECDF grid plot is available.") + print('ECDF grid plot is available.') -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/src/proteus/observe/platon.py b/src/proteus/observe/platon.py index 654f4afcc..bc526e860 100644 --- a/src/proteus/observe/platon.py +++ b/src/proteus/observe/platon.py @@ -224,7 +224,6 @@ def transit_depth(hf_row: dict, outdir: str, config: Config, source: str): Method for setting the mixing ratios: "outgas", "profile", or "offchem". """ from platon.transit_depth_calculator import TransitDepthCalculator - from proteus.observe.common import get_transit_fpath # All planet quantities in SI @@ -324,7 +323,6 @@ def eclipse_depth(hf_row: dict, outdir: str, config: Config, source: str): Method for setting the mixing ratios: "outgas", "profile", or "offchem". 
""" from platon.eclipse_depth_calculator import EclipseDepthCalculator - from proteus.observe.common import get_eclipse_fpath # All planet quantities in SI diff --git a/src/proteus/utils/plot.py b/src/proteus/utils/plot.py index cb265af2f..c005999c0 100644 --- a/src/proteus/utils/plot.py +++ b/src/proteus/utils/plot.py @@ -93,22 +93,17 @@ # Orbit module 'orbit.semimajoraxis': 'a [AU]', 'orbit.eccentricity': 'e', - # Structure module 'struct.mass_tot': 'M_{\\mathrm{tot}} [M_\\oplus]', 'struct.radius_int': 'R_{\\mathrm{int}} [R_\\oplus]', 'struct.corefrac': 'CRF', - # Atmosphere module 'atmos_clim.module': 'Atmospheric\ntreatment', - # Escape module 'escape.zephyrus.efficiency': '\\rm \\epsilon', 'escape.zephyrus.Pxuv': 'P_{\\rm XUV}\\,[bar]', - # Outgassing module 'outgas.fO2_shift_IW': '\\Delta\\,\\rm IW', - # Delivery module 'delivery.elements.H_oceans': 'H [Earth oceans]', 'delivery.elements.H_ppmw': 'H [ppmw]', @@ -122,39 +117,31 @@ 'delivery.elements.SH_ratio': 'S/H ratio', 'delivery.elements.S_ppmw': 'S [ppmw]', 'delivery.elements.S_kg': 'S [kg]', - ## Output variables (from runtime_helpfile.csv) # Model tracking 'Time': 'Time [yr]', - 'solidification_time': 'Solidification time [yr]', # computed in post-processing script, not in runtime_helpfile.csv - + 'solidification_time': 'Solidification time [yr]', # computed in post-processing script, not in runtime_helpfile.csv # Orbital parameters 'semimajorax': 'a [m]', 'eccentricity': 'e', - # Planet structure 'R_int': 'R_{\\mathrm{int}} [R_\\oplus]', 'M_int': 'M_{\\mathrm{int}} [M_\\oplus]', 'M_planet': 'M_{\\mathrm{planet}} [M_\\oplus]', - # Temperatures 'T_surf': 'T_{\\rm surf}\\,[\\mathrm{K}]', 'T_magma': 'T_{\\rm magma}\\,[\\mathrm{K}]', 'T_eqm': 'T_{\\rm eqm}\\,[\\mathrm{K}]', 'T_skin': 'T_{\\rm skin}\\,[\\mathrm{K}]', - # Planet interior properties 'Phi_global': 'Melt fraction [%]', - # Planet observational properties 'R_obs': 'R_{\\rm obs}\\,[R_\\oplus]', 'rho_obs': '\\rho_{\\rm obs}\\,[\\mathrm{g/m^3}]', - 
# Atmospheric composition from outgassing 'M_atm': 'Atmosphere mass [kg]', 'P_surf': 'P_{\\rm surf}\\,[\\mathrm{bar}]', 'atm_kg_per_mol': 'MMW [g/mol]', - # Atmospheric escape 'esc_rate_total': 'Escape rate [g/s]', } @@ -164,22 +151,17 @@ # Orbit module 'orbit.semimajoraxis': 1.0, 'orbit.eccentricity': 1.0, - # Structure module 'struct.mass_tot': 1.0, 'struct.radius_int': 1.0, 'struct.corefrac': 1.0, - # Atmosphere module 'atmos_clim.module': 1.0, - # Escape module 'escape.zephyrus.efficiency': 1.0, 'escape.zephyrus.Pxuv': 1.0, - # Outgassing module 'outgas.fO2_shift_IW': 1.0, - # Delivery module 'delivery.elements.H_oceans': 1.0, 'delivery.elements.H_ppmw': 1.0, @@ -193,39 +175,31 @@ 'delivery.elements.SH_ratio': 1.0, 'delivery.elements.S_ppmw': 1.0, 'delivery.elements.S_kg': 1.0, - ## Output variables (from runtime_helpfile.csv) # Model tracking 'Time': 1.0, - 'solidification_time': 1.0, # computed in post-processing script, not in runtime_helpfile.csv - + 'solidification_time': 1.0, # computed in post-processing script, not in runtime_helpfile.csv # Orbital parameters 'semimajorax': 1.0, 'eccentricity': 1.0, - # Planet structure 'R_int': 1.0 / R_earth, 'M_int': 1.0 / M_earth, 'M_planet': 1.0 / M_earth, - # Temperatures 'T_surf': 1.0, 'T_magma': 1.0, 'T_eqm': 1.0, 'T_skin': 1.0, - # Planet interior properties 'Phi_global': 100.0, - # Planet observational properties 'R_obs': 1.0 / R_earth, 'rho_obs': 0.001, - # Atmospheric composition from outgassing 'M_atm': 1.0, 'P_surf': 1.0, 'atm_kg_per_mol': 1000.0, - # Atmospheric escape 'esc_rate_total': 1000.0, } @@ -235,22 +209,17 @@ # Orbit module 'orbit.semimajoraxis': False, 'orbit.eccentricity': False, - # Structure module 'struct.mass_tot': False, 'struct.radius_int': False, 'struct.corefrac': False, - # Atmosphere module 'atmos_clim.module': False, - # Escape module 'escape.zephyrus.efficiency': True, 'escape.zephyrus.Pxuv': True, - # Outgassing module 'outgas.fO2_shift_IW': False, - # Delivery module 
'delivery.elements.H_oceans': True, 'delivery.elements.H_ppmw': True, @@ -264,43 +233,36 @@ 'delivery.elements.SH_ratio': True, 'delivery.elements.S_ppmw': True, 'delivery.elements.S_kg': True, - ## Output variables (from runtime_helpfile.csv) # Model tracking 'Time': True, - 'solidification_time': True, # computed in post-processing script, not in runtime_helpfile.csv - + 'solidification_time': True, # computed in post-processing script, not in runtime_helpfile.csv # Orbital parameters 'semimajorax': False, 'eccentricity': False, - # Planet structure 'R_int': False, 'M_int': False, 'M_planet': False, - # Temperatures 'T_surf': False, 'T_magma': False, 'T_eqm': False, 'T_skin': False, - # Planet interior properties 'Phi_global': False, - # Planet observational properties 'R_obs': False, 'rho_obs': False, - # Atmospheric composition from outgassing 'M_atm': False, 'P_surf': True, 'atm_kg_per_mol': False, - # Atmospheric escape 'esc_rate_total': True, } + def _generate_colour(gas: str): """ Systematically generate a colour for a gas, from its composition. 
diff --git a/tests/grid/test_grid.py b/tests/grid/test_grid.py index aaa987c3f..014df7e95 100644 --- a/tests/grid/test_grid.py +++ b/tests/grid/test_grid.py @@ -77,16 +77,26 @@ def test_grid_pack(grid_run): # check zip exists assert os.path.isfile(OUT_DIR / 'pack.zip') + @pytest.mark.integration def test_grid_post_process(grid_run): # Test running grid-post-process command gpostprocess(GRID_CONFIG) # check post-processed summary CSV file exists - assert os.path.isfile(OUT_DIR / 'post_processing' / 'extracted_data' / f'{GRID_NAME}_final_extracted_data_all.csv') + assert os.path.isfile( + OUT_DIR + / 'post_processing' + / 'extracted_data' + / f'{GRID_NAME}_final_extracted_data_all.csv' + ) # check that status summary plot was generated - assert os.path.isfile(OUT_DIR / 'post_processing' / 'grid_plots' / f'summary_grid_statuses_{GRID_NAME}.png') + assert os.path.isfile( + OUT_DIR / 'post_processing' / 'grid_plots' / f'summary_grid_statuses_{GRID_NAME}.png' + ) # check that ECDF plot was generated - assert os.path.isfile(OUT_DIR / 'post_processing' / 'grid_plots' / f'ecdf_grid_plot_{GRID_NAME}.png') + assert os.path.isfile( + OUT_DIR / 'post_processing' / 'grid_plots' / f'ecdf_grid_plot_{GRID_NAME}.png' + ) From 04c654a0b0ce16b06e5fdaff2d1ccfb177eb6be7 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 20:06:52 +0200 Subject: [PATCH 093/105] fix colormap problem --- input/ensembles/example.grid.toml | 4 ++-- src/proteus/grid/post_processing.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/input/ensembles/example.grid.toml b/input/ensembles/example.grid.toml index f6c738004..83eb815d1 100644 --- a/input/ensembles/example.grid.toml +++ b/input/ensembles/example.grid.toml @@ -1,7 +1,7 @@ # Config file for running a grid of forward models # Path to output folder where grid will be saved (relative to PROTEUS output folder) -output = "grid_demo/" +output = "scratch/grid_demo/" # Make `output` a symbolic link to this absolute location. 
To disable: set to empty string. symlink = "" @@ -11,7 +11,7 @@ update_csv = true # Whether to update the summary CSV files be plot_format = "png" # Format for saving plots ("png" or "pdf") plot_status = true # Generate status summary plot of the grid plot_ecdf = true # Generate ECDF grid plot for input parameters tested in the grid and output_variables defined below -colormap = "viridis" # Colormap for ECDF plot (e.g. "viridis", "managua") +colormap = "managua" # Colormap for ECDF plot (e.g. "viridis", "managua") output_variables = ["solidification_time", "Phi_global", "T_surf", "P_surf", "atm_kg_per_mol", "esc_rate_total"] # List of output variables to include in ECDF plot (name must match variable name in runtime_helpfile.csv) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 34a63ef71..b0e2acb40 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -639,7 +639,7 @@ def load_ecdf_plot_settings(cfg, tested_params=None): raise ValueError('No tested parameters found for ECDF plotting') # Optional colormap from config - cmap_name = cfg.get('input_parameters', {}).get('colormap', 'viridis') + cmap_name = cfg['colormap'] if 'colormap' in cfg else 'viridis' default_cmap = getattr(cm, cmap_name, cm.viridis) # Build parameter settings from config From 1c51c2ab42491ffa09fe029fb257fd464e6cedd1 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 8 Apr 2026 18:07:03 +0000 Subject: [PATCH 094/105] fix: read colormap from top-level cfg key and fix ruff lint in platon.py Agent-Logs-Url: https://github.com/FormingWorlds/PROTEUS/sessions/cbf3c143-d265-4db4-a889-74d0c610eaab Co-authored-by: EmmaPostolec <122358811+EmmaPostolec@users.noreply.github.com> --- src/proteus/grid/post_processing.py | 2 +- src/proteus/observe/platon.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/proteus/grid/post_processing.py 
b/src/proteus/grid/post_processing.py index 34a63ef71..8f743c0d4 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -639,7 +639,7 @@ def load_ecdf_plot_settings(cfg, tested_params=None): raise ValueError('No tested parameters found for ECDF plotting') # Optional colormap from config - cmap_name = cfg.get('input_parameters', {}).get('colormap', 'viridis') + cmap_name = cfg.get('colormap', 'viridis') default_cmap = getattr(cm, cmap_name, cm.viridis) # Build parameter settings from config diff --git a/src/proteus/observe/platon.py b/src/proteus/observe/platon.py index bc526e860..654f4afcc 100644 --- a/src/proteus/observe/platon.py +++ b/src/proteus/observe/platon.py @@ -224,6 +224,7 @@ def transit_depth(hf_row: dict, outdir: str, config: Config, source: str): Method for setting the mixing ratios: "outgas", "profile", or "offchem". """ from platon.transit_depth_calculator import TransitDepthCalculator + from proteus.observe.common import get_transit_fpath # All planet quantities in SI @@ -323,6 +324,7 @@ def eclipse_depth(hf_row: dict, outdir: str, config: Config, source: str): Method for setting the mixing ratios: "outgas", "profile", or "offchem". 
""" from platon.eclipse_depth_calculator import EclipseDepthCalculator + from proteus.observe.common import get_eclipse_fpath # All planet quantities in SI From a398c5941ca9ec0991de07e1fb291297a62ca9f6 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 20:11:09 +0200 Subject: [PATCH 095/105] fix removing 0 when it shouldnt with clean_series --- src/proteus/grid/post_processing.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index b0e2acb40..89c24e799 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -669,10 +669,12 @@ def load_ecdf_plot_settings(cfg, tested_params=None): return param_settings, output_settings, plot_format -def clean_series(s): +def clean_series(s, log_scale): """Cleans a pandas Series by replacing inf values with NaN and dropping NaN values.""" - return s.replace([np.inf, -np.inf], np.nan).dropna().loc[lambda x: x > 0] - + s_clean = s.replace([np.inf, -np.inf], np.nan).dropna() + if log_scale: + s_clean = s_clean.loc[lambda x: x > 0] + return s_clean def group_output_by_parameter(df, grid_parameters, outputs): """ @@ -702,7 +704,7 @@ def group_output_by_parameter(df, grid_parameters, outputs): value_dict = {} for param_value in df[param].dropna().unique(): subset = df[df[param] == param_value] - output_values = clean_series(subset[output]) * get_scale(output) + output_values = clean_series(subset[output], get_log_scale(output)) * get_scale(output) value_dict[param_value] = output_values From 10b92cd75d82f2cab63c276575e3e06d34adc0fa Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 20:12:27 +0200 Subject: [PATCH 096/105] fix mistake unit rho_obs in plot.py --- src/proteus/utils/plot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proteus/utils/plot.py b/src/proteus/utils/plot.py index c005999c0..85400ef48 100644 --- a/src/proteus/utils/plot.py +++ 
b/src/proteus/utils/plot.py @@ -137,7 +137,7 @@ 'Phi_global': 'Melt fraction [%]', # Planet observational properties 'R_obs': 'R_{\\rm obs}\\,[R_\\oplus]', - 'rho_obs': '\\rho_{\\rm obs}\\,[\\mathrm{g/m^3}]', + 'rho_obs': '\\rho_{\\rm obs}\\,[\\mathrm{g/cm^3}]', # Atmospheric composition from outgassing 'M_atm': 'Atmosphere mass [kg]', 'P_surf': 'P_{\\rm surf}\\,[\\mathrm{bar}]', From 4d8fc823db73b5b2db22024d2751ae56ae1b4154 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 20:16:00 +0200 Subject: [PATCH 097/105] update the grammar in _escape.py --- input/ensembles/example.grid.toml | 2 +- src/proteus/config/_escape.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/input/ensembles/example.grid.toml b/input/ensembles/example.grid.toml index 83eb815d1..bd76d1e2e 100644 --- a/input/ensembles/example.grid.toml +++ b/input/ensembles/example.grid.toml @@ -14,7 +14,7 @@ plot_ecdf = true # Generate ECDF grid plot for input paramete colormap = "managua" # Colormap for ECDF plot (e.g. 
"viridis", "managua") output_variables = ["solidification_time", "Phi_global", "T_surf", "P_surf", "atm_kg_per_mol", - "esc_rate_total"] # List of output variables to include in ECDF plot (name must match variable name in runtime_helpfile.csv) + "esc_rate_total", "rho_obs"] # List of output variables to include in ECDF plot (name must match variable name in runtime_helpfile.csv) # Path to base (reference) config file relative to PROTEUS root folder ref_config = "input/demos/dummy.toml" diff --git a/src/proteus/config/_escape.py b/src/proteus/config/_escape.py index 592b23dbc..df3fcf082 100644 --- a/src/proteus/config/_escape.py +++ b/src/proteus/config/_escape.py @@ -26,7 +26,7 @@ class Zephyrus: Attributes ---------- Pxuv: float - Pressure at which XUV radiation become opaque in the planetary atmosphere (should be within 0 < Pxuv < 10 bars) [bar] + Pressure at which XUV radiation becomes opaque in the planetary atmosphere (should be within Pxuv > 0 bar) [bar] efficiency: float Escape efficiency factor tidal: bool From c6353799d70fe20518c2cf742670547b7579a86f Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 20:18:11 +0200 Subject: [PATCH 098/105] update doc mistake --- docs/How-to/usage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/How-to/usage.md b/docs/How-to/usage.md index 908921df6..5d14fa8f1 100644 --- a/docs/How-to/usage.md +++ b/docs/How-to/usage.md @@ -193,7 +193,7 @@ Before running the command, configure the post-processing options in your grid c To post-process a grid and generate ECDF plots for further analysis, run the following command: ``` -proteus grid-analyse --config input/ensembles/example.grid_analyse.toml +proteus grid-analyse --config input/ensembles/example.grid.toml ``` From 77356bf4d643bb47ca4242175821e7810e4d5666 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 20:19:32 +0200 Subject: [PATCH 099/105] doc typo --- docs/How-to/usage.md | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/docs/How-to/usage.md b/docs/How-to/usage.md index 5d14fa8f1..91937860e 100644 --- a/docs/How-to/usage.md +++ b/docs/How-to/usage.md @@ -204,7 +204,7 @@ After execution, a `post_processing/` directory is created inside the grid folde - `{grid_name}_final_extracted_data_completed.csv` which contains only successful runs (used for ECDF plots) - `{grid_name}_final_extracted_data_running_error.csv` for only failed simulations with status `Running` or `Error`. -- `grid_plots/` This directory contains a status dummary plot and a ECDF plot +- `grid_plots/` This directory contains a status summary plot and a ECDF plot ## Retrieval scheme (Bayesian optimisation) From ace2dac85f466da75121bb0d5fd948d01790632c Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 20:29:16 +0200 Subject: [PATCH 100/105] comment for copilot on no argument in main --- src/proteus/grid/post_processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 89c24e799..cf72c3ca8 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -930,7 +930,7 @@ def color_func(v): def main(grid_analyse_toml_file: str | Path): - # Load configuration from grid_analyse.toml + # Load configuration from example.grid.toml with open(grid_analyse_toml_file, 'rb') as f: cfg = tomllib.load(f) @@ -1003,6 +1003,6 @@ def main(grid_analyse_toml_file: str | Path): ) print('ECDF grid plot is available.') - +# main() expects the TOML file from the CLI; no argument needed here. 
if __name__ == '__main__': main() From 3d84a2bb640611448b82c1144892f095dee348f9 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 20:34:02 +0200 Subject: [PATCH 101/105] to avoid copilot warning about not passing argument to main --- input/ensembles/example.grid.toml | 2 +- src/proteus/grid/post_processing.py | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/input/ensembles/example.grid.toml b/input/ensembles/example.grid.toml index bd76d1e2e..9a3838f79 100644 --- a/input/ensembles/example.grid.toml +++ b/input/ensembles/example.grid.toml @@ -11,7 +11,7 @@ update_csv = true # Whether to update the summary CSV files be plot_format = "png" # Format for saving plots ("png" or "pdf") plot_status = true # Generate status summary plot of the grid plot_ecdf = true # Generate ECDF grid plot for input parameters tested in the grid and output_variables defined below -colormap = "managua" # Colormap for ECDF plot (e.g. "viridis", "managua") +colormap = "berlin" # Colormap for ECDF plot (e.g. "viridis", "managua") output_variables = ["solidification_time", "Phi_global", "T_surf", "P_surf", "atm_kg_per_mol", "esc_rate_total", "rho_obs"] # List of output variables to include in ECDF plot (name must match variable name in runtime_helpfile.csv) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index cf72c3ca8..39cde9635 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -1002,7 +1002,3 @@ def main(grid_analyse_toml_file: str | Path): grid_name, ) print('ECDF grid plot is available.') - -# main() expects the TOML file from the CLI; no argument needed here. 
-if __name__ == '__main__': - main() From 5738d61ca6551ab6fedc67f3f2a3a178703deedd Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 20:51:10 +0200 Subject: [PATCH 102/105] typo _escape.py --- src/proteus/config/_escape.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proteus/config/_escape.py b/src/proteus/config/_escape.py index df3fcf082..e55bbfe66 100644 --- a/src/proteus/config/_escape.py +++ b/src/proteus/config/_escape.py @@ -26,7 +26,7 @@ class Zephyrus: Attributes ---------- Pxuv: float - Pressure at which XUV radiation becomes opaque in the planetary atmosphere (should be within Pxuv > 0 bar) [bar] + Pressure at which XUV radiation becomes opaque in the planetary atmosphere (should be above Pxuv > 0 bar) [bar] efficiency: float Escape efficiency factor tidal: bool From 6e6ead581893e38649b254ca2ae3da6abbc9b2a4 Mon Sep 17 00:00:00 2001 From: EmmaPostolec Date: Wed, 8 Apr 2026 20:53:22 +0200 Subject: [PATCH 103/105] ruff format on src/tests/ to pas test --- src/proteus/grid/post_processing.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/proteus/grid/post_processing.py b/src/proteus/grid/post_processing.py index 39cde9635..6529bc049 100644 --- a/src/proteus/grid/post_processing.py +++ b/src/proteus/grid/post_processing.py @@ -676,6 +676,7 @@ def clean_series(s, log_scale): s_clean = s_clean.loc[lambda x: x > 0] return s_clean + def group_output_by_parameter(df, grid_parameters, outputs): """ Groups output values (like P_surf) by one or more grid parameters. 
@@ -704,7 +705,9 @@ def group_output_by_parameter(df, grid_parameters, outputs): value_dict = {} for param_value in df[param].dropna().unique(): subset = df[df[param] == param_value] - output_values = clean_series(subset[output], get_log_scale(output)) * get_scale(output) + output_values = clean_series(subset[output], get_log_scale(output)) * get_scale( + output + ) value_dict[param_value] = output_values From 1d8978a191211a5eaade4714e32d72d592a215f6 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 8 Apr 2026 19:22:06 +0000 Subject: [PATCH 104/105] test: add unit tests for post_processing helper functions Agent-Logs-Url: https://github.com/FormingWorlds/PROTEUS/sessions/43d242cd-ebd2-458b-9dde-201556591aef Co-authored-by: EmmaPostolec <122358811+EmmaPostolec@users.noreply.github.com> --- tests/grid/test_post_processing.py | 333 +++++++++++++++++++++++++++++ 1 file changed, 333 insertions(+) create mode 100644 tests/grid/test_post_processing.py diff --git a/tests/grid/test_post_processing.py b/tests/grid/test_post_processing.py new file mode 100644 index 000000000..ed755e439 --- /dev/null +++ b/tests/grid/test_post_processing.py @@ -0,0 +1,333 @@ +""" +Unit tests for proteus.grid.post_processing helper functions. + +Tests the pure-Python helper functions that require no running grid, +no file I/O, and no compiled binaries. Each test is fast (<100 ms). 
+ +See also: + docs/How-to/test_infrastructure.md + docs/How-to/test_categorization.md + docs/How-to/test_building.md +""" + +from __future__ import annotations + +import pandas as pd +import pytest + +from proteus.grid.post_processing import ( + clean_series, + flatten_input_parameters, + get_grid_name, + get_label, + get_log_scale, + get_scale, + group_output_by_parameter, + latex, + load_ecdf_plot_settings, + validate_output_variables, +) + +# --------------------------------------------------------- +# get_grid_name +# --------------------------------------------------------- + + +@pytest.mark.unit +def test_get_grid_name_returns_directory_name(tmp_path): + """get_grid_name should return the last component of a valid directory path.""" + grid_dir = tmp_path / 'my_test_grid' + grid_dir.mkdir() + assert get_grid_name(grid_dir) == 'my_test_grid' + + +@pytest.mark.unit +def test_get_grid_name_raises_for_nonexistent_path(tmp_path): + """get_grid_name should raise ValueError if the path is not a directory.""" + missing = tmp_path / 'does_not_exist' + with pytest.raises(ValueError, match='not a valid directory'): + get_grid_name(missing) + + +@pytest.mark.unit +def test_get_grid_name_accepts_string(tmp_path): + """get_grid_name should accept a plain string as well as a Path.""" + grid_dir = tmp_path / 'str_grid' + grid_dir.mkdir() + assert get_grid_name(str(grid_dir)) == 'str_grid' + + +# --------------------------------------------------------- +# get_label +# --------------------------------------------------------- + + +@pytest.mark.unit +def test_get_label_known_quantity(): + """Preset quantities should return their human-readable label.""" + label = get_label('T_surf') + assert 'surf' in label.lower() or 'T' in label + + +@pytest.mark.unit +def test_get_label_unknown_quantity_returns_last_segment(): + """Unknown dotted path should return only the last segment.""" + label = get_label('some.deeply.nested.param') + assert label == 'param' + + +@pytest.mark.unit +def 
test_get_label_simple_unknown(): + """Unknown non-dotted key should be returned unchanged.""" + label = get_label('totally_unknown_var') + assert label == 'totally_unknown_var' + + +# --------------------------------------------------------- +# get_scale +# --------------------------------------------------------- + + +@pytest.mark.unit +def test_get_scale_known_quantity(): + """Preset quantities should return a non-default (or explicitly 1.0) scale.""" + scale = get_scale('Phi_global') + # Phi_global is stored as fraction but plotted as %, so scale should be 100 + assert scale == pytest.approx(100.0, rel=1e-5) + + +@pytest.mark.unit +def test_get_scale_unknown_returns_one(): + """Unknown quantities should fall back to scale factor of 1.0.""" + assert get_scale('not_a_real_quantity') == pytest.approx(1.0, rel=1e-5) + + +# --------------------------------------------------------- +# get_log_scale +# --------------------------------------------------------- + + +@pytest.mark.unit +def test_get_log_scale_known_log_quantity(): + """esc_rate_total should use log scale.""" + # esc_rate_total is in preset_log_scales as True (escape rates span many orders) + from proteus.utils.plot import _preset_log_scales + + if 'esc_rate_total' in _preset_log_scales: + assert get_log_scale('esc_rate_total') == _preset_log_scales['esc_rate_total'] + + +@pytest.mark.unit +def test_get_log_scale_unknown_returns_false(): + """Unknown quantities default to linear scale (False).""" + assert get_log_scale('some_unknown_output') is False + + +# --------------------------------------------------------- +# latex +# --------------------------------------------------------- + + +@pytest.mark.unit +def test_latex_wraps_backslash_label(): + """Labels containing a backslash should be wrapped in dollar signs.""" + assert latex('\\rm surf') == '$\\rm surf$' + + +@pytest.mark.unit +def test_latex_plain_label_unchanged(): + """Labels without backslash should not be wrapped.""" + assert latex('T_surf') == 
'T_surf' + + +@pytest.mark.unit +def test_latex_empty_string(): + """Empty string has no backslash, so it should be returned as-is.""" + assert latex('') == '' + + +# --------------------------------------------------------- +# clean_series +# --------------------------------------------------------- + + +@pytest.mark.unit +def test_clean_series_removes_nan(): + """NaN values should be dropped from the series.""" + s = pd.Series([1.0, float('nan'), 3.0]) + result = clean_series(s, log_scale=False) + assert len(result) == 2 + assert not result.isna().any() + + +@pytest.mark.unit +def test_clean_series_removes_inf(): + """Infinite values should be replaced by NaN and then dropped.""" + s = pd.Series([1.0, float('inf'), -float('inf'), 2.0]) + result = clean_series(s, log_scale=False) + assert len(result) == 2 + + +@pytest.mark.unit +def test_clean_series_log_scale_removes_nonpositive(): + """With log_scale=True, zero and negative values should be dropped.""" + s = pd.Series([-1.0, 0.0, 0.5, 2.0]) + result = clean_series(s, log_scale=True) + assert (result > 0).all() + assert len(result) == 2 + + +@pytest.mark.unit +def test_clean_series_linear_keeps_zeros(): + """With log_scale=False, zero values should be retained.""" + s = pd.Series([0.0, 1.0, 2.0]) + result = clean_series(s, log_scale=False) + assert 0.0 in result.values + assert len(result) == 3 + + +# --------------------------------------------------------- +# validate_output_variables +# --------------------------------------------------------- + + +@pytest.mark.unit +def test_validate_output_variables_all_present(): + """All requested outputs that exist in the DataFrame should be returned.""" + df = pd.DataFrame({'Time': [1], 'T_surf': [300], 'P_surf': [1e5]}) + valid = validate_output_variables(df, ['T_surf', 'P_surf']) + assert valid == ['T_surf', 'P_surf'] + + +@pytest.mark.unit +def test_validate_output_variables_missing_excluded(capsys): + """Missing output variables should be excluded and a warning 
printed.""" + df = pd.DataFrame({'Time': [1], 'T_surf': [300]}) + valid = validate_output_variables(df, ['T_surf', 'not_a_column']) + assert valid == ['T_surf'] + captured = capsys.readouterr() + assert 'WARNING' in captured.out or 'not_a_column' in captured.out + + +@pytest.mark.unit +def test_validate_output_variables_empty_request(): + """Empty request list should return empty list.""" + df = pd.DataFrame({'Time': [1], 'T_surf': [300]}) + valid = validate_output_variables(df, []) + assert valid == [] + + +# --------------------------------------------------------- +# flatten_input_parameters +# --------------------------------------------------------- + + +@pytest.mark.unit +def test_flatten_input_parameters_simple(): + """Flat dict with leaf blocks should be returned unchanged.""" + d = {'escape': {'efficiency': {'label': 'eff', 'log': True}}} + result = flatten_input_parameters(d) + assert 'escape.efficiency' in result + assert result['escape.efficiency']['label'] == 'eff' + + +@pytest.mark.unit +def test_flatten_input_parameters_skips_colormap(): + """The 'colormap' key should be skipped at any nesting level.""" + d = {'colormap': 'viridis', 'orbit': {'sma': {'label': 'a'}}} + result = flatten_input_parameters(d) + assert 'colormap' not in result + assert 'orbit.sma' in result + + +@pytest.mark.unit +def test_flatten_input_parameters_deeply_nested(): + """Multi-level nesting should produce correct dot-separated keys.""" + d = {'a': {'b': {'c': {'label': 'deep'}}}} + result = flatten_input_parameters(d) + assert 'a.b.c' in result + + +# --------------------------------------------------------- +# load_ecdf_plot_settings +# --------------------------------------------------------- + + +@pytest.mark.unit +def test_load_ecdf_plot_settings_basic(): + """Should return three items: param_settings, output_settings, plot_format.""" + cfg = { + 'colormap': 'viridis', + 'output_variables': ['T_surf', 'P_surf'], + 'plot_format': 'png', + } + tested_params = 
{'struct.mass_tot': [0.7, 1.0]} + param_settings, output_settings, plot_format = load_ecdf_plot_settings(cfg, tested_params) + + assert 'struct.mass_tot' in param_settings + assert 'T_surf' in output_settings + assert 'P_surf' in output_settings + assert plot_format == 'png' + + +@pytest.mark.unit +def test_load_ecdf_plot_settings_respects_colormap(): + """The colormap from config should be applied to param_settings.""" + import matplotlib.cm as cm + + cfg = { + 'colormap': 'plasma', + 'output_variables': ['T_surf'], + 'plot_format': 'pdf', + } + tested_params = {'orbit.semimajoraxis': [0.1, 1.0]} + param_settings, _, _ = load_ecdf_plot_settings(cfg, tested_params) + # The colormap object stored should correspond to 'plasma' + assert param_settings['orbit.semimajoraxis']['colormap'] is cm.plasma + + +@pytest.mark.unit +def test_load_ecdf_plot_settings_raises_for_empty_params(): + """Should raise ValueError when no tested parameters are provided.""" + cfg = {'colormap': 'viridis', 'output_variables': ['T_surf'], 'plot_format': 'png'} + with pytest.raises(ValueError, match='No tested parameters'): + load_ecdf_plot_settings(cfg, {}) + + +# --------------------------------------------------------- +# group_output_by_parameter +# --------------------------------------------------------- + + +@pytest.mark.unit +def test_group_output_by_parameter_basic(): + """Should group output values correctly by input parameter.""" + df = pd.DataFrame( + { + 'struct.mass_tot': [0.7, 0.7, 1.0, 1.0], + 'T_surf': [500.0, 600.0, 700.0, 800.0], + } + ) + result = group_output_by_parameter(df, ['struct.mass_tot'], ['T_surf']) + key = 'T_surf_per_struct.mass_tot' + assert key in result + assert 0.7 in result[key] + assert 1.0 in result[key] + assert len(result[key][0.7]) == 2 + + +@pytest.mark.unit +def test_group_output_by_parameter_applies_scale(): + """Scale factor from _preset_scales should be applied to output values.""" + # Phi_global has a scale of 100 (fraction → percentage) + df = 
pd.DataFrame( + { + 'struct.corefrac': [0.35, 0.35], + 'Phi_global': [0.5, 0.8], + } + ) + result = group_output_by_parameter(df, ['struct.corefrac'], ['Phi_global']) + key = 'Phi_global_per_struct.corefrac' + values = list(result[key][0.35]) + # Values should be scaled by 100 + assert pytest.approx(values, rel=1e-5) == [50.0, 80.0] From e65c564631b23f7d2c33836b8cc720a9263f56af Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 8 Apr 2026 19:24:43 +0000 Subject: [PATCH 105/105] test: address code review feedback on unit tests Agent-Logs-Url: https://github.com/FormingWorlds/PROTEUS/sessions/43d242cd-ebd2-458b-9dde-201556591aef Co-authored-by: EmmaPostolec <122358811+EmmaPostolec@users.noreply.github.com> --- tests/grid/test_post_processing.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/tests/grid/test_post_processing.py b/tests/grid/test_post_processing.py index ed755e439..df1d60d23 100644 --- a/tests/grid/test_post_processing.py +++ b/tests/grid/test_post_processing.py @@ -49,6 +49,15 @@ def test_get_grid_name_raises_for_nonexistent_path(tmp_path): get_grid_name(missing) +@pytest.mark.unit +def test_get_grid_name_raises_for_file_path(tmp_path): + """get_grid_name should raise ValueError when given a file path instead of a directory.""" + file_path = tmp_path / 'not_a_dir.txt' + file_path.write_text('data') + with pytest.raises(ValueError, match='not a valid directory'): + get_grid_name(file_path) + + @pytest.mark.unit def test_get_grid_name_accepts_string(tmp_path): """get_grid_name should accept a plain string as well as a Path.""" @@ -64,9 +73,10 @@ def test_get_grid_name_accepts_string(tmp_path): @pytest.mark.unit def test_get_label_known_quantity(): - """Preset quantities should return their human-readable label.""" - label = get_label('T_surf') - assert 'surf' in label.lower() or 'T' in label + """Preset quantities should return their human-readable 
label from _preset_labels.""" + from proteus.utils.plot import _preset_labels + + assert get_label('T_surf') == _preset_labels['T_surf'] @pytest.mark.unit @@ -109,12 +119,11 @@ def test_get_scale_unknown_returns_one(): @pytest.mark.unit def test_get_log_scale_known_log_quantity(): - """esc_rate_total should use log scale.""" - # esc_rate_total is in preset_log_scales as True (escape rates span many orders) + """escape.zephyrus.efficiency is a known log-scale quantity.""" from proteus.utils.plot import _preset_log_scales - if 'esc_rate_total' in _preset_log_scales: - assert get_log_scale('esc_rate_total') == _preset_log_scales['esc_rate_total'] + assert 'escape.zephyrus.efficiency' in _preset_log_scales + assert get_log_scale('escape.zephyrus.efficiency') is True @pytest.mark.unit @@ -206,7 +215,7 @@ def test_validate_output_variables_missing_excluded(capsys): valid = validate_output_variables(df, ['T_surf', 'not_a_column']) assert valid == ['T_surf'] captured = capsys.readouterr() - assert 'WARNING' in captured.out or 'not_a_column' in captured.out + assert 'WARNING' in captured.out and 'not_a_column' in captured.out @pytest.mark.unit