Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions apem/unit_based_model/evaluation/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,9 +48,11 @@
normalize_run_dir,
parse_run_config,
)
from apem.unit_based_model.evaluation.output_dir import create_timestamped_output_dir

__all__ = [
"compare_price_algorithms",
"create_timestamped_output_dir",
"ensure_lost_opp_cost_run_for_configuration",
"ensure_redispatch_run_for_configuration",
"ensure_run_for_configuration",
Expand Down
57 changes: 57 additions & 0 deletions apem/unit_based_model/evaluation/output_dir.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
"""Helpers for creating evaluation output directories with Windows-safe name lengths."""

from __future__ import annotations

from datetime import datetime, timezone
import hashlib
from pathlib import Path
import re

# Default cap on the generated folder-name segment. Keeps the timestamped
# directory name compact so absolute paths stay comfortably below Windows'
# default 260-character MAX_PATH limit (see module docstring).
DEFAULT_OUTPUT_DIR_NAME_MAX_LENGTH = 64
# One-or-more characters that are not filesystem-friendly (anything outside
# ASCII alphanumerics, "_", ".", "-"); each run is collapsed to a single "_".
_NON_ALNUM_PATTERN = re.compile(r"[^A-Za-z0-9_.-]+")


def create_timestamped_output_dir(
    evaluation_root: Path,
    *name_parts: str,
    max_dir_name_length: int = DEFAULT_OUTPUT_DIR_NAME_MAX_LENGTH,
) -> Path:
    """
    Create a timestamped output directory with bounded folder-name length.

    On Windows, long absolute paths can fail around the default 260-character
    limit. This helper keeps the timestamped folder segment compact and appends
    a stable hash when truncation is required.

    Parameters
    ----------
    evaluation_root:
        Directory under which the new folder is created (parents are created
        as needed; an existing folder of the same name is reused).
    *name_parts:
        Optional descriptive parts. Each is sanitized and the non-empty
        results are joined with "_" after the UTC timestamp. Parts that are
        empty or whitespace-only are dropped.
    max_dir_name_length:
        Upper bound on the length of the generated folder-name segment.

    Returns
    -------
    Path
        The created (or pre-existing) output directory.
    """
    timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
    sanitized_parts = [_sanitize_part(part) for part in name_parts if str(part).strip()]
    descriptor = "_".join(part for part in sanitized_parts if part)

    folder_name = f"{timestamp}_{descriptor}" if descriptor else timestamp
    # Bound the segment length in every case. Previously the timestamp-only
    # name skipped truncation entirely, so a caller-supplied small
    # max_dir_name_length was silently ignored when no descriptor survived
    # sanitization. (The default timestamp is 16 characters, well under the
    # default cap, so default behavior is unchanged.)
    folder_name = _truncate_with_hash(folder_name, max_dir_name_length)

    output_dir = evaluation_root / folder_name
    output_dir.mkdir(parents=True, exist_ok=True)
    return output_dir


def _sanitize_part(value: str) -> str:
    """Make *value* safe for use inside a folder name.

    Collapses each run of disallowed characters (anything other than ASCII
    alphanumerics, "_", ".", "-") into a single underscore, then drops
    separator characters left dangling at either end.
    """
    text = str(value).strip()
    collapsed = re.sub(r"[^A-Za-z0-9_.-]+", "_", text)
    return collapsed.strip("_.-")


def _truncate_with_hash(value: str, max_length: int) -> str:
    """Shorten *value* to at most *max_length* characters.

    When truncation is needed, a stable 10-hex-character SHA-1 fingerprint of
    the full original value is appended so that distinct long names remain
    distinct and reproducible across runs.
    """
    if len(value) <= max_length:
        return value

    fingerprint = hashlib.sha1(value.encode("utf-8")).hexdigest()[:10]
    # Budget for the kept prefix: total cap minus the "_" separator and hash.
    prefix_budget = max_length - 1 - len(fingerprint)
    if prefix_budget <= 0:
        # Cap is too tight for any prefix; fall back to (part of) the hash.
        return fingerprint[:max_length]

    prefix = value[:prefix_budget].rstrip("_.-")
    # If the prefix was all separator characters, the hash alone identifies it.
    return f"{prefix}_{fingerprint}" if prefix else fingerprint[:max_length]
18 changes: 14 additions & 4 deletions node_ranking/network_scores.py
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,12 @@ def compute_node_ptdf_contribution_scores(
Contribution score for each node in the full node set. Slack node has
score 0.
"""
m, _ncols = ptdf.shape
m, ncols = ptdf.shape
if len(mask) != ncols:
raise ValueError(
"PTDF column count must match mask length. "
f"Got {ncols} columns and mask length {len(mask)}."
)

# Build weights per line if requested (otherwise set to 1)
if method == "weighted_sum":
Expand All @@ -139,7 +144,9 @@ def compute_node_ptdf_contribution_scores(
# Absolute PTDF values (flow sensitivities can be positive/negative by convention)
abs_ptdf = np.abs(ptdf) # shape = (m, n-1)

if method in ("sum", "weighted_sum"):
if ncols == 0 or m == 0:
scores_non_slack = np.zeros(ncols, dtype=float)
elif method in ("sum", "weighted_sum"):
# Sum over all lines, optionally weighted by line capacity
scores_non_slack = (abs_ptdf * weights[:, None]).sum(axis=0)
elif method == "max":
Expand All @@ -148,8 +155,11 @@ def compute_node_ptdf_contribution_scores(
else:
raise ValueError(f"Unknown method '{method}'.")

# Place scores back into full node order (slack bus gets score 0)
# Place scores back into full node order (slack bus gets score 0).
# NumPy infers float dtype for empty lists, which is invalid for indexing.
mask_idx = np.asarray(mask, dtype=int)
scores_full = np.zeros(len(nodes), dtype=float)
scores_full[np.array(mask)] = scores_non_slack
if mask_idx.size:
scores_full[mask_idx] = scores_non_slack

return {node: float(score) for node, score in zip(nodes, scores_full)}
11 changes: 5 additions & 6 deletions scripts/unit_based_model/example_cost_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@

from __future__ import annotations

from datetime import datetime, timezone
import json
import os
from pathlib import Path
Expand All @@ -48,6 +47,7 @@
from apem.unit_based_model.allocation.algorithms.zonal_clearing.zonal_ntc_multiedge import Zonal_NTC_multiedge
from apem.unit_based_model.enums import FBMCBaseCases, UnitBased_Datasets
from apem.unit_based_model.evaluation import (
create_timestamped_output_dir,
ensure_welfare_run_for_configuration,
load_welfare_from_run,
plot_value_by_period_and_power_flow_model,
Expand Down Expand Up @@ -85,11 +85,10 @@ def create_evaluation_output_dir(
scenario_name: str,
power_flow_models: tuple,
) -> Path:
timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
model_part = "_".join(power_flow_model_name(model) for model in power_flow_models)
output_dir = evaluation_root(scenario_name) / f"{timestamp}_{model_part}"
output_dir.mkdir(parents=True, exist_ok=True)
return output_dir
return create_timestamped_output_dir(
evaluation_root(scenario_name),
*(power_flow_model_name(model) for model in power_flow_models),
)


def run_cost_comparison(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@

from __future__ import annotations

from datetime import datetime, timezone
import json
import os
from pathlib import Path
Expand All @@ -51,6 +50,7 @@
UnitBased_Datasets,
)
from apem.unit_based_model.evaluation import (
create_timestamped_output_dir,
ensure_redispatch_run_for_configuration,
ensure_welfare_run_for_configuration,
load_redispatch_metrics_from_run,
Expand Down Expand Up @@ -86,11 +86,11 @@ def create_evaluation_output_dir(
redispatch_algorithm: RedispatchAlgorithms,
power_flow_models: tuple[PowerFlowModels, ...],
) -> Path:
timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
model_part = "_".join(model.name for model in power_flow_models)
output_dir = evaluation_root(scenario_name) / f"{timestamp}_{redispatch_algorithm.name}_{model_part}"
output_dir.mkdir(parents=True, exist_ok=True)
return output_dir
return create_timestamped_output_dir(
evaluation_root(scenario_name),
redispatch_algorithm.name,
*(model.name for model in power_flow_models),
)


def run_cost_plus_redispatch_cost_comparison(
Expand Down
12 changes: 6 additions & 6 deletions scripts/unit_based_model/example_lost_opp_cost_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@

from __future__ import annotations

from datetime import datetime, timezone
import json
from pathlib import Path
import sys
Expand All @@ -32,6 +31,7 @@
from apem.execution_chain import _retrieve_data
from apem.unit_based_model.enums import PowerFlowModels, PricingAlgorithms, UnitBased_Datasets
from apem.unit_based_model.evaluation import (
create_timestamped_output_dir,
ensure_lost_opp_cost_run_for_configuration,
load_lost_opp_costs_from_run,
plot_lost_opp_cost_by_component,
Expand Down Expand Up @@ -67,11 +67,11 @@ def create_evaluation_output_dir(
pricing_algorithms: tuple[PricingAlgorithms, ...],
) -> Path:
"""Create a timestamped output folder so lost opportunity cost evaluation results are not overwritten."""
timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
algorithm_part = "_".join(algorithm.name for algorithm in pricing_algorithms)
output_dir = evaluation_root(scenario_name) / f"{timestamp}_{power_flow_model}_{algorithm_part}"
output_dir.mkdir(parents=True, exist_ok=True)
return output_dir
return create_timestamped_output_dir(
evaluation_root(scenario_name),
power_flow_model,
*(algorithm.name for algorithm in pricing_algorithms),
)


def build_power_flow_model():
Expand Down
12 changes: 6 additions & 6 deletions scripts/unit_based_model/example_price_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@

from __future__ import annotations

from datetime import datetime, timezone
import json
import os
import sys
Expand All @@ -41,6 +40,7 @@
from apem.unit_based_model.enums import PowerFlowModels, PricingAlgorithms, UnitBased_Datasets
from apem.unit_based_model.evaluation import (
compare_price_algorithms,
create_timestamped_output_dir,
ensure_run_for_configuration,
load_prices_from_run,
plot_average_prices_by_node,
Expand Down Expand Up @@ -81,11 +81,11 @@ def create_evaluation_output_dir(
pricing_algorithms: tuple[PricingAlgorithms, ...],
) -> Path:
"""Create a timestamped output folder so evaluation results are not overwritten."""
timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
algorithm_part = "_".join(algorithm.name for algorithm in pricing_algorithms)
output_dir = evaluation_root(scenario_name) / f"{timestamp}_{power_flow_model}_{algorithm_part}"
output_dir.mkdir(parents=True, exist_ok=True)
return output_dir
return create_timestamped_output_dir(
evaluation_root(scenario_name),
power_flow_model,
*(algorithm.name for algorithm in pricing_algorithms),
)


def build_power_flow_model():
Expand Down
12 changes: 6 additions & 6 deletions scripts/unit_based_model/example_redispatch_cost_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@

from __future__ import annotations

from datetime import datetime, timezone
import json
import os
from pathlib import Path
Expand All @@ -45,6 +44,7 @@
UnitBased_Datasets,
)
from apem.unit_based_model.evaluation import (
create_timestamped_output_dir,
ensure_redispatch_run_for_configuration,
load_redispatch_metrics_from_run,
plot_redispatch_metric_by_power_flow_model,
Expand Down Expand Up @@ -77,11 +77,11 @@ def create_evaluation_output_dir(
redispatch_algorithm: RedispatchAlgorithms,
power_flow_models: tuple[PowerFlowModels, ...],
) -> Path:
timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
model_part = "_".join(model.name for model in power_flow_models)
output_dir = evaluation_root(scenario_name) / f"{timestamp}_{redispatch_algorithm.name}_{model_part}"
output_dir.mkdir(parents=True, exist_ok=True)
return output_dir
return create_timestamped_output_dir(
evaluation_root(scenario_name),
redispatch_algorithm.name,
*(model.name for model in power_flow_models),
)


def run_redispatch_cost_comparison(
Expand Down
12 changes: 6 additions & 6 deletions scripts/unit_based_model/example_redispatch_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@

from __future__ import annotations

from datetime import datetime, timezone
import json
import os
from pathlib import Path
Expand All @@ -45,6 +44,7 @@
UnitBased_Datasets,
)
from apem.unit_based_model.evaluation import (
create_timestamped_output_dir,
ensure_redispatch_run_for_configuration,
load_redispatch_metrics_from_run,
plot_redispatch_metric_by_algorithm,
Expand Down Expand Up @@ -79,11 +79,11 @@ def create_evaluation_output_dir(
power_flow_model_name: str,
redispatch_algorithms: tuple[RedispatchAlgorithms, ...],
) -> Path:
timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
algorithm_part = "_".join(algorithm.name for algorithm in redispatch_algorithms)
output_dir = evaluation_root(scenario_name) / f"{timestamp}_{power_flow_model_name}_{algorithm_part}"
output_dir.mkdir(parents=True, exist_ok=True)
return output_dir
return create_timestamped_output_dir(
evaluation_root(scenario_name),
power_flow_model_name,
*(algorithm.name for algorithm in redispatch_algorithms),
)


def run_redispatch_comparison(
Expand Down
11 changes: 6 additions & 5 deletions scripts/unit_based_model/example_zonal_price_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@

from __future__ import annotations

from datetime import datetime, timezone
import json
import os
import sys
Expand All @@ -45,6 +44,7 @@
from apem.unit_based_model.allocation.algorithms.zonal_clearing.zonal_ntc_multiedge import Zonal_NTC_multiedge
from apem.unit_based_model.evaluation import (
compare_price_algorithms,
create_timestamped_output_dir,
normalize_run_dir,
parse_run_config,
round_numeric_columns,
Expand Down Expand Up @@ -84,10 +84,11 @@ def create_evaluation_output_dir(
pricing_algorithm: PricingAlgorithms,
zonal_configuration: str,
) -> Path:
timestamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
output_dir = evaluation_root(scenario_name) / f"{timestamp}_{zonal_configuration}_{pricing_algorithm.name}"
output_dir.mkdir(parents=True, exist_ok=True)
return output_dir
return create_timestamped_output_dir(
evaluation_root(scenario_name),
zonal_configuration,
pricing_algorithm.name,
)


def zonal_path_for_model(power_flow_model) -> str:
Expand Down
Loading