Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .Jules/palette.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
## 2024-05-18 - Tooltips for Statistical/Financial Parameters
**Learning:** Users often struggle with abstract statistical or financial parameter inputs (like basis points or quantiles) if there is no concrete example or contextual explanation provided in the UI. Adding separate helper text components clutters the UI and distracts from the primary data visualization.
**Action:** Use native widget tooltips (e.g., `help="10 bps = 0.10%"`) for all advanced statistical or financial inputs. This provides accessible, on-demand context without increasing visual noise or compromising the dashboard's layout density.
89 changes: 45 additions & 44 deletions src/dashboard.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,9 +168,9 @@ def get_cache_key(*args) -> str:
help="Lookback months for Momentum signal."
)
else:
factor_window = st.slider("Factor Beta Window (days)", 20, 252, 63, 7)
vol_window = st.slider("Regime Vol Window (days)", 10, 60, 21, 5)
adv_pct = st.slider("ADV Participation %", 0.01, 0.30, float(DEFAULT_ADV_PCT), 0.01)
factor_window = st.slider("Factor Beta Window (days)", 20, 252, 63, 7, help="Lookback days for computing rolling factor betas.")
vol_window = st.slider("Regime Vol Window (days)", 10, 60, 21, 5, help="Rolling days for calculating volatility (e.g., 21 days = 1 trading month).")
adv_pct = st.slider("ADV Participation %", 0.01, 0.30, float(DEFAULT_ADV_PCT), 0.01, help="Average Daily Volume limit for liquidation estimates (e.g., 0.10 = 10% of volume).")

st.markdown("---")
st.subheader("4. Research Rigor")
Expand All @@ -185,19 +185,20 @@ def get_cache_key(*args) -> str:
st.info("Using full-sample quantiles (exploratory mode)")

vol_q_high = st.slider(
"High Volatility Quantile", 0.5, 0.95, DEFAULT_VOL_QUANTILE_HIGH, 0.05
"High Volatility Quantile", 0.5, 0.95, DEFAULT_VOL_QUANTILE_HIGH, 0.05,
help="Quantile threshold defining High Volatility (e.g., 0.80 = top 20% most volatile days)."
)

if mode == "Single-Asset":
st.subheader("5. Backtest Settings")
bt_cost = st.number_input("Transaction Cost (bps)", value=DEFAULT_COST_BPS, step=1) / 10000
bt_cost = st.number_input("Transaction Cost (bps)", value=DEFAULT_COST_BPS, step=1, help="One-way friction per trade in basis points (e.g., 10 bps = 0.10%).") / 10000
allow_short = st.checkbox("Allow Short Selling?", value=False)
else:
st.subheader("5. Alert Thresholds")
dd_alert = st.slider("Max Drawdown Alert", -0.6, -0.05, -0.2, 0.05)
vol_alert = st.slider("Volatility Alert (ann.)", 0.1, 1.0, 0.35, 0.05)
beta_alert = st.slider("Beta Alert", 0.5, 2.0, 1.3, 0.1)
dttl_alert = st.slider("Days-to-Liquidate Alert", 1.0, 20.0, 5.0, 1.0)
dd_alert = st.slider("Max Drawdown Alert", -0.6, -0.05, -0.2, 0.05, help="Alert triggers below this equity drop (e.g., -0.2 = -20% drawdown).")
vol_alert = st.slider("Volatility Alert (ann.)", 0.1, 1.0, 0.35, 0.05, help="Alert triggers above this annualized volatility limit (e.g., 0.35 = 35% vol).")
beta_alert = st.slider("Beta Alert", 0.5, 2.0, 1.3, 0.1, help="Alert triggers if portfolio sensitivity to benchmark exceeds this level.")
dttl_alert = st.slider("Days-to-Liquidate Alert", 1.0, 20.0, 5.0, 1.0, help="Alert if selling the portfolio at the ADV limit exceeds these many days.")


# --- Portfolio Mode ---
Expand Down Expand Up @@ -650,9 +651,9 @@ def get_cache_key(*args) -> str:
# --- Regime Detection ---
# Using 21-day annualized vol with option for out-of-sample analysis
df = signals.detect_volatility_regime(
df,
vol_col='Vol_21d',
quantile_high=vol_q_high,
df,
vol_col='Vol_21d',
quantile_high=vol_q_high,
quantile_low=0.25,
use_expanding=use_oos # Toggle between in-sample and out-of-sample
)
Expand Down Expand Up @@ -729,15 +730,15 @@ def get_cache_key(*args) -> str:
fig = go.Figure()
fig.add_trace(go.Scatter(x=df.index, y=df['Close'], name='Close Price', line=dict(color='white', width=1)))
fig.add_trace(go.Scatter(x=df.index, y=df[f'SMA_{sma_window}'], name=f'{sma_window}-Day SMA', line=dict(color='#ff9f43', width=1)))

# Highlight High Volatility Regimes
# Filter high vol periods
high_vol_mask = df['Vol_Regime'] == 'High'
# We can plot markers or shade areas. Shading is valid but tricky in Plotly without shapes list.
# Let's plot points
high_vol_pts = df[high_vol_mask]
fig.add_trace(go.Scatter(x=high_vol_pts.index, y=high_vol_pts['Close'], mode='markers', name='High Volatility', marker=dict(color='red', size=2)))

fig.update_layout(
title=f"{ticker} Price History & Regime Context",
yaxis_title="Price ($)",
Expand All @@ -751,21 +752,21 @@ def get_cache_key(*args) -> str:
# --- TAB 2: REGIME ANALYSIS ---
with tab_regime:
st.subheader("Volatility Regime Classification")

c1, c2 = st.columns(2)
with c1:
# Scatter: Vol vs Returns needed? Maybe just distribution
fig_hist = px.histogram(df, x="Vol_21d", color="Vol_Regime", nbins=50, title="Volatility Distribution", template="plotly_dark",
color_discrete_map={"High": "#ff4b4b", "Low": "#00ff00", "Normal": "#888888"})
st.plotly_chart(fig_hist, use_container_width=True)

with c2:
# Pie chart of time spent in regimes
regime_counts = df['Vol_Regime'].value_counts()
fig_pie = px.pie(values=regime_counts, names=regime_counts.index, title="Time Spent in Regimes", template="plotly_dark",
color=regime_counts.index, color_discrete_map={"High": "#ff4b4b", "Low": "#00ff00", "Normal": "#888888"})
st.plotly_chart(fig_pie, use_container_width=True)

st.markdown("### Regime Characteristics")
stats = df.groupby('Vol_Regime')[['Daily_Return', 'Vol_21d']].mean()
# Annualize return
Expand Down Expand Up @@ -828,57 +829,57 @@ def get_cache_key(*args) -> str:
# --- TAB 3: BACKTEST ---
with tab_bt:
st.subheader("Strategy Simulation")

# Out-of-sample mode indicator
if use_oos:
st.success("🔬 **Out-of-Sample Mode Active** - Regime classification uses only past data at each point")

if not res_df.empty:

# 1. Global Metrics with Bootstrap CI
strat_metrics = backtester.calculate_perf_metrics(
res_df['Equity_Strategy'],
res_df['Equity_Strategy'],
include_bootstrap_ci=True,
n_bootstrap=500
)
bench_metrics = backtester.calculate_perf_metrics(res_df['Equity_Benchmark'])

col_m1, col_m2, col_m3, col_m4 = st.columns(4)
col_m1.metric("Global CAGR", f"{strat_metrics['CAGR']:.2%}")

# Show Sharpe with CI if available
sharpe_display = f"{strat_metrics['Sharpe']:.2f}"
if strat_metrics.get('Sharpe_CI_Lower') is not None:
sharpe_display += f" [{strat_metrics['Sharpe_CI_Lower']:.2f}, {strat_metrics['Sharpe_CI_Upper']:.2f}]"
col_m2.metric("Sharpe (95% CI)", sharpe_display)

col_m3.metric("Max Drawdown", f"{strat_metrics['MaxDD']:.2%}")
col_m4.metric("Max DD Duration", f"{strat_metrics.get('MaxDD_Duration', 0)} days")

# Additional metrics row
col_a1, col_a2, col_a3, col_a4 = st.columns(4)
col_a1.metric("Sortino", f"{strat_metrics.get('Sortino', 0):.2f}")
col_a2.metric("Calmar", f"{strat_metrics.get('Calmar', 0):.2f}")
col_a3.metric("Win Rate", f"{strat_metrics.get('WinRate', 0):.1%}")
col_a4.metric("Avg DD Duration", f"{strat_metrics.get('AvgDD_Duration', 0):.0f} days")

# 2. Equity Curve
fig_eq = go.Figure()
fig_eq.add_trace(go.Scatter(x=res_df.index, y=res_df['Equity_Strategy'], name='Trend Strategy', line=dict(color='#00ff00')))
fig_eq.add_trace(go.Scatter(x=res_df.index, y=res_df['Equity_Benchmark'], name='Buy & Hold', line=dict(color='gray', dash='dot')))
fig_eq.update_layout(title="Equity Curve", template="plotly_dark", height=400)
st.plotly_chart(fig_eq, use_container_width=True)

# 3. Drawdown Chart
with st.expander("📉 Drawdown Analysis", expanded=False):
fig_dd = go.Figure()
fig_dd.add_trace(go.Scatter(
x=res_df.index, y=res_df['DD_Strategy'] * 100,
x=res_df.index, y=res_df['DD_Strategy'] * 100,
name='Strategy Drawdown', fill='tozeroy',
line=dict(color='#ff4b4b')
))
fig_dd.add_trace(go.Scatter(
x=res_df.index, y=res_df['DD_Benchmark'] * 100,
x=res_df.index, y=res_df['DD_Benchmark'] * 100,
name='Benchmark Drawdown',
line=dict(color='gray', dash='dot')
))
Expand All @@ -889,36 +890,36 @@ def get_cache_key(*args) -> str:
height=300
)
st.plotly_chart(fig_dd, use_container_width=True)

# 4. Conditional Analysis
st.markdown("### 🔬 Conditional Performance by Regime")
st.info("Does the strategy outperform during High Volatility?")

# Merge
comparison = pd.concat([cond_stats.add_suffix('_Strat'), bench_cond.add_suffix('_Bench')], axis=1)

# Reorder columns - handle missing columns gracefully
available_cols = []
for col in ['Ann_Return_Strat', 'Ann_Return_Bench', 'Sharpe_Strat', 'Sharpe_Bench', 'WinRate_Strat']:
if col in comparison.columns:
available_cols.append(col)
comparison = comparison[available_cols]

st.dataframe(comparison.style.background_gradient(cmap='RdYlGn', subset=['Ann_Return_Strat', 'Sharpe_Strat']).format("{:.2f}"))

st.markdown("**Key Insight:** Compare 'Sharpe_Strat' vs 'Sharpe_Bench' in the **High** volatility row.")

# 5. Walk-Forward Validation (Advanced)
with st.expander("🚀 Walk-Forward Validation (Advanced)", expanded=False):
st.markdown("""
Walk-forward validation splits data into rolling train/test windows to evaluate
Walk-forward validation splits data into rolling train/test windows to evaluate
out-of-sample performance. This is more rigorous than a single full-sample backtest.
""")

wf_col1, wf_col2 = st.columns(2)
wf_train = wf_col1.number_input("Training Window (months)", value=24, min_value=6, max_value=60)
wf_test = wf_col2.number_input("Test Window (months)", value=6, min_value=1, max_value=12)

if st.button("Run Walk-Forward Analysis"):
with st.spinner("Running walk-forward validation..."):
wf_results = backtester.walk_forward_backtest(
Expand All @@ -928,16 +929,16 @@ def get_cache_key(*args) -> str:
cost_bps=bt_cost,
rebalance_freq='M'
)

if wf_results:
st.success(f"✅ Completed {wf_results['n_periods']} walk-forward periods")

wf_summary = wf_results['summary']
wf_c1, wf_c2, wf_c3 = st.columns(3)
wf_c1.metric("OOS CAGR", f"{wf_summary.get('CAGR', 0):.2%}")
wf_c2.metric("OOS Sharpe", f"{wf_summary.get('Sharpe', 0):.2f}")
wf_c3.metric("OOS Max DD", f"{wf_summary.get('MaxDD', 0):.2%}")

# Show per-period results
st.markdown("#### Per-Period Results")
period_data = []
Expand All @@ -959,11 +960,11 @@ def get_cache_key(*args) -> str:
# --- TAB 4: REPORT ---
with tab_rep:
st.subheader("Research Note Generation")

st.markdown("### Findings Summary")
st.write(f"**Asset**: {ticker}")
st.write(f"**Trend Model**: {sma_window}-Day SMA")

if not res_df.empty:
# Create text summary
high_vol_perf = cond_stats.loc['High', 'Sharpe'] if 'High' in cond_stats.index else 0
Expand All @@ -978,13 +979,13 @@ def get_cache_key(*args) -> str:
sweep_std = sweep_df.groupby("Regime")["Sharpe"].std().dropna()
if not sweep_std.empty:
sweep_stability = ", ".join([f"{k}: {v:.2f}" for k, v in sweep_std.items()])

st.success(f"Strategy Sharpe in High Vol: **{high_vol_perf:.2f}**")
st.info(f"Strategy Sharpe in Normal Vol: **{normal_vol_perf:.2f}**")
st.write(f"**Regime Sensitivity (Sharpe High - Normal)**: {regime_sensitivity.get('Sharpe_Diff', np.nan):.2f}")
st.write(f"**Top Transition Risk**: {transition_risk}")
st.write(f"**Sweep Stability (Sharpe Std)**: {sweep_stability}")

st.download_button(
label="Download Full Research Data (CSV)",
data=res_df.to_csv().encode('utf-8'),
Expand Down
17 changes: 17 additions & 0 deletions src/modules/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,3 +15,20 @@
regime_analysis,
sweep,
)

# Explicit public API of the `modules` package: names re-exported for
# `from modules import *` and recognized by static analyzers.
__all__ = [
    "data_model",
    "signals",
    "signals_advanced",
    "backtester",
    "config",
    "portfolio",
    "risk",
    "factors",
    "scenario",
    "liquidity",
    "alerts",
    "reporting",
    "regime_analysis",
    "sweep",
]
Loading