Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .Jules/palette.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
## 2024-05-15 - Improve Statistical Input Clarity
**Learning:** Users often struggle to interpret abstract statistical or financial parameters (e.g., quantiles, basis points) when they lack concrete real-world context, leading to confusion when configuring research models.
**Action:** Always provide concrete, interpretable examples in the `help` tooltips for statistical and financial input widgets (e.g., "10 bps = 0.10%").
86 changes: 43 additions & 43 deletions src/dashboard.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,9 +168,9 @@ def get_cache_key(*args) -> str:
help="Lookback months for Momentum signal."
)
else:
factor_window = st.slider("Factor Beta Window (days)", 20, 252, 63, 7)
vol_window = st.slider("Regime Vol Window (days)", 10, 60, 21, 5)
adv_pct = st.slider("ADV Participation %", 0.01, 0.30, float(DEFAULT_ADV_PCT), 0.01)
factor_window = st.slider("Factor Beta Window (days)", 20, 252, 63, 7, help="Lookback period for calculating rolling factor betas. Example: 63 days = 3 months.")
vol_window = st.slider("Regime Vol Window (days)", 10, 60, 21, 5, help="Lookback period for calculating realized volatility regimes. Example: 21 days = 1 month.")
adv_pct = st.slider("ADV Participation %", 0.01, 0.30, float(DEFAULT_ADV_PCT), 0.01, help="Maximum participation rate of Average Daily Volume. Example: 0.10 = 10% of ADV.")

st.markdown("---")
st.subheader("4. Research Rigor")
Expand All @@ -185,19 +185,19 @@ def get_cache_key(*args) -> str:
st.info("Using full-sample quantiles (exploratory mode)")

vol_q_high = st.slider(
"High Volatility Quantile", 0.5, 0.95, DEFAULT_VOL_QUANTILE_HIGH, 0.05
"High Volatility Quantile", 0.5, 0.95, DEFAULT_VOL_QUANTILE_HIGH, 0.05, help="Threshold for classifying high volatility days. Example: 0.75 means the top 25% most volatile days."
)

if mode == "Single-Asset":
st.subheader("5. Backtest Settings")
bt_cost = st.number_input("Transaction Cost (bps)", value=DEFAULT_COST_BPS, step=1) / 10000
bt_cost = st.number_input("Transaction Cost (bps)", value=DEFAULT_COST_BPS, step=1, help="Trading friction applied per transaction. Example: 10 bps = 0.10%.") / 10000
allow_short = st.checkbox("Allow Short Selling?", value=False)
else:
st.subheader("5. Alert Thresholds")
dd_alert = st.slider("Max Drawdown Alert", -0.6, -0.05, -0.2, 0.05)
vol_alert = st.slider("Volatility Alert (ann.)", 0.1, 1.0, 0.35, 0.05)
beta_alert = st.slider("Beta Alert", 0.5, 2.0, 1.3, 0.1)
dttl_alert = st.slider("Days-to-Liquidate Alert", 1.0, 20.0, 5.0, 1.0)
dd_alert = st.slider("Max Drawdown Alert", -0.6, -0.05, -0.2, 0.05, help="Triggers an alert if portfolio drawdown exceeds this level. Example: -0.20 = 20% decline.")
vol_alert = st.slider("Volatility Alert (ann.)", 0.1, 1.0, 0.35, 0.05, help="Triggers an alert if annualized volatility exceeds this level. Example: 0.35 = 35% vol.")
beta_alert = st.slider("Beta Alert", 0.5, 2.0, 1.3, 0.1, help="Triggers an alert if portfolio beta exceeds this level. Example: 1.30 means the portfolio tends to move 1.3x the benchmark's moves.")
dttl_alert = st.slider("Days-to-Liquidate Alert", 1.0, 20.0, 5.0, 1.0, help="Triggers an alert if days required to liquidate the portfolio exceeds this level. Example: 5.0 = 5 trading days.")


# --- Portfolio Mode ---
Expand Down Expand Up @@ -650,9 +650,9 @@ def get_cache_key(*args) -> str:
# --- Regime Detection ---
# Using 21-day annualized vol with option for out-of-sample analysis
df = signals.detect_volatility_regime(
df,
vol_col='Vol_21d',
quantile_high=vol_q_high,
df,
vol_col='Vol_21d',
quantile_high=vol_q_high,
quantile_low=0.25,
use_expanding=use_oos # Toggle between in-sample and out-of-sample
)
Expand Down Expand Up @@ -729,15 +729,15 @@ def get_cache_key(*args) -> str:
fig = go.Figure()
fig.add_trace(go.Scatter(x=df.index, y=df['Close'], name='Close Price', line=dict(color='white', width=1)))
fig.add_trace(go.Scatter(x=df.index, y=df[f'SMA_{sma_window}'], name=f'{sma_window}-Day SMA', line=dict(color='#ff9f43', width=1)))

# Highlight High Volatility Regimes
# Filter high vol periods
high_vol_mask = df['Vol_Regime'] == 'High'
# We can plot markers or shade areas. Shading is valid but tricky in Plotly without shapes list.
# Let's plot points
high_vol_pts = df[high_vol_mask]
fig.add_trace(go.Scatter(x=high_vol_pts.index, y=high_vol_pts['Close'], mode='markers', name='High Volatility', marker=dict(color='red', size=2)))

fig.update_layout(
title=f"{ticker} Price History & Regime Context",
yaxis_title="Price ($)",
Expand All @@ -751,21 +751,21 @@ def get_cache_key(*args) -> str:
# --- TAB 2: REGIME ANALYSIS ---
with tab_regime:
st.subheader("Volatility Regime Classification")

c1, c2 = st.columns(2)
with c1:
# Scatter: Vol vs Returns needed? Maybe just distribution
fig_hist = px.histogram(df, x="Vol_21d", color="Vol_Regime", nbins=50, title="Volatility Distribution", template="plotly_dark",
color_discrete_map={"High": "#ff4b4b", "Low": "#00ff00", "Normal": "#888888"})
st.plotly_chart(fig_hist, use_container_width=True)

with c2:
# Pie chart of time spent in regimes
regime_counts = df['Vol_Regime'].value_counts()
fig_pie = px.pie(values=regime_counts, names=regime_counts.index, title="Time Spent in Regimes", template="plotly_dark",
color=regime_counts.index, color_discrete_map={"High": "#ff4b4b", "Low": "#00ff00", "Normal": "#888888"})
st.plotly_chart(fig_pie, use_container_width=True)

st.markdown("### Regime Characteristics")
stats = df.groupby('Vol_Regime')[['Daily_Return', 'Vol_21d']].mean()
# Annualize return
Expand Down Expand Up @@ -828,57 +828,57 @@ def get_cache_key(*args) -> str:
# --- TAB 3: BACKTEST ---
with tab_bt:
st.subheader("Strategy Simulation")

# Out-of-sample mode indicator
if use_oos:
st.success("🔬 **Out-of-Sample Mode Active** - Regime classification uses only past data at each point")

if not res_df.empty:

# 1. Global Metrics with Bootstrap CI
strat_metrics = backtester.calculate_perf_metrics(
res_df['Equity_Strategy'],
res_df['Equity_Strategy'],
include_bootstrap_ci=True,
n_bootstrap=500
)
bench_metrics = backtester.calculate_perf_metrics(res_df['Equity_Benchmark'])

col_m1, col_m2, col_m3, col_m4 = st.columns(4)
col_m1.metric("Global CAGR", f"{strat_metrics['CAGR']:.2%}")

# Show Sharpe with CI if available
sharpe_display = f"{strat_metrics['Sharpe']:.2f}"
if strat_metrics.get('Sharpe_CI_Lower') is not None:
sharpe_display += f" [{strat_metrics['Sharpe_CI_Lower']:.2f}, {strat_metrics['Sharpe_CI_Upper']:.2f}]"
col_m2.metric("Sharpe (95% CI)", sharpe_display)

col_m3.metric("Max Drawdown", f"{strat_metrics['MaxDD']:.2%}")
col_m4.metric("Max DD Duration", f"{strat_metrics.get('MaxDD_Duration', 0)} days")

# Additional metrics row
col_a1, col_a2, col_a3, col_a4 = st.columns(4)
col_a1.metric("Sortino", f"{strat_metrics.get('Sortino', 0):.2f}")
col_a2.metric("Calmar", f"{strat_metrics.get('Calmar', 0):.2f}")
col_a3.metric("Win Rate", f"{strat_metrics.get('WinRate', 0):.1%}")
col_a4.metric("Avg DD Duration", f"{strat_metrics.get('AvgDD_Duration', 0):.0f} days")

# 2. Equity Curve
fig_eq = go.Figure()
fig_eq.add_trace(go.Scatter(x=res_df.index, y=res_df['Equity_Strategy'], name='Trend Strategy', line=dict(color='#00ff00')))
fig_eq.add_trace(go.Scatter(x=res_df.index, y=res_df['Equity_Benchmark'], name='Buy & Hold', line=dict(color='gray', dash='dot')))
fig_eq.update_layout(title="Equity Curve", template="plotly_dark", height=400)
st.plotly_chart(fig_eq, use_container_width=True)

# 3. Drawdown Chart
with st.expander("📉 Drawdown Analysis", expanded=False):
fig_dd = go.Figure()
fig_dd.add_trace(go.Scatter(
x=res_df.index, y=res_df['DD_Strategy'] * 100,
x=res_df.index, y=res_df['DD_Strategy'] * 100,
name='Strategy Drawdown', fill='tozeroy',
line=dict(color='#ff4b4b')
))
fig_dd.add_trace(go.Scatter(
x=res_df.index, y=res_df['DD_Benchmark'] * 100,
x=res_df.index, y=res_df['DD_Benchmark'] * 100,
name='Benchmark Drawdown',
line=dict(color='gray', dash='dot')
))
Expand All @@ -889,36 +889,36 @@ def get_cache_key(*args) -> str:
height=300
)
st.plotly_chart(fig_dd, use_container_width=True)

# 4. Conditional Analysis
st.markdown("### 🔬 Conditional Performance by Regime")
st.info("Does the strategy outperform during High Volatility?")

# Merge
comparison = pd.concat([cond_stats.add_suffix('_Strat'), bench_cond.add_suffix('_Bench')], axis=1)

# Reorder columns - handle missing columns gracefully
available_cols = []
for col in ['Ann_Return_Strat', 'Ann_Return_Bench', 'Sharpe_Strat', 'Sharpe_Bench', 'WinRate_Strat']:
if col in comparison.columns:
available_cols.append(col)
comparison = comparison[available_cols]

st.dataframe(comparison.style.background_gradient(cmap='RdYlGn', subset=['Ann_Return_Strat', 'Sharpe_Strat']).format("{:.2f}"))

st.markdown("**Key Insight:** Compare 'Sharpe_Strat' vs 'Sharpe_Bench' in the **High** volatility row.")

# 5. Walk-Forward Validation (Advanced)
with st.expander("🚀 Walk-Forward Validation (Advanced)", expanded=False):
st.markdown("""
Walk-forward validation splits data into rolling train/test windows to evaluate
out-of-sample performance. This is more rigorous than a single full-sample backtest.
""")

wf_col1, wf_col2 = st.columns(2)
wf_train = wf_col1.number_input("Training Window (months)", value=24, min_value=6, max_value=60)
wf_test = wf_col2.number_input("Test Window (months)", value=6, min_value=1, max_value=12)

if st.button("Run Walk-Forward Analysis"):
with st.spinner("Running walk-forward validation..."):
wf_results = backtester.walk_forward_backtest(
Expand All @@ -928,16 +928,16 @@ def get_cache_key(*args) -> str:
cost_bps=bt_cost,
rebalance_freq='M'
)

if wf_results:
st.success(f"✅ Completed {wf_results['n_periods']} walk-forward periods")

wf_summary = wf_results['summary']
wf_c1, wf_c2, wf_c3 = st.columns(3)
wf_c1.metric("OOS CAGR", f"{wf_summary.get('CAGR', 0):.2%}")
wf_c2.metric("OOS Sharpe", f"{wf_summary.get('Sharpe', 0):.2f}")
wf_c3.metric("OOS Max DD", f"{wf_summary.get('MaxDD', 0):.2%}")

# Show per-period results
st.markdown("#### Per-Period Results")
period_data = []
Expand All @@ -959,11 +959,11 @@ def get_cache_key(*args) -> str:
# --- TAB 4: REPORT ---
with tab_rep:
st.subheader("Research Note Generation")

st.markdown("### Findings Summary")
st.write(f"**Asset**: {ticker}")
st.write(f"**Trend Model**: {sma_window}-Day SMA")

if not res_df.empty:
# Create text summary
high_vol_perf = cond_stats.loc['High', 'Sharpe'] if 'High' in cond_stats.index else 0
Expand All @@ -978,13 +978,13 @@ def get_cache_key(*args) -> str:
sweep_std = sweep_df.groupby("Regime")["Sharpe"].std().dropna()
if not sweep_std.empty:
sweep_stability = ", ".join([f"{k}: {v:.2f}" for k, v in sweep_std.items()])

st.success(f"Strategy Sharpe in High Vol: **{high_vol_perf:.2f}**")
st.info(f"Strategy Sharpe in Normal Vol: **{normal_vol_perf:.2f}**")
st.write(f"**Regime Sensitivity (Sharpe High - Normal)**: {regime_sensitivity.get('Sharpe_Diff', np.nan):.2f}")
st.write(f"**Top Transition Risk**: {transition_risk}")
st.write(f"**Sweep Stability (Sharpe Std)**: {sweep_stability}")

st.download_button(
label="Download Full Research Data (CSV)",
data=res_df.to_csv().encode('utf-8'),
Expand Down
Loading