Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 16 additions & 3 deletions packages/backend/app/routes/categories.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from flask_jwt_extended import jwt_required, get_jwt_identity
from ..extensions import db
from ..models import Category
from ..services.cache import TTL, cache_delete, cache_get, cache_set, categories_key

bp = Blueprint("categories", __name__)
logger = logging.getLogger("finmind.categories")
Expand All @@ -12,11 +13,21 @@
@jwt_required()
def list_categories():
    """Return the caller's categories as ``[{"id", "name"}, ...]``.

    Categories change rarely, so the serialized result is cached under the
    per-user key for ``TTL.STATIC`` (1 hour); the write endpoints in this
    module invalidate that key on every mutation.
    """
    uid = int(get_jwt_identity())
    key = categories_key(uid)

    # Serve from cache when warm — avoids the DB round-trip entirely.
    cached = cache_get(key)
    if cached is not None:
        logger.debug("categories cache HIT user=%s", uid)
        return jsonify(cached)

    items = (
        db.session.query(Category).filter_by(user_id=uid).order_by(Category.name).all()
    )
    result = [{"id": c.id, "name": c.name} for c in items]
    cache_set(key, result, ttl_seconds=TTL.STATIC)
    logger.info("List categories user=%s count=%s (cache MISS)", uid, len(items))
    return jsonify(result)


@bp.post("")
Expand All @@ -28,13 +39,13 @@ def create_category():
if not name:
logger.warning("Create category missing name user=%s", uid)
return jsonify(error="name required"), 400
# Optional: enforce unique name per user
exists = db.session.query(Category).filter_by(user_id=uid, name=name).first()
if exists:
return jsonify(error="category already exists"), 409
c = Category(user_id=uid, name=name)
db.session.add(c)
db.session.commit()
cache_delete(categories_key(uid)) # invalidate
logger.info("Created category id=%s user=%s", c.id, uid)
return jsonify(id=c.id, name=c.name), 201

Expand All @@ -52,6 +63,7 @@ def update_category(category_id: int):
return jsonify(error="name required"), 400
c.name = name
db.session.commit()
cache_delete(categories_key(uid)) # invalidate
logger.info("Updated category id=%s user=%s", c.id, uid)
return jsonify(id=c.id, name=c.name)

Expand All @@ -65,5 +77,6 @@ def delete_category(category_id: int):
return jsonify(error="not found"), 404
db.session.delete(c)
db.session.commit()
cache_delete(categories_key(uid)) # invalidate
logger.info("Deleted category id=%s user=%s", c.id, uid)
return jsonify(message="deleted")
42 changes: 41 additions & 1 deletion packages/backend/app/routes/insights.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@
from flask import Blueprint, jsonify, request
from flask_jwt_extended import jwt_required, get_jwt_identity
from ..services.ai import monthly_budget_suggestion
from ..services.cache import (
TTL, budget_suggestion_key, cache_get, cache_set, get_cache_stats, reset_cache_stats
)
import logging

bp = Blueprint("insights", __name__)
Expand All @@ -13,6 +16,14 @@
def budget_suggestion():
uid = int(get_jwt_identity())
ym = (request.args.get("month") or date.today().strftime("%Y-%m")).strip()

# AI results are expensive — cache for 30 minutes
key = budget_suggestion_key(uid, ym)
cached = cache_get(key)
if cached is not None:
logger.info("Budget suggestion cache HIT user=%s month=%s", uid, ym)
return jsonify(cached)

user_gemini_key = (request.headers.get("X-Gemini-Api-Key") or "").strip() or None
persona = (request.headers.get("X-Insight-Persona") or "").strip() or None
suggestion = monthly_budget_suggestion(
Expand All @@ -21,5 +32,34 @@ def budget_suggestion():
gemini_api_key=user_gemini_key,
persona=persona,
)
logger.info("Budget suggestion served user=%s month=%s", uid, ym)
cache_set(key, suggestion, ttl_seconds=TTL.AI_RESULT)
logger.info("Budget suggestion served user=%s month=%s (cache MISS)", uid, ym)
return jsonify(suggestion)


@bp.get("/cache-stats")
@jwt_required()
def cache_stats():
    """Expose cache hit/miss counters and Redis memory usage.

    Deliberately open to every authenticated user — no admin guard. The
    payload (counters, Redis memory figures) is operational telemetry and
    carries no PII, so any logged-in user may read it while debugging.
    Deployments that need a stricter policy should add a role check here,
    e.g. ``if get_jwt_identity_claims().get('role') != 'ADMIN': abort(403)``.
    """
    stats = get_cache_stats()
    return jsonify(stats)


@bp.delete("/cache-stats")
@jwt_required()
def clear_cache_stats():
    """Zero the cache hit/miss counters.

    Follows the same access policy as ``GET /cache-stats``: any
    authenticated user may call it. The counters are global rather than
    per-user, so a reset by one user affects everyone — an accepted
    trade-off for a lightweight monitoring tool. Add an admin-role check
    if the deployment requires it (see the GET handler).
    """
    reset_cache_stats()
    return jsonify(message="cache stats reset")
225 changes: 198 additions & 27 deletions packages/backend/app/services/cache.py
Original file line number Diff line number Diff line change
@@ -1,47 +1,218 @@
"""
Smart caching strategy for dashboard & analytics (Issue #127).

Architecture:
- Tiered TTLs: static data (long) vs live data (short) vs AI results (medium)
- cache() decorator for route-level caching with auto key generation
- Explicit invalidation on mutations (existing pattern extended)
- Cache stats endpoint for monitoring
- Graceful degradation: cache errors never crash the app
"""

from __future__ import annotations

import functools
import json
from typing import Iterable
import logging
import time
from typing import Any, Callable, Iterable

from flask import request as flask_request
from flask_jwt_extended import get_jwt_identity

from ..extensions import redis_client

logger = logging.getLogger("finmind.cache")


# ── TTL constants ─────────────────────────────────────────────────────────────

class TTL:
    """Tiered TTL strategy: match cache lifetime to data volatility."""

    # Long-lived reference data: categories, user prefs.
    STATIC = 3600
    # Aggregations that tolerate ~10 minutes of staleness.
    ANALYTICS = 600
    # Dashboard summary; refreshed every 5 minutes.
    DASHBOARD = 300
    # Expensive AI output (budget suggestions); keep for half an hour.
    AI_RESULT = 1800
    # Near-live counters: upcoming bills, reminder counts.
    SHORT = 60
    # Sentinel meaning "do not cache".
    REALTIME = 0


# ── Key builders ─────────────────────────────────────────────────────────────

def monthly_summary_key(user_id: int, ym: str) -> str:
    """Cache key for a user's monthly summary in month *ym* (``YYYY-MM``)."""
    return "user:{0}:monthly_summary:{1}".format(user_id, ym)


def categories_key(user_id: int) -> str:
    """Cache key for a user's category list."""
    return "user:%s:categories" % user_id


def upcoming_bills_key(user_id: int) -> str:
    """Cache key for a user's upcoming-bills view."""
    return "user:" + str(user_id) + ":upcoming_bills"


def insights_key(user_id: int, ym: str) -> str:
    """Cache key for a user's insights in month *ym* (``YYYY-MM``)."""
    return ":".join(("insights", str(user_id), ym))


def dashboard_summary_key(user_id: int, ym: str) -> str:
    """Cache key for a user's dashboard summary in month *ym*."""
    return "user:{uid}:dashboard_summary:{ym}".format(uid=user_id, ym=ym)


def cache_set(key: str, value, ttl_seconds: int | None = None):
payload = json.dumps(value)
if ttl_seconds:
redis_client.setex(key, ttl_seconds, payload)
else:
redis_client.set(key, payload)


def cache_get(key: str):
raw = redis_client.get(key)
return json.loads(raw) if raw else None


def cache_delete_patterns(patterns: Iterable[str]):
def budget_suggestion_key(user_id: int, ym: str) -> str:
    """Cache key for the AI budget suggestion of *user_id* in month *ym*."""
    return ":".join(("ai", "budget_suggestion", str(user_id), ym))


# ── Core cache operations ─────────────────────────────────────────────────────

def cache_set(key: str, value: Any, ttl_seconds: int | None = None) -> bool:
    """Serialize *value* to JSON and store it under *key*.

    Args:
        key: Redis key.
        value: Any JSON-serializable payload.
        ttl_seconds: Expiry in seconds. ``None`` stores without expiry;
            zero or negative (i.e. ``TTL.REALTIME``) means "do not cache"
            and the call is a no-op.

    Returns:
        True when the value was stored; False on the REALTIME no-op or on
        any serialization/Redis error (errors are logged, never raised).
    """
    try:
        payload = json.dumps(value)
        if ttl_seconds is None:
            redis_client.set(key, payload)
        elif ttl_seconds > 0:
            redis_client.setex(key, ttl_seconds, payload)
        else:
            # TTL.REALTIME (0) is documented as "no cache"; honour that
            # instead of falling through to a persistent SET, which would
            # cache the value forever.
            return False
        return True
    except Exception as exc:
        logger.warning("cache_set failed key=%s: %s", key, exc)
        return False


def cache_get(key: str) -> Any | None:
    """Fetch and JSON-decode the payload stored at *key*.

    Returns the decoded value, or ``None`` when the key is absent or any
    Redis/JSON error occurs. Errors are logged, never raised.
    """
    try:
        raw = redis_client.get(key)
    except Exception as exc:
        logger.warning("cache_get failed key=%s: %s", key, exc)
        return None
    if not raw:
        return None
    try:
        return json.loads(raw)
    except Exception as exc:
        logger.warning("cache_get failed key=%s: %s", key, exc)
        return None


def cache_delete(key: str) -> bool:
    """Remove *key* from the cache; returns False (and logs) on error."""
    try:
        redis_client.delete(key)
    except Exception as exc:
        logger.warning("cache_delete failed key=%s: %s", key, exc)
        return False
    return True


def cache_delete_patterns(patterns: Iterable[str]) -> int:
    """Delete every key matching any of *patterns*.

    Walks the keyspace with cursor-based SCAN (batches of 100) rather than
    the blocking KEYS command, deleting matches as they are found. A Redis
    failure on one pattern is logged and does not stop processing of the
    remaining patterns.

    Returns:
        Total number of keys deleted across all patterns.
    """
    total = 0
    for pattern in patterns:
        try:
            cursor = 0
            while True:
                cursor, keys = redis_client.scan(cursor=cursor, match=pattern, count=100)
                if keys:
                    redis_client.delete(*keys)
                    total += len(keys)
                # SCAN signals a completed iteration by returning cursor 0.
                if cursor == 0:
                    break
        except Exception as exc:
            logger.warning("cache_delete_patterns failed pattern=%s: %s", pattern, exc)
    return total


# ── Decorator ─────────────────────────────────────────────────────────────────

def cached(key_fn: Callable[..., str], ttl: int = TTL.ANALYTICS):
    """
    Route-level caching decorator.

    Usage:
        @bp.get("/data")
        @jwt_required()
        @cached(lambda: f"mykey:{get_jwt_identity()}", ttl=TTL.ANALYTICS)
        def my_route():
            ...

    key_fn is called at request time so it can access flask context.
    If cache is warm, returns cached JSON directly.
    On miss: calls the wrapped function and caches its response.
    Cache errors never raise — they silently degrade.
    """
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            key = None
            try:
                key = key_fn()
                hit = cache_get(key)
                if hit is not None:
                    logger.debug("cache HIT key=%s", key)
                    _increment_hit()
                    # NOTE(review): jsonify is imported locally here —
                    # presumably to keep flask out of this module's top-level
                    # imports; it could also live at file top. Confirm intent.
                    from flask import jsonify as _jsonify
                    return _jsonify(hit)
                # Counted as a miss only when the lookup itself succeeded.
                # The manual cache_get/cache_set calls elsewhere do not touch
                # these counters, so stats cover decorator-cached routes only.
                _increment_miss()
            except Exception as exc:
                logger.warning("cache lookup failed key=%s: %s", key, exc)

            result = fn(*args, **kwargs)

            try:
                # NOTE(review): assumes the view returns a Response object;
                # a (body, status) tuple would raise AttributeError here and
                # be swallowed by the except below (response still served,
                # just never cached) — confirm views used with @cached.
                if key and result.status_code == 200:
                    data = result.get_json(force=True)
                    # get_json returns None when the response body is not valid
                    # JSON (e.g. empty body, non-JSON content type). Caching
                    # None would poison the key, causing every subsequent cache
                    # hit to return a null JSON response instead of calling the
                    # real handler. Skip caching and serve the original result.
                    if data is None:
                        return result
                    cache_set(key, data, ttl_seconds=ttl)
                    logger.debug("cache SET key=%s ttl=%s", key, ttl)
            except Exception as exc:
                logger.warning("cache store failed key=%s: %s", key, exc)

            return result
        return wrapper
    return decorator


# ── Stats ─────────────────────────────────────────────────────────────────────

_STATS_KEY = "cache:stats"


def _increment_hit() -> None:
    """Bump the global 'hits' counter in the stats hash (best-effort)."""
    try:
        redis_client.hincrby(_STATS_KEY, "hits", 1)
    except Exception as exc:
        # Stats are telemetry only — a Redis outage must not break requests.
        logger.debug("_increment_hit Redis failure (non-critical): %s", exc)


def _increment_miss() -> None:
    """Bump the global 'misses' counter in the stats hash (best-effort)."""
    try:
        redis_client.hincrby(_STATS_KEY, "misses", 1)
    except Exception as exc:
        # Stats are telemetry only — a Redis outage must not break requests.
        logger.debug("_increment_miss Redis failure (non-critical): %s", exc)


def get_cache_stats() -> dict:
    """Return cache hit/miss counters plus Redis memory usage.

    On any Redis failure a ``{"error": ...}`` dict is returned instead of
    raising, so the stats endpoint always produces a JSON body.
    """
    try:
        counters = redis_client.hgetall(_STATS_KEY)
        # The client is initialised with decode_responses=True, so hgetall
        # returns str keys. Looking up b"hits" / b"misses" would always hit
        # the default and report permanently-zero counters.
        hit_count = int(counters.get("hits", 0))
        miss_count = int(counters.get("misses", 0))
        requests = hit_count + miss_count
        rate = round(hit_count / requests * 100, 1) if requests > 0 else 0.0
        memory = redis_client.info("memory")
        return {
            "hits": hit_count,
            "misses": miss_count,
            "total_requests": requests,
            "hit_rate_percent": rate,
            "redis_used_memory_human": memory.get("used_memory_human", "unknown"),
        }
    except Exception as exc:
        logger.warning("get_cache_stats failed: %s", exc)
        return {"error": str(exc)}


def reset_cache_stats() -> None:
    """Delete the shared stats hash, zeroing the hit/miss counters.

    Mirrors the error policy of the other helpers in this module: Redis
    failures are logged at debug level (previously swallowed silently by a
    bare ``except Exception: pass``) and never raised.
    """
    try:
        redis_client.delete(_STATS_KEY)
    except Exception as exc:
        logger.debug("reset_cache_stats Redis failure (non-critical): %s", exc)
Loading