Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 1 addition & 3 deletions kalibr/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ def call_openai(prompt):
from .feedback import (
KalibrFeedback, track_run, user_rejected, user_accepted, get_feedback,
classify_satisfaction, classify_satisfaction_async, emit_signal,
report_pipeline, report_user_turn, report_action, request_feedback, submit_feedback,
report_pipeline, report_user_turn, report_action,
)

if os.getenv("KALIBR_AUTO_INSTRUMENT", "true").lower() == "true":
Expand Down Expand Up @@ -204,6 +204,4 @@ def call_openai(prompt):
"report_pipeline",
"report_user_turn",
"report_action",
"request_feedback",
"submit_feedback",
]
72 changes: 0 additions & 72 deletions kalibr/feedback.py
Original file line number Diff line number Diff line change
Expand Up @@ -658,78 +658,6 @@ def _send():
logger.warning("report_action failed: %s", e)


_DIMENSION_DEFS = {
"length": {"label": "Length", "options": [-1, 0, 1], "labels": ["Too long", "Good", "Too short"]},
"tone": {"label": "Tone", "options": [-1, 0, 1], "labels": ["Too casual", "Good", "Too formal"]},
"content": {"label": "Content", "options": [-1, 0, 1], "labels": ["Off-topic", "Good", "Missing info"]},
"factuality": {"label": "Factuality", "options": [-1, 0, 1], "labels": ["Inaccurate", "Good", "Unsure"]},
"format": {"label": "Format", "options": [-1, 0, 1], "labels": ["Poor format", "Good", "Needs restructuring"]},
}


def request_feedback(
    dimensions: Optional[list] = None,
) -> dict:
    """
    SDK-only. Build a structured feedback-form dict that the developer
    renders to their users. Makes no network call.

    dimensions: optional list of dimension keys to include; defaults to
    every known dimension. Unknown keys receive a generic label and the
    standard -1/0/1 option set.
    """
    requested = dimensions
    if requested is None:
        requested = ["length", "tone", "content", "factuality", "format"]

    form_dimensions = {}
    for key in requested:
        # Fall back to a generic definition for keys we don't know about.
        form_dimensions[key] = _DIMENSION_DEFS.get(
            key,
            {"label": key.title(), "options": [-1, 0, 1], "labels": ["Bad", "Good", "Needs work"]},
        )

    return {
        "question": "How was this response?",
        "dimensions": form_dimensions,
    }


def submit_feedback(session_id: str, ratings: dict) -> None:
    """
    Submit explicit user feedback for a session. Emits ONE signal row per
    dimension with a non-zero rating; neutral (0) ratings are skipped.
    Delivery happens on a daemon thread — fire-and-forget.

    ratings: mapping of dimension name -> rating, e.g.
        {"length": -1, "tone": 0, "content": 1, ...}
    Rating values: -1 (negative), 0 (neutral — skipped), 1 (positive).
    """
    try:
        session = _read_session(session_id)
        if session is None:
            return

        api_key, tenant_id, base_url = _fb_config()
        # Both credentials are required to emit anything.
        if not (api_key and tenant_id):
            return

        trace_id = session.get("trace_id", "")
        goal = session.get("goal", "")

        def _deliver():
            # Best-effort delivery: swallow any error so user-facing code
            # never sees a feedback-transport failure.
            try:
                for dim, value in ratings.items():
                    if value == 0:
                        continue  # neutral: no signal row
                    _emit_signal_http(base_url, api_key, tenant_id, {
                        "trace_id": trace_id,
                        "signal_type": "explicit_feedback",
                        "signal_source": "explicit_feedback",
                        # -1 maps to strength 0.0; any other non-zero rating to 1.0.
                        "strength": 1.0 if value != -1 else 0.0,
                        "confidence": 1.0,
                        "dimension": dim,
                        "goal": goal,
                        "session_id": session_id,
                    })
            except Exception:
                pass

        threading.Thread(target=_deliver, daemon=True).start()
    except Exception as e:
        logger.warning("submit_feedback failed: %s", e)


def emit_signal(
signal_type: str,
strength: float = 0.5,
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "kalibr"
version = "1.12.0"
version = "1.12.1"
description = "Outcome-aware LLM routing for production AI agents. Routes between models, tools, and parameters based on real success signals using Thompson Sampling. Automatic fallback, cost optimization, and continuous learning — no redeploy required."
authors = [{name = "Kalibr Team", email = "support@kalibr.systems"}]
readme = "README.md"
Expand Down
Loading