16 commits
f97f989
fix(log-viewer): improve workflow timeline modal error handling and d…
rnetser Jan 19, 2026
9c74860
fix(log-viewer): add missing context fields to workflow timeline resp…
rnetser Jan 19, 2026
c072bd1
fix(log-viewer): address CodeRabbit review comments for workflow time…
rnetser Jan 19, 2026
7bfa2a5
fix(log-viewer): address CodeRabbit review comments
rnetser Jan 19, 2026
606b4a4
fix(log-viewer): add debug logging for timestamp parse failures
rnetser Jan 19, 2026
2016994
fix(log-viewer): address CodeRabbit review comments
rnetser Jan 19, 2026
d0f50be
fix(log-viewer): address CodeRabbit review comments
rnetser Jan 19, 2026
6ffad81
fix(log-viewer): address CodeRabbit review comments
rnetser Jan 19, 2026
d6d2041
fix(log-viewer): add type validation for pr_info and fix error messag…
rnetser Jan 19, 2026
b7980db
Merge branch 'main' of github.com:myk-org/github-webhook-server into …
rnetser Jan 20, 2026
b67f830
fix(log-viewer): use step_name for log search instead of full message
rnetser Jan 20, 2026
ed6c3c6
feat(log-viewer): add time-based log retrieval for workflow steps
rnetser Jan 20, 2026
0952f57
fix(log-viewer): address CodeRabbit review comments for step-logs
rnetser Jan 20, 2026
3a05fc0
refactor(tests): extract shared fixtures and fail fast on missing tim…
rnetser Jan 21, 2026
1570ba1
fix(tests): remove unused tmp_path parameters from fixtures
rnetser Jan 21, 2026
cdb2902
fix(tests): improve test fixtures to mirror production behavior
rnetser Jan 21, 2026
74 changes: 74 additions & 0 deletions webhook_server/app.py
@@ -17,6 +17,7 @@
Depends,
FastAPI,
HTTPException,
Path,
Query,
Request,
Response,
@@ -99,6 +100,47 @@ def require_log_server_enabled() -> None:
)


async def require_trusted_network(request: Request) -> None:
"""Dependency to restrict log viewer access to trusted networks only.

This provides an additional layer of security for log viewer endpoints by
checking if the client IP is from a trusted/private network. Currently checks
for private IP ranges (RFC 1918), loopback, and link-local addresses.

Security Warning:
This check can be bypassed if the server is behind a reverse proxy that
doesn't properly set X-Forwarded-For headers. For production deployments,
always deploy log viewer endpoints on trusted networks (VPN, internal network).
"""
client_host = request.client.host if request.client else None

if not client_host:
# No client IP available - deny access
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Access denied: Unable to determine client IP address.",
)

try:
client_ip = ipaddress.ip_address(client_host)
except ValueError:
# Invalid IP address format - deny access
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Access denied: Invalid client IP address format.",
) from None

# Allow private networks (RFC 1918), loopback, and link-local
is_trusted = client_ip.is_private or client_ip.is_loopback or client_ip.is_link_local

if not is_trusted:
LOGGER.warning(f"Log viewer access denied from untrusted IP: {client_host}")
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Access denied: Log viewer is only accessible from trusted networks.",
)
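
For illustration only (not part of the diff): a minimal sketch of how the stdlib ipaddress checks used above classify a few sample client addresses. The addresses are arbitrary examples.

import ipaddress

# Arbitrary sample addresses to illustrate the trust check above.
for host in ["127.0.0.1", "10.0.0.7", "192.168.1.20", "169.254.10.5", "8.8.8.8"]:
    ip = ipaddress.ip_address(host)
    trusted = ip.is_private or ip.is_loopback or ip.is_link_local
    print(f"{host}: trusted={trusted}")
# Loopback, RFC 1918, and link-local addresses print trusted=True; 8.8.8.8 prints trusted=False.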


@asynccontextmanager
async def lifespan(_app: FastAPI) -> AsyncGenerator[None]:
global _lifespan_http_client
@@ -1144,6 +1186,38 @@ async def get_workflow_steps(hook_id: str, controller: LogViewerController = con
return await get_workflow_steps_core(controller=controller, hook_id=hook_id)


@FASTAPI_APP.get(
"/logs/api/step-logs/{hook_id}/{step_name}",
operation_id="get_step_logs",
dependencies=[Depends(require_log_server_enabled), Depends(require_trusted_network)],
)
async def get_step_logs(
hook_id: str = Path(..., min_length=1, max_length=100),
step_name: str = Path(..., min_length=1, max_length=100),
controller: LogViewerController = controller_dependency,
) -> dict[str, Any]:
"""Retrieve log entries that occurred during a specific workflow step's execution.

This endpoint provides time-based correlation of log entries with workflow steps,
allowing detailed analysis of what happened during each step's execution window.

Parameters:
- hook_id: GitHub webhook delivery ID
- step_name: Name of the workflow step (e.g., "clone_repository", "webhook_routing")

Returns:
- step: Metadata about the step (name, status, timestamp, duration_ms, error)
- logs: Array of log entries that occurred during the step's execution
- log_count: Number of log entries found

Error Conditions:
- 404: Hook ID not found in logs
- 404: Step name not found in workflow steps for the given hook ID
- 500: Internal server error
"""
return await controller.get_step_logs(hook_id=hook_id, step_name=step_name)
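
A hedged usage sketch (not part of the diff) of calling the new endpoint with httpx. The base URL, hook_id, and step_name values are assumptions for illustration, and the request must originate from a trusted network (e.g., loopback) to pass require_trusted_network.

import httpx

# Hypothetical deployment address - the real host/port depend on how the server is run.
BASE_URL = "http://localhost:5000"

# Illustrative hook_id and step_name; real values come from actual webhook deliveries.
resp = httpx.get(f"{BASE_URL}/logs/api/step-logs/test-hook-123/clone_repository")
resp.raise_for_status()
data = resp.json()

# Per the docstring above: step metadata, the matching log entries, and their count.
print(data["step"]["status"], data["log_count"])
for entry in data["logs"]:
    print(entry)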


@FASTAPI_APP.websocket("/logs/ws")
async def websocket_log_stream(
websocket: WebSocket,
138 changes: 138 additions & 0 deletions webhook_server/tests/conftest.py
@@ -1,7 +1,10 @@
import json
import logging as python_logging
import os
from dataclasses import dataclass
from datetime import datetime, timedelta
from pathlib import Path
from unittest.mock import Mock

import pytest
import yaml
@@ -200,3 +203,138 @@ def owners_files_test_data():
Uses centralized OWNERS_TEST_DATA constant to ensure consistency.
"""
return {path: yaml.dump(data) for path, data in OWNERS_TEST_DATA.items()}


# === Log Viewer Shared Fixtures ===


@pytest.fixture
def mock_logger():
"""Create a mock logger that mirrors production logger attributes."""
mock = Mock(spec=python_logging.Logger)
mock.name = "webhook_server.tests"
mock.level = python_logging.INFO
return mock


@pytest.fixture
def sample_json_webhook_data() -> dict:
"""Create sample JSON webhook log data with workflow steps.

Used by test_log_viewer.py tests for JSON log parsing and workflow step retrieval.
"""
return {
"hook_id": "test-hook-123",
"event_type": "pull_request",
"action": "opened",
"repository": "org/test-repo",
"sender": "test-user",
"pr": {
"number": 456,
"title": "Test PR",
"url": "https://github.com/org/test-repo/pull/456",
},
"timing": {
"started_at": "2025-01-05T10:00:00.000000Z",
"completed_at": "2025-01-05T10:00:05.000000Z",
"duration_ms": 5000,
},
"workflow_steps": {
"clone_repository": {
"timestamp": "2025-01-05T10:00:01.000000Z",
"status": "completed",
"duration_ms": 1500,
},
"assign_reviewers": {
"timestamp": "2025-01-05T10:00:02.500000Z",
"status": "completed",
"duration_ms": 800,
},
"apply_labels": {
"timestamp": "2025-01-05T10:00:03.500000Z",
"status": "failed",
"duration_ms": 200,
"error": {"type": "ValueError", "message": "Label not found"},
},
},
"token_spend": 35,
"success": False,
"error": {
"type": "TestError",
"message": "Test failure message for unit tests",
},
}


@pytest.fixture
def create_json_log_file():
"""Factory fixture to create test JSON log files.

Returns a callable that accepts log_dir, filename, and entries parameters.
Tests pass their own tmp_path to the returned factory function.

Usage:
def test_example(create_json_log_file, tmp_path):
log_dir = tmp_path / "logs"
log_dir.mkdir()
create_json_log_file(log_dir, "webhooks_2025-01-05.json", [entry_dict])
"""

def _create_json_log_file(log_dir: Path, filename: str, entries: list[dict]) -> Path:
"""Create a test JSON log file with entries in JSONL format.

The log viewer expects JSONL format (JSON Lines): one compact JSON object per line.
This matches production behavior where each webhook log entry is written as a single
line for efficient streaming and parsing.

Args:
log_dir: Directory to create the log file in
filename: Name of the log file
entries: List of JSON webhook data dictionaries

Returns:
Path to created log file
"""
log_file = log_dir / filename
with open(log_file, "w", encoding="utf-8") as f:
for entry in entries:
# JSONL format: one compact JSON object per line (no indentation)
# This matches production log format and log_viewer._stream_json_log_entries()
f.write(json.dumps(entry) + "\n")
return log_file

return _create_json_log_file
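
As a minimal illustration (hypothetical test, not part of the diff), the two fixtures above can be combined to verify that the factory writes valid JSONL:

import json


def test_json_log_file_is_valid_jsonl(create_json_log_file, sample_json_webhook_data, tmp_path):
    """Illustrative test: each entry should round-trip as one compact JSON line."""
    log_dir = tmp_path / "logs"
    log_dir.mkdir()
    log_file = create_json_log_file(log_dir, "webhooks_2025-01-05.json", [sample_json_webhook_data])

    lines = log_file.read_text(encoding="utf-8").splitlines()
    assert len(lines) == 1
    assert json.loads(lines[0])["hook_id"] == "test-hook-123"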


@pytest.fixture
def create_text_log_file():
"""Factory fixture to create test text log files.

Returns a callable that accepts log_dir, filename, and log_lines parameters.
Tests pass their own tmp_path to the returned factory function.

Usage:
def test_example(create_text_log_file, tmp_path):
log_dir = tmp_path / "logs"
log_dir.mkdir()
create_text_log_file(log_dir, "webhook-server.log", ["line1", "line2"])
"""

def _create_text_log_file(log_dir: Path, filename: str, log_lines: list[str]) -> Path:
"""Create a test text log file with log lines.

Args:
log_dir: Directory to create the log file in
filename: Name of the log file
log_lines: List of log line strings

Returns:
Path to created log file
"""
log_file = log_dir / filename
with open(log_file, "w", encoding="utf-8") as f:
for line in log_lines:
f.write(line + "\n")
return log_file

return _create_text_log_file