diff --git a/.claude/skills/audit-less/SKILL.md b/.claude/skills/audit-less/SKILL.md
new file mode 100644
index 0000000..8787334
--- /dev/null
+++ b/.claude/skills/audit-less/SKILL.md
@@ -0,0 +1,57 @@
+---
+name: audit-less
+description: Scan LESS stylesheets for variable errors — undefined variables, CSS custom properties passed to compile-time functions (fade, darken, lighten, etc.), and incorrect variable name patterns. Reports issues with file, line, and fix.
+argument-hint: "[file-or-directory]"
+---
+
+Audit LESS stylesheets in ServerKit for recurring build-breaking patterns.
+Scope: **${ARGUMENTS:-frontend/src/styles/}**
+
+## What to Scan For
+
+### Pattern 1: CSS custom properties in LESS compile-time functions
+
+LESS functions like `fade()`, `darken()`, `lighten()`, `saturate()`, `spin()`, `mix()` require **real color values** at compile time. Variables defined as `var(--something)` will fail.
+
+Search for calls to these functions and check if any argument is a variable that resolves to a CSS custom property.
+
+**Broken** — these variables use `var(--...)` and cannot be evaluated by LESS:
+- `@bg-body`, `@bg-sidebar`, `@bg-card`, `@bg-hover`, `@bg-elevated`, `@bg-secondary`, `@bg-tertiary`
+- `@border-default`, `@border-subtle`, `@border-active`, `@border-hover`
+- `@text-primary`, `@text-secondary`, `@text-tertiary`
+- `@accent-primary`, `@accent-hover`, `@accent-glow`, `@accent-shadow`
+- `@shadow-sm`, `@shadow-md`, `@shadow-lg`
+- `@color-primary`
+
+**Fix**: Use the corresponding `*-raw` variant instead (e.g., `@bg-hover` → `@bg-hover-raw`, `@text-tertiary` → `@text-tertiary-raw`, `@accent-primary` → `@accent-primary-raw`).
+
+### Pattern 2: Undefined or misspelled variables
+
+Check for variables that don't exist in `_variables.less`. Common mistakes:
+- `@card-bg` → should be `@bg-card`
+- `@accent-success` → should be `@success`
+- `@accent-danger` → should be `@danger`
+- `@accent-info` → should be `@info`
+- `@accent-warning` → should be `@warning`
+- `@primary-color` → should be `@accent-primary` or `@accent-primary-raw`
+- `@spacing-*` → should be `@space-*`
+
+### Pattern 3: Non-raw variables in theme-sensitive contexts
+
+For any LESS function that manipulates color values (fade, darken, lighten, contrast, saturate, desaturate, spin, mix, tint, shade), the argument MUST be a raw hex/rgb value or a `*-raw` variable.
+
+## Reference: Valid Variable Names
+
+Read `frontend/src/styles/_variables.less` to get the authoritative list of defined variables. Any `@variable` used in a `.less` file that is not in `_variables.less` (and is not a local variable or LESS built-in) is a bug.
+
+## Output Format
+
+For each issue found, report:
+```
+[FILE]:[LINE] — [ISSUE]
+ Found: [problematic code]
+ Fix: [corrected code]
+```
+
+At the end, provide a summary count: `X issues found across Y files`.
+If no issues are found, report: `No LESS variable issues found.`
diff --git a/.claude/skills/create-pr/SKILL.md b/.claude/skills/create-pr/SKILL.md
index bec9f74..2a6a5ae 100644
--- a/.claude/skills/create-pr/SKILL.md
+++ b/.claude/skills/create-pr/SKILL.md
@@ -103,6 +103,13 @@ Omit the Highlights section entirely for internal-only PRs — don't force it.
- Bullets should describe the mechanism, not just the intent. "Race condition in `get_or_create_chat` fixed by moving creation inside the lookup session" is good. "Fix database issues" is not.
- Group related changes together (all typing fixes, all security hardening, all API changes, etc.)
+#### Contributors
+- If the PR includes commits from multiple authors (not just the repo owner), add a **Contributors** section after the summary and before Highlights.
+- Use `git log main..HEAD --format='%aN <%aE>' | sort -u` to find unique commit authors.
+- Exclude bot accounts (e.g., `github-actions[bot]`).
+- Format: `@username` if their GitHub handle is available (check the ARGUMENTS or commit metadata), otherwise use their name. Add a brief note about what they contributed if it's clear from the commits.
+- Keep it short — one line per contributor, no need for a full changelog.
+
#### General
- **No test plan section.** Do not include "Test plan" or "Testing".
- **No mention of tests.** Do not reference test files, test results, or testing.
diff --git a/Dockerfile b/Dockerfile
index 42e6013..eb2faa2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -62,8 +62,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
RUN groupadd -r serverkit && useradd -r -g serverkit serverkit
# Create necessary directories
-RUN mkdir -p /etc/serverkit /var/log/serverkit /var/quarantine \
- && chown -R serverkit:serverkit /etc/serverkit /var/log/serverkit /var/quarantine
+RUN mkdir -p /etc/serverkit /var/log/serverkit /var/quarantine /var/backups/serverkit \
+ && chown -R serverkit:serverkit /etc/serverkit /var/log/serverkit /var/quarantine /var/backups/serverkit
# Set working directory
WORKDIR /app
diff --git a/README.md b/README.md
index f3f53c2..6aec4e5 100644
--- a/README.md
+++ b/README.md
@@ -18,6 +18,7 @@ English | [Español](docs/README.es.md) | [中文版](docs/README.zh-CN.md) | [P

[](https://discord.gg/ZKk6tkCQfG)
+[](https://github.com/jhd3197/ServerKit/stargazers)
[](LICENSE)
[](https://python.org)
[](https://reactjs.org)
@@ -41,55 +42,89 @@ English | [Español](docs/README.es.md) | [中文版](docs/README.zh-CN.md) | [P
## 🎯 Features
-### Apps & Containers
+### 🚀 Apps & Deployment
-🐘 **PHP / WordPress** — PHP-FPM 8.x with one-click WordPress installation
+**PHP / WordPress** — PHP-FPM 8.x with one-click WordPress installation
-🐍 **Python Apps** — Deploy Flask and Django with Gunicorn
+**Python Apps** — Deploy Flask and Django with Gunicorn
-🟢 **Node.js** — PM2-managed applications with log streaming
+**Node.js** — PM2-managed applications with log streaming
-🐳 **Docker** — Full container and Docker Compose management
+**Docker** — Full container and Docker Compose management
-🔑 **Environment Variables** — Secure, encrypted per-app variable management
+**Environment Variables** — Secure, encrypted per-app variable management
-### Infrastructure
+**Git Deployment** — GitHub/GitLab webhooks, auto-deploy on push, branch selection, rollback, zero-downtime deployments
-🌐 **Domain Management** — Nginx virtual hosts with easy configuration
+### 🏗️ Infrastructure
-🔒 **SSL Certificates** — Automatic Let's Encrypt with auto-renewal
+**Domain Management** — Nginx virtual hosts with easy configuration
-🗄️ **Databases** — MySQL/MariaDB and PostgreSQL support
+**SSL Certificates** — Automatic Let's Encrypt with auto-renewal
-🛡️ **Firewall (UFW)** — Visual firewall rule management
+**Databases** — MySQL/MariaDB and PostgreSQL with user management and query interface
-⏰ **Cron Jobs** — Schedule tasks with a visual editor
+**Firewall** — UFW/firewalld with visual rule management and port presets
-📁 **File Manager** — Browse and edit files via web interface
+**Cron Jobs** — Schedule tasks with a visual editor
-📡 **FTP Server** — Manage vsftpd users and access
+**File Manager** — Browse, edit, upload, and download files via web interface
-### Security
+**FTP Server** — Manage vsftpd users and access
-🔐 **Two-Factor Auth** — TOTP-based with backup codes
+**Backup & Restore** — Automated backups to S3, Backblaze B2, or local storage with scheduling, retention policies, and one-click restore
-🦠 **Malware Scanning** — ClamAV integration with quarantine
+**Email Server** — Postfix + Dovecot with DKIM/SPF/DMARC, SpamAssassin, Roundcube webmail, email forwarding rules
-📋 **File Integrity Monitoring** — Detect unauthorized file changes
+### 🔒 Security
-🚨 **Security Alerts** — Real-time threat notifications
+**Two-Factor Auth** — TOTP-based with backup codes
-🧱 **Fail2ban & SSH** — Brute force protection and SSH key management
+**Malware Scanning** — ClamAV integration with quarantine
-### Monitoring & Alerts
+**File Integrity Monitoring** — Detect unauthorized file changes
-📊 **Real-time Metrics** — CPU, RAM, disk, network monitoring via WebSocket
+**Fail2ban & SSH** — Brute force protection, SSH key management, IP allowlist/blocklist
-📈 **Uptime Tracking** — Historical server uptime data
+**Vulnerability Scanning** — Lynis security audits with reports and recommendations
-🔔 **Notifications** — Discord, Slack, Telegram, and generic webhooks
+**Automatic Updates** — unattended-upgrades / dnf-automatic for OS-level patching
-🖥️ **Multi-Server** — Agent-based remote server monitoring and management
+### 🖥️ Multi-Server Management
+
+**Agent-Based Architecture** — Go agent with HMAC-SHA256 authentication and real-time WebSocket gateway
+
+**Fleet Overview** — Centralized dashboard with server grouping, tagging, and health monitoring
+
+**Remote Docker** — Manage containers, images, volumes, networks, and Compose projects across all servers
+
+**API Key Rotation** — Secure credential rotation with acknowledgment handshake
+
+**Cross-Server Metrics** — Historical metrics with comparison charts and retention policies
+
+### 📊 Monitoring & Alerts
+
+**Real-time Metrics** — CPU, RAM, disk, network monitoring via WebSocket
+
+**Uptime Tracking** — Historical server uptime data and visualization
+
+**Notifications** — Discord, Slack, Telegram, email (HTML templates), and generic webhooks
+
+**Per-User Preferences** — Individual notification channels, severity filters, and quiet hours
+
+### 👥 Team & Access Control
+
+**Multi-User** — Admin, developer, and viewer roles with team invitations
+
+**RBAC** — Granular per-feature permissions (read/write per module)
+
+**SSO & OAuth** — Google, GitHub, OpenID Connect, and SAML 2.0 with account linking
+
+**Audit Logging** — Track all user actions with detailed activity dashboard
+
+**API Keys** — Tiered API keys (standard/elevated/unlimited) with rate limiting, usage analytics, and OpenAPI documentation
+
+**Webhook Subscriptions** — Event-driven webhooks with HMAC signatures, retry logic, and custom headers
---
@@ -215,15 +250,23 @@ See the [Installation Guide](docs/INSTALLATION.md) for step-by-step instructions
- [x] Databases — MySQL, PostgreSQL
- [x] File & FTP management
- [x] Monitoring & alerts — Metrics, webhooks, uptime tracking
-- [x] Security — 2FA, ClamAV, file integrity, Fail2ban
-- [x] Firewall — UFW integration
+- [x] Security — 2FA, ClamAV, file integrity, Fail2ban, Lynis
+- [x] Firewall — UFW/firewalld integration
- [x] Multi-server management — Go agent, centralized dashboard
- [x] Git deployment — Webhooks, auto-deploy, rollback, zero-downtime
-- [ ] Backup & restore — S3, Backblaze B2, scheduled backups
-- [ ] Email server — Postfix, Dovecot, DKIM/SPF/DMARC
-- [ ] Team & permissions — RBAC, audit logging
-- [ ] Mobile app — React Native with push notifications
-- [ ] Plugin marketplace — Extensions, custom widgets, themes
+- [x] Backup & restore — S3, Backblaze B2, scheduled backups
+- [x] Email server — Postfix, Dovecot, DKIM/SPF/DMARC, Roundcube
+- [x] Team & permissions — RBAC, invitations, audit logging
+- [x] API enhancements — API keys, rate limiting, OpenAPI docs, webhook subscriptions
+- [x] SSO & OAuth — Google, GitHub, OIDC, SAML
+- [x] Database migrations — Flask-Migrate/Alembic, versioned schema
+- [ ] Agent fleet management — Auto-upgrade, bulk ops, offline command queue
+- [ ] Cross-server monitoring — Fleet dashboard, anomaly detection, alerting
+- [ ] Agent plugin system — Extensible agent with custom metrics, commands, health checks
+- [ ] Server templates & config sync — Drift detection, compliance dashboards
+- [ ] Multi-tenancy — Workspaces, team isolation, per-workspace settings
+- [ ] DNS zone management — Cloudflare, Route53, DigitalOcean integrations
+- [ ] Status pages — Public status page, health checks, incident management
Full details: [ROADMAP.md](ROADMAP.md)
@@ -246,12 +289,15 @@ Full details: [ROADMAP.md](ROADMAP.md)
| Layer | Technology |
|-------|------------|
-| Backend | Python 3.11, Flask, SQLAlchemy, Flask-SocketIO |
-| Frontend | React 18, Vite, LESS |
+| Backend | Python 3.11, Flask, SQLAlchemy, Flask-SocketIO, Flask-Migrate |
+| Frontend | React 18, Vite, LESS, Recharts |
| Database | SQLite / PostgreSQL |
-| Web Server | Nginx, Gunicorn |
+| Web Server | Nginx, Gunicorn (GeventWebSocket) |
| Containers | Docker, Docker Compose |
-| Security | ClamAV, TOTP (pyotp), Cryptography |
+| Security | ClamAV, Lynis, Fail2ban, TOTP (pyotp), Fernet encryption |
+| Auth | JWT, OAuth 2.0, OIDC, SAML 2.0 |
+| Email | Postfix, Dovecot, SpamAssassin, Roundcube |
+| Agent | Go (multi-server), HMAC-SHA256, WebSocket |
---
@@ -263,7 +309,7 @@ Contributions are welcome! Please read [CONTRIBUTING.md](CONTRIBUTING.md) first.
fork → feature branch → commit → push → pull request
```
-**Priority areas:** Backup implementations, additional notification channels, UI/UX improvements, documentation.
+**Priority areas:** Agent plugin system, fleet management, DNS integrations, status pages, UI/UX improvements, documentation.
---
@@ -275,6 +321,12 @@ Join the Discord to ask questions, share feedback, or get help with your setup.
---
+## ⭐ Star History
+
+[](https://star-history.com/#jhd3197/ServerKit&Date)
+
+---
+
**ServerKit** — Simple. Modern. Self-hosted.
diff --git a/ROADMAP.md b/ROADMAP.md
index f91f8e4..0d3c7e9 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -4,17 +4,15 @@ This document outlines the development roadmap for ServerKit. Features are organ
---
-## Current Version: v0.9.0
+## Current Version: v1.5.0 (In Development)
-### Recently Completed
+### Recently Completed (v1.4.0)
-- **Two-Factor Authentication (2FA)** - TOTP-based with backup codes
-- **Notification Webhooks** - Discord, Slack, Telegram, generic webhooks
-- **ClamAV Integration** - Malware scanning with quarantine
-- **File Integrity Monitoring** - Baseline creation and change detection
-- **Environment Variable Management** - Secure, encrypted per-app variables
-- **Cron Job Management** - Visual cron editor
-- **Server Uptime Tracking** - Historical uptime data and visualization
+- **Team & Permissions** - RBAC with admin/developer/viewer roles, invitations, audit logging
+- **API Enhancements** - API keys, rate limiting, webhook subscriptions, OpenAPI docs, analytics
+- **SSO & OAuth Login** - Google, GitHub, OIDC, SAML with account linking
+- **Database Migrations** - Flask-Migrate/Alembic with versioned schema migrations
+- **Email Server Management** - Postfix, Dovecot, DKIM, SpamAssassin, Roundcube
---
@@ -117,7 +115,7 @@ This document outlines the development roadmap for ServerKit. Features are organ
---
-## Phase 10: Multi-Server Management (In Progress)
+## Phase 10: Multi-Server Management (Completed)
**Priority: High**
@@ -137,7 +135,7 @@ This document outlines the development roadmap for ServerKit. Features are organ
---
-## Phase 11: Git Deployment (Planned)
+## Phase 11: Git Deployment (Completed)
**Priority: High**
@@ -166,58 +164,46 @@ This document outlines the development roadmap for ServerKit. Features are organ
---
-## Phase 13: Email Server Management (Planned)
+## Phase 13: Email Server Management (Completed)
**Priority: Medium**
-- [ ] Postfix mail server setup
-- [ ] Dovecot IMAP/POP3 configuration
-- [ ] Email account management
-- [ ] Spam filtering (SpamAssassin)
-- [ ] DKIM/SPF/DMARC configuration
-- [ ] Webmail interface integration
-- [ ] Email forwarding rules
+- [x] Postfix mail server setup
+- [x] Dovecot IMAP/POP3 configuration
+- [x] Email account management
+- [x] Spam filtering (SpamAssassin)
+- [x] DKIM/SPF/DMARC configuration
+- [x] Webmail interface integration
+- [x] Email forwarding rules
---
-## Phase 14: Advanced SSL Features (Planned)
+## Phase 14: Team & Permissions (Completed)
**Priority: Medium**
-- [ ] Wildcard SSL certificates
-- [ ] Multi-domain certificates (SAN)
-- [ ] Custom certificate upload
-- [ ] Certificate expiry monitoring
-- [ ] Automatic renewal notifications
-
----
-
-## Phase 15: Team & Permissions (Planned)
-
-**Priority: Medium**
-
-- [ ] Multi-user support
-- [ ] Role-based access control (RBAC)
-- [ ] Custom permission sets
-- [ ] Audit logging per user
-- [ ] Team invitations
-- [ ] Activity dashboard
+- [x] Multi-user support
+- [x] Role-based access control (RBAC)
+- [x] Custom permission sets
+- [x] Audit logging per user
+- [x] Team invitations
+- [x] Activity dashboard
---
-## Phase 16: API Enhancements (Planned)
+## Phase 15: API Enhancements (Completed)
**Priority: Medium**
-- [ ] API key management
-- [ ] Rate limiting
-- [ ] Webhook event subscriptions
-- [ ] OpenAPI/Swagger documentation
-- [ ] API usage analytics
+- [x] API key management
+- [x] Rate limiting
+- [x] Webhook event subscriptions
+- [x] OpenAPI/Swagger documentation
+- [x] API usage analytics
---
-## Phase 17: Advanced Security (Completed)
+## Phase 16: Advanced Security (Completed)
**Priority: High**
@@ -233,37 +219,350 @@ This document outlines the development roadmap for ServerKit. Features are organ
---
-## Phase 18: Performance Optimization (Planned)
+## Phase 17: SSO & OAuth Login (Completed)
-**Priority: Low**
+**Priority: High**
-- [ ] Redis caching integration
-- [ ] Database query optimization
-- [ ] Static asset CDN support
-- [ ] Lazy loading for large datasets
-- [ ] Background job queue (Celery)
+- [x] Google OAuth 2.0 login
+- [x] GitHub OAuth login
+- [x] Generic OpenID Connect (OIDC) provider support
+- [x] SAML 2.0 support for enterprise environments
+- [x] Social login UI (provider buttons on login page)
+- [x] Account linking (connect OAuth identity to existing local account)
+- [x] Auto-provisioning of new users on first SSO login
+- [x] Configurable SSO settings (enable/disable providers, client ID/secret management)
+- [x] Enforce SSO-only login (disable password auth for team members)
+- [x] SSO session management and token refresh
---
-## Phase 19: Mobile App (Planned)
+## Phase 18: Database Migrations & Schema Versioning (Completed)
-**Priority: Low**
+**Priority: High**
+
+### Backend — Migration Engine
+- [x] Integrate Flask-Migrate (Alembic) for versioned schema migrations
+- [x] Generate initial migration from current model state as baseline
+- [x] Replace `_auto_migrate_columns()` hack with proper Alembic migrations
+- [x] Store schema version in a `schema_version` table (current version, history)
+- [x] API endpoints for migration status, apply, and rollback
+- [x] Auto-detect pending migrations on login and flag the session
+- [x] Pre-migration automatic DB backup before applying changes
+- [x] Migration scripts for all existing model changes (retroactive baseline)
+
+### CLI Fallback
+- [x] CLI commands for headless/SSH scenarios (`flask db upgrade`, `flask db status`)
+- [x] CLI rollback support (`flask db downgrade`)
+
+---
+
+# Upcoming Development
+
+The phases below are ordered by priority; earlier (lower-numbered) phases ship first.
+
+---
+
+## Phase 19: New UI & Services Page (Planned)
+
+**Priority: Critical**
+
+Merge the `new-ui` branch — adds a full Services page with service detail views, metrics, logs, shell, settings, git connect, and package management.
+
+- [ ] Merge `new-ui` branch into main development line
+- [ ] Services list page with status indicators and quick actions
+- [ ] Service detail page with tabbed interface (Metrics, Logs, Shell, Settings, Commands, Events, Packages)
+- [ ] Git connect modal for linking services to repositories
+- [ ] Gunicorn management tab for Python services
+- [ ] Service type detection and type-specific UI (Node, Python, PHP, Docker, etc.)
+- [ ] Resolve any conflicts with features added since branch diverged
+
+---
+
+## Phase 20: Customizable Sidebar & Dashboard Views (Planned)
+
+**Priority: High**
+
+Let users personalize what they see. Not everyone runs email servers or manages Docker — the sidebar should adapt to each user's needs.
+
+- [ ] Sidebar configuration page in Settings
+- [ ] Preset view profiles: **Full** (default, all modules), **Web Hosting** (apps, domains, SSL, databases, files), **Email Admin** (email, DNS, security), **Docker/DevOps** (containers, deployments, git, monitoring), **Minimal** (apps, monitoring, backups only)
+- [ ] Custom view builder — toggle individual sidebar items on/off
+- [ ] Per-user preference storage (saved to user profile, synced across sessions)
+- [ ] Sidebar sections collapse/expand with memory
+- [ ] Quick-switch between saved view profiles
+- [ ] Admin can set default view for new users
+- [ ] Hide empty/unconfigured modules automatically (e.g., hide Email if no email domains exist)
+
+---
+
+## Phase 21: Migration Wizard Frontend UI (Planned)
+
+**Priority: High**
+
+The backend migration engine is complete — this adds the visual upgrade experience (Matomo-style).
+
+- [ ] Full-screen modal/wizard that appears when pending migrations are detected
+- [ ] Step 1: "Update Available" — show current version vs new version, changelog summary
+- [ ] Step 2: "Backup" — auto-backup the database, show progress, confirm success
+- [ ] Step 3: "Apply Migrations" — run migrations with real-time progress/log output
+- [ ] Step 4: "Done" — success confirmation with summary of changes applied
+- [ ] Error handling: if a migration fails, show the error and offer rollback option
+- [ ] Block access to the panel until migrations are applied
+- [ ] Migration history page in Settings showing all past migrations and timestamps
+
+---
+
+## Phase 22: Container Logs & Monitoring UI (Planned)
+
+**Priority: High**
+
+The container logs API is already built. This phase adds the frontend and extends monitoring to per-app metrics.
+
+- [ ] Log viewer component with terminal-style display and ANSI color support
+- [ ] Real-time log streaming via WebSocket with auto-scroll (pause on user scroll)
+- [ ] Log search with regex support and match highlighting
+- [ ] Filter by log level (INFO, WARN, ERROR, DEBUG) and time range
+- [ ] Export filtered logs to file
+- [ ] Per-container resource collection (CPU %, memory, network I/O via Docker stats API)
+- [ ] Per-app resource usage charts (Recharts) with time range selector (1h, 6h, 24h, 7d)
+- [ ] Per-app alert rules (metric, operator, threshold, duration)
+- [ ] Alert notifications via existing channels (email, Discord, Telegram) with cooldown
+
+---
+
+## Phase 23: Agent Fleet Management (Planned)
+
+**Priority: High**
+
+Level up agent management from "connect and monitor" to full fleet control.
+
+- [ ] Agent version tracking and compatibility matrix (panel version ↔ agent version)
+- [ ] Push agent upgrades from the panel (single server or fleet-wide rollout)
+- [ ] Staged rollout support — upgrade agents in batches with health checks between waves
+- [ ] Agent health dashboard — connection uptime, heartbeat latency, command success rate per agent
+- [ ] Auto-discovery of new servers on the local network (mDNS/broadcast scan)
+- [ ] Agent registration approval workflow (admin must approve before agent joins fleet)
+- [ ] Bulk agent operations — restart, upgrade, rotate keys across selected servers
+- [ ] Agent changelog and release notes visible in UI
+- [ ] Offline agent command queue — persist commands and deliver when agent reconnects
+- [ ] Command retry with configurable backoff for failed/timed-out operations
+- [ ] Agent connection diagnostics — test connectivity, latency, firewall check from panel
+
+---
+
+## Phase 24: Cross-Server Monitoring Dashboard (Planned)
+
+**Priority: High**
+
+Fleet-wide visibility — see everything at a glance and catch problems early.
+
+- [ ] Fleet overview dashboard — heatmap of all servers by CPU/memory/disk usage
+- [ ] Server comparison charts — overlay metrics from multiple servers on one graph
+- [ ] Per-server alert thresholds (CPU > 80% for 5 min → warning, > 95% → critical)
+- [ ] Anomaly detection — automatic baseline learning, alert on deviations
+- [ ] Custom metric dashboards — drag-and-drop widgets, save layouts per user
+- [ ] Metric correlation view — spot relationships between metrics across servers
+- [ ] Capacity forecasting — trend-based predictions (disk full in X days, memory growth rate)
+- [ ] Metrics export — Prometheus endpoint (`/metrics`), CSV download, JSON API
+- [ ] Grafana integration guide and pre-built dashboard templates
+- [ ] Fleet-wide search — find which server is running a specific container, service, or port
+
+---
+
+## Phase 25: Agent Plugin System (Planned)
-- [ ] React Native mobile application
-- [ ] Push notifications
-- [ ] Quick actions (restart, view stats)
-- [ ] Biometric authentication
+**Priority: High**
+
+Make the agent extensible — let users add custom capabilities without modifying agent core. This is the foundation for future integrations (Android device farms, IoT fleets, custom hardware monitoring, etc.).
+
+### Plugin Architecture
+- [ ] Plugin specification — standard interface (init, healthcheck, metrics, commands)
+- [ ] Plugin manifest format (YAML/JSON) — name, version, dependencies, capabilities, permissions
+- [ ] Plugin lifecycle management — install, enable, disable, uninstall, upgrade
+- [ ] Plugin isolation — each plugin runs in its own process/sandbox with resource limits
+- [ ] Plugin communication — standardized IPC between plugin and agent core
+
+### Plugin Capabilities
+- [ ] Custom metrics reporters — plugins can push arbitrary metrics to the panel
+- [ ] Custom health checks — plugins define checks that feed into the status system
+- [ ] Custom commands — plugins register new command types the panel can invoke
+- [ ] Scheduled tasks — plugins can register periodic jobs (cron-like)
+- [ ] Event hooks — plugins can react to agent events (connect, disconnect, command, alert)
+
+### Panel Integration
+- [ ] Plugin management UI — install, configure, monitor plugins per server
+- [ ] Plugin marketplace / registry — browse and install community plugins
+- [ ] Plugin configuration editor — per-server plugin settings from the panel
+- [ ] Plugin logs and diagnostics — view plugin output and errors
+- [ ] Plugin metrics visualization — custom widgets for plugin-reported data
+
+### Developer Experience
+- [ ] Plugin SDK (Go module) — scaffolding, helpers, testing tools
+- [ ] Plugin template repository — quickstart for new plugin development
+- [ ] Local plugin development mode — hot-reload, debug logging
+- [ ] Plugin documentation and API reference
+
+---
+
+## Phase 26: Server Templates & Config Sync (Planned)
+
+**Priority: Medium**
+
+Define what a server should look like, apply it, and detect when it drifts.
+
+- [ ] Server template builder — define expected state (packages, services, firewall rules, users, files)
+- [ ] Template library — save and reuse templates (e.g., "Web Server", "Database Server", "Mail Server")
+- [ ] Apply template to server — install packages, configure services, set firewall rules via agent
+- [ ] Config drift detection — periodic comparison of actual vs. expected state
+- [ ] Drift report UI — visual diff showing what changed and when
+- [ ] Auto-remediation option — automatically fix drift back to template (with approval toggle)
+- [ ] Template versioning — track changes to templates over time
+- [ ] Template inheritance — base template + role-specific overrides
+- [ ] Bulk apply — roll out template changes across server groups
+- [ ] Compliance dashboard — percentage of fleet in compliance per template
+
+---
+
+## Phase 27: Multi-Tenancy & Workspaces (Planned)
+
+**Priority: Medium**
+
+Isolate servers by team, client, or project. Essential for agencies, MSPs, and larger teams.
+
+- [ ] Workspace model — isolated container for servers, users, and settings
+- [ ] Workspace CRUD — create, rename, archive workspaces
+- [ ] Server assignment — each server belongs to exactly one workspace
+- [ ] User workspace membership — users can belong to multiple workspaces with different roles
+- [ ] Workspace switching — quick-switch dropdown in the header
+- [ ] Per-workspace settings — notification preferences, default templates, branding
+- [ ] Workspace-scoped API keys — API keys restricted to a single workspace
+- [ ] Cross-workspace admin view — super-admin can see all workspaces and usage
+- [ ] Workspace usage quotas — limit servers, users, or API calls per workspace
+- [ ] Workspace billing integration — track resource usage per workspace for invoicing
---
-## Phase 20: Marketplace & Extensions (Planned)
+## Phase 28: Advanced SSL Features (Planned)
+
+**Priority: Medium**
+
+- [x] Certificate expiry monitoring
+- [ ] Wildcard SSL certificates via DNS-01 challenge
+- [ ] Multi-domain certificates (SAN)
+- [ ] Custom certificate upload (key + cert + chain)
+- [ ] Certificate expiry notifications (email/webhook alerts before expiration)
+- [ ] SSL configuration templates (modern, intermediate, legacy compatibility)
+- [ ] SSL health check dashboard (grade, cipher suites, protocol versions)
+
+---
+
+## Phase 29: DNS Zone Management (Planned)
+
+**Priority: Medium**
+
+Full DNS record management with provider API integration.
+
+- [ ] DNS zone editor UI (A, AAAA, CNAME, MX, TXT, SRV, CAA records)
+- [ ] Cloudflare API integration (list/create/update/delete records)
+- [ ] Route53 API integration
+- [ ] DigitalOcean DNS integration
+- [ ] DNS propagation checker (query multiple nameservers)
+- [ ] Auto-generate recommended records for hosted services (SPF, DKIM, DMARC, MX)
+- [ ] DNS template presets (e.g., "standard web hosting", "email hosting")
+- [ ] Bulk record import/export (BIND zone file format)
+
+---
+
+## Phase 30: Nginx Advanced Configuration (Planned)
+
+**Priority: Medium**
+
+Go beyond basic virtual hosts — full reverse proxy and performance configuration.
+
+- [ ] Visual reverse proxy rule builder (upstream servers, load balancing methods)
+- [ ] Load balancing configuration (round-robin, least connections, IP hash)
+- [ ] Caching rules editor (proxy cache zones, TTLs, cache bypass rules)
+- [ ] Rate limiting at proxy level (per-IP, per-route)
+- [ ] Custom location block editor with syntax validation
+- [ ] Header manipulation (add/remove/modify request/response headers)
+- [ ] Nginx config syntax check before applying changes
+- [ ] Config diff preview before saving
+- [ ] Access/error log viewer per virtual host
+
+---
+
+## Phase 31: Status Page & Health Checks (Planned)
+
+**Priority: Medium**
+
+Public-facing status page and automated health monitoring.
+
+- [ ] Automated health checks (HTTP, TCP, DNS, SMTP) with configurable intervals
+- [ ] Public status page (standalone URL, no auth required)
+- [ ] Status page customization (logo, colors, custom domain)
+- [ ] Service grouping on status page (e.g., "Web Services", "Email", "APIs")
+- [ ] Incident management — create, update, resolve incidents with timeline
+- [ ] Uptime percentage display (24h, 7d, 30d, 90d)
+- [ ] Scheduled maintenance windows with advance notifications
+- [ ] Status page subscribers (email/webhook notifications on incidents)
+- [ ] Historical uptime graphs
+- [ ] Status badge embeds (SVG/PNG for README files)
+
+---
+
+## Phase 32: Server Provisioning APIs (Planned)
+
+**Priority: Medium**
+
+Spin up and manage cloud servers directly from the panel.
+
+- [ ] DigitalOcean API integration (create/destroy/resize droplets)
+- [ ] Hetzner Cloud API integration
+- [ ] Vultr API integration
+- [ ] Linode/Akamai API integration
+- [ ] Server creation wizard (region, size, OS, SSH keys)
+- [ ] Auto-install ServerKit agent on provisioned servers
+- [ ] Server cost tracking and billing overview
+- [ ] Snapshot management (create/restore/delete)
+- [ ] One-click server cloning
+- [ ] Destroy server with confirmation safeguards
+
+---
+
+## Phase 33: Performance Optimization (Planned)
**Priority: Low**
-- [ ] Plugin/extension system
-- [ ] Community marketplace
+- [ ] Redis caching for frequently accessed data (metrics, server status)
+- [ ] Database query optimization and slow query logging
+- [ ] Background job queue (Celery or RQ) for long-running tasks
+- [ ] Lazy loading for large datasets (paginated API responses)
+- [ ] WebSocket connection pooling and reconnection improvements
+- [ ] Frontend bundle optimization and code splitting
+
+---
+
+## Phase 34: Mobile App (Future)
+
+**Priority: Low — v3.0+**
+
+- [ ] React Native or PWA mobile application
+- [ ] Push notifications for alerts and incidents
+- [ ] Quick actions (restart services, view stats, acknowledge alerts)
+- [ ] Biometric authentication (fingerprint/Face ID)
+- [ ] Offline mode with cached server status
+
+---
+
+## Phase 35: Marketplace & Extensions (Future)
+
+**Priority: Low — v3.0+**
+
+- [ ] Plugin/extension system with API hooks
+- [ ] Community marketplace for plugins
- [ ] Custom dashboard widgets
-- [ ] Theme customization
+- [ ] Theme customization (colors, layout, branding)
+- [ ] Extension SDK and developer documentation
---
@@ -271,14 +570,19 @@ This document outlines the development roadmap for ServerKit. Features are organ
| Version | Target Features | Status |
|---------|-----------------|--------|
-| v0.9.0 | Core features, 2FA, Notifications, Security | Current |
-| v1.0.0 | Production-ready stable release | Planned |
-| v1.1.0 | Multi-server, Git deployment | Planned |
-| v1.2.0 | Backups, Advanced SSL, Advanced Security | Planned |
-| v1.3.0 | Email server, API enhancements | Planned |
-| v1.4.0 | Team & permissions | Planned |
-| v1.5.0 | Performance optimizations | Planned |
-| v2.0.0 | Mobile app, Marketplace | Future |
+| v0.9.0 | Core features, 2FA, Notifications, Security | Completed |
+| v1.0.0 | Production-ready stable release, DB migrations | Completed |
+| v1.1.0 | Multi-server, Git deployment | Completed |
+| v1.2.0 | Backups, Advanced SSL, Advanced Security | Completed |
+| v1.3.0 | Email server, API enhancements | Completed |
+| v1.4.0 | Team & permissions, SSO & OAuth login | Completed |
+| v1.5.0 | New UI, customizable sidebar, migration wizard UI | Current |
+| v1.6.0 | Container monitoring UI, agent fleet management | Planned |
+| v1.7.0 | Cross-server monitoring, agent plugin system | Planned |
+| v1.8.0 | Server templates, multi-tenancy | Planned |
+| v1.9.0 | Advanced SSL, DNS management, Nginx config | Planned |
+| v2.0.0 | Status pages, server provisioning, performance | Planned |
+| v3.0.0 | Mobile app, Marketplace | Future |
---
@@ -287,10 +591,10 @@ This document outlines the development roadmap for ServerKit. Features are organ
Want to help? See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
**Priority areas for contributions:**
-- Multi-server agent development
-- Git webhook integration
-- S3/B2 backup implementations
-- Additional notification channels
+- Agent plugin SDK and example plugins
+- Fleet management and monitoring dashboard
+- DNS provider integrations (Cloudflare, Route53)
+- Status page and health check system
- UI/UX improvements
- Documentation
@@ -304,5 +608,5 @@ Have a feature idea? Open an issue on GitHub with the `enhancement` label.
ServerKit Roadmap
- Last updated: January 2026
+ Last updated: March 2026
diff --git a/VERSION b/VERSION
index 3a3cd8c..80e78df 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.3.1
+1.3.5
diff --git a/agent/cmd/agent/main.go b/agent/cmd/agent/main.go
index 2129e0e..bca3c36 100644
--- a/agent/cmd/agent/main.go
+++ b/agent/cmd/agent/main.go
@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"os/signal"
+ "path/filepath"
"syscall"
"github.com/serverkit/agent/internal/agent"
@@ -309,8 +310,19 @@ func runRegister(token, serverURL, name string) error {
cfg.Auth.APIKey = result.APIKey
cfg.Auth.APISecret = result.APISecret
- // Save config
- if err := cfg.Save(config.DefaultConfigPath()); err != nil {
+ // Determine config path (use --config flag if set, otherwise default)
+ configPath := cfgFile
+ if configPath == "" {
+ configPath = config.DefaultConfigPath()
+ }
+
+ // Update key file path to be relative to config directory if using custom path
+ if cfgFile != "" {
+ cfg.Auth.KeyFile = filepath.Join(filepath.Dir(configPath), "agent.key")
+ }
+
+ // Save config (key_file path must be set before saving)
+ if err := cfg.Save(configPath); err != nil {
return fmt.Errorf("failed to save config: %w", err)
}
diff --git a/agent/internal/agent/registration.go b/agent/internal/agent/registration.go
index 36358d4..e187476 100644
--- a/agent/internal/agent/registration.go
+++ b/agent/internal/agent/registration.go
@@ -103,7 +103,7 @@ func (r *Registration) Register(serverURL, token, name string) (*RegistrationRes
}
// Make registration request
- registrationURL := serverURL + "/api/v1/agents/register"
+ registrationURL := serverURL + "/api/v1/servers/register"
r.log.Info("Sending registration request", "url", registrationURL)
req, err := http.NewRequestWithContext(ctx, "POST", registrationURL, bytes.NewReader(bodyBytes))
diff --git a/agent/internal/ws/client.go b/agent/internal/ws/client.go
index 9bc7981..621da67 100644
--- a/agent/internal/ws/client.go
+++ b/agent/internal/ws/client.go
@@ -6,6 +6,8 @@ import (
"encoding/json"
"fmt"
"net/http"
+ "net/url"
+ "strings"
"sync"
"time"
@@ -19,7 +21,7 @@ import (
// MessageHandler is called when a message is received
type MessageHandler func(msgType protocol.MessageType, data []byte)
-// Client is a WebSocket client with auto-reconnect
+// Client is a Socket.IO client with auto-reconnect
type Client struct {
cfg config.ServerConfig
auth *auth.Authenticator
@@ -36,16 +38,23 @@ type Client struct {
doneCh chan struct{}
reconnectCount int
+
+ // Socket.IO namespace
+ namespace string
+ // Engine.IO ping interval from server
+ pingInterval time.Duration
+ pingTimeout time.Duration
}
// NewClient creates a new WebSocket client
func NewClient(cfg config.ServerConfig, authenticator *auth.Authenticator, log *logger.Logger) *Client {
return &Client{
- cfg: cfg,
- auth: authenticator,
- log: log.WithComponent("websocket"),
- sendCh: make(chan []byte, 100),
- doneCh: make(chan struct{}),
+ cfg: cfg,
+ auth: authenticator,
+ log: log.WithComponent("websocket"),
+ sendCh: make(chan []byte, 100),
+ doneCh: make(chan struct{}),
+ namespace: "/agent",
}
}
@@ -54,7 +63,34 @@ func (c *Client) SetHandler(handler MessageHandler) {
c.handler = handler
}
-// Connect establishes a WebSocket connection
+// buildSocketIOURL converts the configured server URL to a Socket.IO WebSocket URL.
+// Input examples:
+// - "wss://server.example.com/agent"
+// - "ws://localhost:5000/agent"
+//
+// Output: "wss://server.example.com/socket.io/?EIO=4&transport=websocket"
+func (c *Client) buildSocketIOURL() (string, error) {
+ rawURL := c.cfg.URL
+ if rawURL == "" {
+ return "", fmt.Errorf("server URL is empty")
+ }
+
+ parsed, err := url.Parse(rawURL)
+ if err != nil {
+ return "", fmt.Errorf("invalid server URL: %w", err)
+ }
+
+ // Keep the scheme (ws/wss), strip the path (e.g. /agent)
+ parsed.Path = "/socket.io/"
+ q := url.Values{}
+ q.Set("EIO", "4")
+ q.Set("transport", "websocket")
+ parsed.RawQuery = q.Encode()
+
+ return parsed.String(), nil
+}
+
+// Connect establishes a Socket.IO connection over WebSocket
func (c *Client) Connect(ctx context.Context) error {
c.mu.Lock()
if c.connected {
@@ -63,6 +99,11 @@ func (c *Client) Connect(ctx context.Context) error {
}
c.mu.Unlock()
+ sioURL, err := c.buildSocketIOURL()
+ if err != nil {
+ return err
+ }
+
dialer := websocket.Dialer{
HandshakeTimeout: 10 * time.Second,
}
@@ -76,10 +117,11 @@ func (c *Client) Connect(ctx context.Context) error {
headers := http.Header{}
headers.Set("X-Agent-ID", c.auth.AgentID())
headers.Set("X-API-Key-Prefix", c.auth.GetAPIKeyPrefix())
+ headers.Set("User-Agent", fmt.Sprintf("ServerKit-Agent/%s", "dev"))
- c.log.Debug("Connecting to server", "url", c.cfg.URL)
+ c.log.Debug("Connecting to Socket.IO", "url", sioURL)
- conn, resp, err := dialer.DialContext(ctx, c.cfg.URL, headers)
+ conn, resp, err := dialer.DialContext(ctx, sioURL, headers)
if err != nil {
if resp != nil {
c.log.Error("Connection failed",
@@ -92,14 +134,29 @@ func (c *Client) Connect(ctx context.Context) error {
c.mu.Lock()
c.conn = conn
+ c.mu.Unlock()
+
+ // Step 1: Read Engine.IO OPEN packet
+ if err := c.handleEngineIOOpen(); err != nil {
+ conn.Close()
+ return fmt.Errorf("engine.io handshake failed: %w", err)
+ }
+
+ // Step 2: Connect to Socket.IO namespace
+ if err := c.connectNamespace(); err != nil {
+ conn.Close()
+ return fmt.Errorf("namespace connect failed: %w", err)
+ }
+
+ c.mu.Lock()
c.connected = true
c.reconnecting = false
c.reconnectCount = 0
c.mu.Unlock()
- c.log.Info("Connected to server")
+ c.log.Info("Connected to server via Socket.IO")
- // Authenticate
+ // Step 3: Authenticate via Socket.IO event
if err := c.authenticate(); err != nil {
c.Close()
return fmt.Errorf("authentication failed: %w", err)
@@ -108,65 +165,213 @@ func (c *Client) Connect(ctx context.Context) error {
return nil
}
-// authenticate sends authentication message and waits for response
+// handleEngineIOOpen reads and processes the Engine.IO OPEN packet (type 0)
+func (c *Client) handleEngineIOOpen() error {
+ c.conn.SetReadDeadline(time.Now().Add(10 * time.Second))
+ _, msg, err := c.conn.ReadMessage()
+ if err != nil {
+ return fmt.Errorf("failed to read OPEN packet: %w", err)
+ }
+ c.conn.SetReadDeadline(time.Time{})
+
+ msgStr := string(msg)
+ c.log.Debug("Received Engine.IO packet", "raw", msgStr)
+
+ // Engine.IO OPEN packet starts with '0'
+ if len(msgStr) < 2 || msgStr[0] != '0' {
+ return fmt.Errorf("expected OPEN packet (0), got: %s", msgStr)
+ }
+
+ // Parse the JSON payload
+ var openData struct {
+ SID string `json:"sid"`
+ Upgrades []string `json:"upgrades"`
+ PingInterval int `json:"pingInterval"`
+ PingTimeout int `json:"pingTimeout"`
+ }
+ if err := json.Unmarshal([]byte(msgStr[1:]), &openData); err != nil {
+ return fmt.Errorf("failed to parse OPEN data: %w", err)
+ }
+
+ c.pingInterval = time.Duration(openData.PingInterval) * time.Millisecond
+ c.pingTimeout = time.Duration(openData.PingTimeout) * time.Millisecond
+
+ c.log.Debug("Engine.IO handshake complete",
+ "sid", openData.SID,
+ "pingInterval", c.pingInterval,
+ "pingTimeout", c.pingTimeout,
+ )
+
+ return nil
+}
+
+// connectNamespace sends a Socket.IO CONNECT packet to the /agent namespace
+func (c *Client) connectNamespace() error {
+ // Socket.IO CONNECT: packet type 4 (MESSAGE) + message type 0 (CONNECT) + namespace
+ // Wire format: "40/agent,"
+ connectMsg := fmt.Sprintf("40%s,", c.namespace)
+ c.log.Debug("Connecting to namespace", "namespace", c.namespace, "packet", connectMsg)
+
+ if err := c.conn.WriteMessage(websocket.TextMessage, []byte(connectMsg)); err != nil {
+ return fmt.Errorf("failed to send CONNECT: %w", err)
+ }
+
+ // Read namespace CONNECT ack
+ c.conn.SetReadDeadline(time.Now().Add(10 * time.Second))
+ _, msg, err := c.conn.ReadMessage()
+ if err != nil {
+ return fmt.Errorf("failed to read CONNECT ack: %w", err)
+ }
+ c.conn.SetReadDeadline(time.Time{})
+
+ msgStr := string(msg)
+ c.log.Debug("Received namespace response", "raw", msgStr)
+
+ // Expected: "40/agent,{\"sid\":\"...\"}"
+ // Or error: "44/agent,{\"message\":\"...\"}"
+ prefix := fmt.Sprintf("40%s,", c.namespace)
+ errorPrefix := fmt.Sprintf("44%s,", c.namespace)
+
+ if strings.HasPrefix(msgStr, errorPrefix) {
+ return fmt.Errorf("namespace connection rejected: %s", msgStr)
+ }
+
+ if !strings.HasPrefix(msgStr, prefix) {
+ return fmt.Errorf("unexpected namespace response: %s", msgStr)
+ }
+
+ c.log.Info("Connected to namespace", "namespace", c.namespace)
+ return nil
+}
+
+// authenticate sends an "auth" Socket.IO event and waits for response
func (c *Client) authenticate() error {
timestamp := time.Now().UnixMilli()
nonce := auth.GenerateNonce()
- // Sign with nonce for replay protection
signature := c.auth.SignMessageWithNonce(timestamp, nonce)
- authMsg := protocol.AuthMessage{
- Message: protocol.NewMessage(protocol.TypeAuth, nonce),
- AgentID: c.auth.AgentID(),
- APIKeyPrefix: c.auth.GetAPIKeyPrefix(),
- Nonce: nonce,
+ authData := map[string]interface{}{
+ "type": "auth",
+ "agent_id": c.auth.AgentID(),
+ "api_key_prefix": c.auth.GetAPIKeyPrefix(),
+ "nonce": nonce,
+ "timestamp": timestamp,
+ "signature": signature,
}
- authMsg.Timestamp = timestamp
- authMsg.Signature = signature
- data, err := json.Marshal(authMsg)
- if err != nil {
- return fmt.Errorf("failed to marshal auth message: %w", err)
+ c.log.Debug("Sending authentication event")
+
+ if err := c.emitEvent("auth", authData); err != nil {
+ return fmt.Errorf("failed to send auth event: %w", err)
}
- c.log.Debug("Sending authentication message")
+ // Wait for auth response event
+ c.conn.SetReadDeadline(time.Now().Add(10 * time.Second))
+ defer c.conn.SetReadDeadline(time.Time{})
+
+ for {
+ _, msg, err := c.conn.ReadMessage()
+ if err != nil {
+ return fmt.Errorf("failed to read auth response: %w", err)
+ }
+
+ msgStr := string(msg)
+
+ // Handle Engine.IO ping during auth
+ if msgStr == "2" {
+ c.conn.WriteMessage(websocket.TextMessage, []byte("3"))
+ continue
+ }
+
+ eventName, eventData, err := c.parseEvent(msgStr)
+ if err != nil {
+ c.log.Debug("Ignoring non-event message during auth", "raw", msgStr)
+ continue
+ }
- if err := c.conn.WriteMessage(websocket.TextMessage, data); err != nil {
- return fmt.Errorf("failed to send auth message: %w", err)
+ switch eventName {
+ case "auth_ok":
+ var response struct {
+ Type string `json:"type"`
+ SessionToken string `json:"session_token"`
+ Expires int64 `json:"expires"`
+ ServerID string `json:"server_id"`
+ }
+ if err := json.Unmarshal(eventData, &response); err != nil {
+ return fmt.Errorf("failed to parse auth_ok: %w", err)
+ }
+
+ c.session = &auth.SessionToken{
+ Token: response.SessionToken,
+ ExpiresAt: time.UnixMilli(response.Expires),
+ }
+
+ c.log.Info("Authentication successful",
+ "expires_in", time.Until(c.session.ExpiresAt).Round(time.Second),
+ )
+ return nil
+
+ case "auth_fail":
+ var response struct {
+ Type string `json:"type"`
+ Error string `json:"error"`
+ }
+ json.Unmarshal(eventData, &response)
+ return fmt.Errorf("authentication rejected: %s", response.Error)
+
+ default:
+ c.log.Debug("Ignoring event during auth", "event", eventName)
+ }
}
+}
- // Wait for auth response
- c.conn.SetReadDeadline(time.Now().Add(10 * time.Second))
- _, msg, err := c.conn.ReadMessage()
+// emitEvent sends a Socket.IO EVENT packet
+// Wire format: 42/agent,["event_name",{data}]
+func (c *Client) emitEvent(event string, data interface{}) error {
+ dataBytes, err := json.Marshal(data)
if err != nil {
- return fmt.Errorf("failed to read auth response: %w", err)
+ return fmt.Errorf("failed to marshal event data: %w", err)
}
- c.conn.SetReadDeadline(time.Time{})
- var response protocol.AuthResponse
- if err := json.Unmarshal(msg, &response); err != nil {
- return fmt.Errorf("failed to parse auth response: %w", err)
+ // Build Socket.IO EVENT: 42/namespace,["event",data]
+ eventJSON := fmt.Sprintf(`42%s,["%s",%s]`, c.namespace, event, string(dataBytes))
+
+ return c.conn.WriteMessage(websocket.TextMessage, []byte(eventJSON))
+}
+
+// parseEvent parses a Socket.IO EVENT packet and returns event name + data
+// Expected format: 42/agent,["event_name",{data}]
+func (c *Client) parseEvent(msg string) (string, json.RawMessage, error) {
+ prefix := fmt.Sprintf("42%s,", c.namespace)
+ if !strings.HasPrefix(msg, prefix) {
+ return "", nil, fmt.Errorf("not a Socket.IO EVENT for namespace %s", c.namespace)
}
- if response.Type == protocol.TypeAuthFail {
- return fmt.Errorf("authentication rejected: %s", response.Error)
+ payload := msg[len(prefix):]
+
+ // Parse as JSON array: ["event_name", data]
+ var arr []json.RawMessage
+ if err := json.Unmarshal([]byte(payload), &arr); err != nil {
+ return "", nil, fmt.Errorf("failed to parse event array: %w", err)
}
- if response.Type != protocol.TypeAuthOK {
- return fmt.Errorf("unexpected response type: %s", response.Type)
+ if len(arr) < 1 {
+ return "", nil, fmt.Errorf("empty event array")
}
- // Store session token
- c.session = &auth.SessionToken{
- Token: response.SessionToken,
- ExpiresAt: time.UnixMilli(response.Expires),
+ // Extract event name (first element is a string)
+ var eventName string
+ if err := json.Unmarshal(arr[0], &eventName); err != nil {
+ return "", nil, fmt.Errorf("failed to parse event name: %w", err)
}
- c.log.Info("Authentication successful",
- "expires_in", time.Until(c.session.ExpiresAt).Round(time.Second),
- )
+ // Data is the second element (may be absent)
+ var eventData json.RawMessage
+ if len(arr) > 1 {
+ eventData = arr[1]
+ }
- return nil
+ return eventName, eventData, nil
}
// Run starts the read/write loops and handles reconnection
@@ -186,13 +391,14 @@ func (c *Client) Run(ctx context.Context) error {
if !connected {
if err := c.Connect(ctx); err != nil {
+ c.log.Warn("Connection failed", "error", err)
c.handleReconnect(ctx)
continue
}
}
- // Start read/write loops
- errCh := make(chan error, 2)
+ // Start read/write/ping loops
+ errCh := make(chan error, 3)
go func() {
errCh <- c.readLoop(ctx)
@@ -202,6 +408,10 @@ func (c *Client) Run(ctx context.Context) error {
errCh <- c.writeLoop(ctx)
}()
+ go func() {
+ errCh <- c.pingLoop(ctx)
+ }()
+
// Wait for error
err := <-errCh
c.log.Warn("Connection loop ended", "error", err)
@@ -226,7 +436,7 @@ func (c *Client) Run(ctx context.Context) error {
}
}
-// readLoop reads messages from the WebSocket
+// readLoop reads Socket.IO messages from the WebSocket
func (c *Client) readLoop(ctx context.Context) error {
for {
select {
@@ -240,40 +450,121 @@ func (c *Client) readLoop(ctx context.Context) error {
return fmt.Errorf("read error: %w", err)
}
- // Parse message type
- var base protocol.Message
- if err := json.Unmarshal(msg, &base); err != nil {
- c.log.Warn("Failed to parse message", "error", err)
+ msgStr := string(msg)
+
+ // Handle Engine.IO PING (server sends "2", we respond "3")
+ if msgStr == "2" {
+ if err := c.conn.WriteMessage(websocket.TextMessage, []byte("3")); err != nil {
+ return fmt.Errorf("failed to send pong: %w", err)
+ }
+ c.log.Debug("Responded to Engine.IO ping")
continue
}
- // Handle heartbeat ack internally
- if base.Type == protocol.TypeHeartbeatAck {
- c.log.Debug("Received heartbeat ack")
- continue
+ // Handle Engine.IO CLOSE
+ if msgStr == "1" {
+ return fmt.Errorf("server sent Engine.IO CLOSE")
+ }
+
+ // Handle Socket.IO DISCONNECT for our namespace
+ disconnectPrefix := fmt.Sprintf("41%s,", c.namespace)
+ if strings.HasPrefix(msgStr, disconnectPrefix) || msgStr == "41" {
+ return fmt.Errorf("server disconnected namespace %s", c.namespace)
}
- // Pass to handler
- if c.handler != nil {
- c.handler(base.Type, msg)
+ // Parse Socket.IO EVENT
+ eventName, eventData, err := c.parseEvent(msgStr)
+ if err != nil {
+ c.log.Debug("Ignoring unrecognized message", "raw", msgStr)
+ continue
}
+
+ // Map event name to protocol message type and dispatch
+ c.dispatchEvent(eventName, eventData)
}
}
-// writeLoop writes messages from the send channel
+// dispatchEvent maps a Socket.IO event to the agent's message handler
+func (c *Client) dispatchEvent(eventName string, data json.RawMessage) {
+ c.log.Debug("Received event", "event", eventName)
+
+ // Map Socket.IO event names to protocol message types
+ msgType := protocol.MessageType(eventName)
+
+ // Handle heartbeat ack internally
+ if msgType == protocol.TypeHeartbeatAck {
+ c.log.Debug("Received heartbeat ack")
+ return
+ }
+
+ // For events that carry data, we need to reconstruct the full message
+ // so the agent handler can unmarshal it as expected
+ if c.handler != nil && len(data) > 0 {
+ c.handler(msgType, data)
+ }
+}
+
+// writeLoop writes queued messages via Socket.IO events
func (c *Client) writeLoop(ctx context.Context) error {
for {
select {
case <-ctx.Done():
return ctx.Err()
case msg := <-c.sendCh:
- if err := c.conn.WriteMessage(websocket.TextMessage, msg); err != nil {
+ // Parse the message to extract the type for the event name
+ var base protocol.Message
+ if err := json.Unmarshal(msg, &base); err != nil {
+ c.log.Warn("Failed to parse outgoing message", "error", err)
+ continue
+ }
+
+ eventName := string(base.Type)
+ eventJSON := fmt.Sprintf(`42%s,["%s",%s]`, c.namespace, eventName, string(msg))
+
+ if err := c.conn.WriteMessage(websocket.TextMessage, []byte(eventJSON)); err != nil {
return fmt.Errorf("write error: %w", err)
}
}
}
}
+// pingLoop sends Engine.IO PING packets to keep the connection alive
+func (c *Client) pingLoop(ctx context.Context) error {
+ if c.pingInterval <= 0 {
+ c.pingInterval = 25 * time.Second
+ }
+
+ ticker := time.NewTicker(c.pingInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-ticker.C:
+ c.mu.RLock()
+ connected := c.connected
+ c.mu.RUnlock()
+
+ if !connected {
+ return fmt.Errorf("not connected")
+ }
+
+ // Engine.IO PING from client side (type 3 = PONG in EIO4 client-initiated)
+ // Actually in EIO4, server sends PINGs (2) and client responds with PONGs (3)
+ // Client doesn't need to send its own pings, just respond to server's
+ // But we'll use this loop to detect stale connections via WriteControl
+ if err := c.conn.WriteControl(
+ websocket.PingMessage,
+ nil,
+ time.Now().Add(5*time.Second),
+ ); err != nil {
+ return fmt.Errorf("websocket ping failed: %w", err)
+ }
+ }
+ }
+}
+
// handleReconnect implements exponential backoff reconnection
func (c *Client) handleReconnect(ctx context.Context) {
c.mu.Lock()
diff --git a/agent/scripts/install.ps1 b/agent/scripts/install.ps1
index 36c6f55..9bdba71 100644
--- a/agent/scripts/install.ps1
+++ b/agent/scripts/install.ps1
@@ -59,7 +59,7 @@ function Install-ServerKitAgent {
# Construct download URL
if ([string]::IsNullOrEmpty($DownloadUrl)) {
- $DownloadUrl = "$Server/downloads/agent/serverkit-agent-windows-$Arch.exe"
+ $DownloadUrl = "$Server/api/v1/servers/agent/download/windows/$Arch"
}
# Download agent
diff --git a/agent/scripts/install.sh b/agent/scripts/install.sh
index ea1eec8..872b1b1 100644
--- a/agent/scripts/install.sh
+++ b/agent/scripts/install.sh
@@ -156,7 +156,7 @@ create_user() {
download_agent() {
if [ -z "$DOWNLOAD_URL" ]; then
# Construct download URL from server
- DOWNLOAD_URL="${SERVER_URL}/downloads/agent/serverkit-agent-${OS}-${ARCH}"
+ DOWNLOAD_URL="${SERVER_URL}/api/v1/servers/agent/download/${OS}/${ARCH}"
fi
log_info "Downloading agent from $DOWNLOAD_URL..."
diff --git a/backend/Dockerfile b/backend/Dockerfile
index 3866867..fc3339a 100644
--- a/backend/Dockerfile
+++ b/backend/Dockerfile
@@ -30,7 +30,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
&& apt-get clean
# Create directories for ServerKit
-RUN mkdir -p /etc/serverkit /app/logs /var/quarantine /app/instance
+RUN mkdir -p /etc/serverkit /app/logs /var/quarantine /app/instance /var/backups/serverkit
# Copy requirements first for caching
COPY requirements.txt .
@@ -47,7 +47,8 @@ RUN useradd --create-home appuser \
&& chown -R appuser:appuser /app \
&& chown -R appuser:appuser /app/instance \
&& chown -R appuser:appuser /etc/serverkit \
- && chown -R appuser:appuser /var/quarantine
+ && chown -R appuser:appuser /var/quarantine \
+ && chown -R appuser:appuser /var/backups/serverkit
USER appuser
diff --git a/backend/app/__init__.py b/backend/app/__init__.py
index 59f03ff..2c5c664 100644
--- a/backend/app/__init__.py
+++ b/backend/app/__init__.py
@@ -5,12 +5,15 @@
from flask_cors import CORS
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
+from flask_migrate import Migrate
from config import config
db = SQLAlchemy()
jwt = JWTManager()
+migrate = Migrate()
limiter = Limiter(key_func=get_remote_address, default_limits=["100 per minute"])
+# Note: key_func is updated to get_rate_limit_key after app init
socketio = None
# Path to frontend dist folder (relative to backend folder)
@@ -33,13 +36,14 @@ def create_app(config_name=None):
# Initialize extensions
db.init_app(app)
+ migrate.init_app(app, db)
jwt.init_app(app)
limiter.init_app(app)
CORS(
app,
origins=app.config['CORS_ORIGINS'],
supports_credentials=True,
- allow_headers=['Content-Type', 'Authorization', 'X-Requested-With'],
+ allow_headers=['Content-Type', 'Authorization', 'X-Requested-With', 'X-API-Key'],
methods=['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'PATCH']
)
@@ -47,6 +51,19 @@ def create_app(config_name=None):
from app.middleware.security import register_security_headers
register_security_headers(app)
+ # Register API key authentication middleware
+ from app.middleware.api_key_auth import register_api_key_auth
+ register_api_key_auth(app)
+
+ # Register API analytics middleware
+ from app.middleware.api_analytics import register_api_analytics
+ register_api_analytics(app)
+
+ # Update rate limiter with custom key function
+ from app.middleware.rate_limit import get_rate_limit_key, register_rate_limit_headers
+ limiter._key_func = get_rate_limit_key
+ register_rate_limit_headers(app)
+
# Initialize SocketIO
from app.sockets import init_socketio
socketio = init_socketio(app)
@@ -151,6 +168,10 @@ def create_app(config_name=None):
from app.api.cron import cron_bp
app.register_blueprint(cron_bp, url_prefix='/api/v1/cron')
+ # Register blueprints - Email Server
+ from app.api.email import email_bp
+ app.register_blueprint(email_bp, url_prefix='/api/v1/email')
+
# Register blueprints - Uptime Tracking
from app.api.uptime import uptime_bp
app.register_blueprint(uptime_bp, url_prefix='/api/v1/uptime')
@@ -163,10 +184,32 @@ def create_app(config_name=None):
from app.api.two_factor import two_factor_bp
app.register_blueprint(two_factor_bp, url_prefix='/api/v1/auth/2fa')
+ # Register blueprints - SSO / OAuth
+ from app.api.sso import sso_bp
+ app.register_blueprint(sso_bp, url_prefix='/api/v1/sso')
+
+ # Register blueprints - Database Migrations
+ from app.api.migrations import migrations_bp
+ app.register_blueprint(migrations_bp, url_prefix='/api/v1/migrations')
+
+ # Register blueprints - API Enhancements
+ from app.api.api_keys import api_keys_bp
+ from app.api.api_analytics import api_analytics_bp
+ from app.api.event_subscriptions import event_subscriptions_bp
+ from app.api.docs import docs_bp
+ app.register_blueprint(api_keys_bp, url_prefix='/api/v1/api-keys')
+ app.register_blueprint(api_analytics_bp, url_prefix='/api/v1/api-analytics')
+ app.register_blueprint(event_subscriptions_bp, url_prefix='/api/v1/event-subscriptions')
+ app.register_blueprint(docs_bp, url_prefix='/api/v1/docs')
+
# Register blueprints - Admin (User Management, Settings, Audit Logs)
from app.api.admin import admin_bp
app.register_blueprint(admin_bp, url_prefix='/api/v1/admin')
+ # Register blueprints - Invitations
+ from app.api.invitations import invitations_bp
+ app.register_blueprint(invitations_bp, url_prefix='/api/v1/admin/invitations')
+
# Register blueprints - Historical Metrics
from app.api.metrics import metrics_bp
app.register_blueprint(metrics_bp, url_prefix='/api/v1/metrics')
@@ -179,12 +222,10 @@ def create_app(config_name=None):
from app.api.servers import servers_bp
app.register_blueprint(servers_bp, url_prefix='/api/v1/servers')
- # Create database tables
+ # Handle database migrations (Alembic)
with app.app_context():
- db.create_all()
-
- # Auto-migrate missing columns on existing tables
- _auto_migrate_columns(app)
+ from app.services.migration_service import MigrationService
+ MigrationService.check_and_prepare(app)
# Initialize default settings and migrate legacy roles
from app.services.settings_service import SettingsService
@@ -199,6 +240,13 @@ def create_app(config_name=None):
# Start auto-sync scheduler for WordPress environments
_start_auto_sync_scheduler(app)
+ # Start API analytics flush thread
+ from app.middleware.api_analytics import start_analytics_flush_thread
+ start_analytics_flush_thread(app)
+
+ # Start hourly analytics aggregation and event retry threads
+ _start_api_background_threads(app)
+
# Serve frontend for root path
@app.route('/')
def serve_index():
@@ -227,72 +275,6 @@ def get_socketio():
return socketio
-def _auto_migrate_columns(app):
- """Add missing columns to existing tables (lightweight auto-migration)."""
- import logging
- from sqlalchemy import text, inspect as sa_inspect
-
- logger = logging.getLogger(__name__)
-
- # Define expected columns per table: (table, column, sql_type)
- expected_columns = [
- # wordpress_sites table
- ('wordpress_sites', 'environment_type', "VARCHAR(20) DEFAULT 'standalone'"),
- ('wordpress_sites', 'multidev_branch', 'VARCHAR(200)'),
- ('wordpress_sites', 'is_locked', 'BOOLEAN DEFAULT 0'),
- ('wordpress_sites', 'locked_by', 'VARCHAR(100)'),
- ('wordpress_sites', 'locked_reason', 'VARCHAR(200)'),
- ('wordpress_sites', 'lock_expires_at', 'DATETIME'),
- ('wordpress_sites', 'compose_project_name', 'VARCHAR(100)'),
- ('wordpress_sites', 'container_prefix', 'VARCHAR(100)'),
- ('wordpress_sites', 'resource_limits', 'TEXT'),
- ('wordpress_sites', 'basic_auth_enabled', 'BOOLEAN DEFAULT 0'),
- ('wordpress_sites', 'basic_auth_user', 'VARCHAR(100)'),
- ('wordpress_sites', 'basic_auth_password_hash', 'VARCHAR(200)'),
- ('wordpress_sites', 'health_status', "VARCHAR(20) DEFAULT 'unknown'"),
- ('wordpress_sites', 'last_health_check', 'DATETIME'),
- ('wordpress_sites', 'disk_usage_bytes', 'BIGINT DEFAULT 0'),
- ('wordpress_sites', 'disk_usage_updated_at', 'DATETIME'),
- ('wordpress_sites', 'auto_sync_schedule', 'VARCHAR(100)'),
- ('wordpress_sites', 'auto_sync_enabled', 'BOOLEAN DEFAULT 0'),
- # applications table
- ('applications', 'private_slug', 'VARCHAR(50)'),
- ('applications', 'private_url_enabled', 'BOOLEAN DEFAULT 0'),
- ('applications', 'environment_type', "VARCHAR(20) DEFAULT 'standalone'"),
- ('applications', 'linked_app_id', 'INTEGER'),
- ('applications', 'shared_config', 'TEXT'),
- ]
-
- try:
- inspector = sa_inspect(db.engine)
- existing_tables = inspector.get_table_names()
-
- # Group by table for efficient inspection
- tables_checked = {}
- applied = 0
-
- for table, column, col_type in expected_columns:
- if table not in existing_tables:
- continue
-
- if table not in tables_checked:
- tables_checked[table] = [col['name'] for col in inspector.get_columns(table)]
-
- if column not in tables_checked[table]:
- try:
- db.session.execute(text(f'ALTER TABLE {table} ADD COLUMN {column} {col_type}'))
- applied += 1
- logger.info(f'Auto-migrated: added {table}.{column}')
- except Exception as e:
- logger.warning(f'Auto-migrate failed for {table}.{column}: {e}')
-
- if applied > 0:
- db.session.commit()
- logger.info(f'Auto-migration: applied {applied} column(s)')
- except Exception as e:
- logger.warning(f'Auto-migration check failed: {e}')
-
-
_auto_sync_thread = None
@@ -365,3 +347,39 @@ def _check_auto_sync_schedules(logger):
)
except Exception as e:
logger.error(f'Auto-sync check failed for site {site.id}: {e}')
+
+
+_api_bg_thread = None
+
+
+def _start_api_background_threads(app):
+ """Start background threads for API analytics aggregation and event delivery retry."""
+ global _api_bg_thread
+ if _api_bg_thread is not None:
+ return
+
+ import threading
+ import time
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+ def api_bg_loop():
+ while True:
+ try:
+ time.sleep(3600) # Run hourly
+ with app.app_context():
+ from app.services.api_analytics_service import ApiAnalyticsService
+ ApiAnalyticsService.aggregate_hourly()
+
+ from app.services.event_service import EventService
+ EventService.retry_failed()
+ except Exception as e:
+ logger.error(f'API background thread error: {e}')
+
+ _api_bg_thread = threading.Thread(
+ target=api_bg_loop,
+ daemon=True,
+ name='api-background'
+ )
+ _api_bg_thread.start()
diff --git a/backend/app/api/admin.py b/backend/app/api/admin.py
index b626e5c..c01e65e 100644
--- a/backend/app/api/admin.py
+++ b/backend/app/api/admin.py
@@ -1,12 +1,14 @@
"""Admin API endpoints for user management, settings, and audit logs."""
-from datetime import datetime
+from datetime import datetime, timedelta
from flask import Blueprint, request, jsonify
from flask_jwt_extended import get_jwt_identity
+from sqlalchemy import func
from app import db
from app.models import User, AuditLog
from app.middleware.rbac import admin_required, get_current_user
from app.services.audit_service import AuditService
from app.services.settings_service import SettingsService
+from app.services.permission_service import PermissionService
admin_bp = Blueprint('admin', __name__)
@@ -143,6 +145,15 @@ def update_user(user_id):
user.set_password(data['password'])
changes['password'] = 'changed'
+ # Update custom permissions (inline with user update)
+ if 'permissions' in data and user.role != User.ROLE_ADMIN:
+ from app.services.permission_service import PermissionService
+ perm_error = PermissionService.validate_permissions(data['permissions'])
+ if perm_error:
+ return jsonify({'error': perm_error}), 400
+ user.set_permissions(data['permissions'])
+ changes['permissions'] = 'updated'
+
# Update active status
if 'is_active' in data and data['is_active'] != user.is_active:
# Prevent deactivating self
@@ -379,3 +390,197 @@ def get_admin_stats():
},
'recent_logins': [log.to_dict() for log in recent_logins]
}), 200
+
+
+# ============================================
+# Permission Endpoints
+# ============================================
+
+@admin_bp.route('/users/
/permissions', methods=['GET'])
+@admin_required
+def get_user_permissions(user_id):
+ """Get a user's resolved permissions."""
+ perms = PermissionService.get_user_permissions(user_id)
+ if perms is None:
+ return jsonify({'error': 'User not found'}), 404
+ return jsonify({'permissions': perms}), 200
+
+
+@admin_bp.route('/users//permissions', methods=['PUT'])
+@admin_required
+def update_user_permissions(user_id):
+ """Update a user's custom permissions."""
+ data = request.get_json()
+ current_user_id = get_jwt_identity()
+
+ if not data or 'permissions' not in data:
+ return jsonify({'error': 'No permissions provided'}), 400
+
+ result = PermissionService.update_user_permissions(user_id, data['permissions'])
+ if not result['success']:
+ return jsonify({'error': result['error']}), 400
+
+ AuditService.log_user_action(
+ action=AuditLog.ACTION_USER_PERMISSIONS_UPDATE,
+ user_id=current_user_id,
+ target_user_id=user_id,
+ details={'permissions': data['permissions']}
+ )
+ db.session.commit()
+
+ return jsonify({
+ 'message': 'Permissions updated',
+ 'permissions': result['permissions']
+ }), 200
+
+
+@admin_bp.route('/users//permissions/reset', methods=['POST'])
+@admin_required
+def reset_user_permissions(user_id):
+ """Reset user permissions to role defaults."""
+ current_user_id = get_jwt_identity()
+
+ result = PermissionService.reset_to_role_defaults(user_id)
+ if not result['success']:
+ return jsonify({'error': result['error']}), 400
+
+ AuditService.log_user_action(
+ action=AuditLog.ACTION_USER_PERMISSIONS_RESET,
+ user_id=current_user_id,
+ target_user_id=user_id,
+ )
+ db.session.commit()
+
+ return jsonify({
+ 'message': 'Permissions reset to role defaults',
+ 'permissions': result['permissions']
+ }), 200
+
+
+@admin_bp.route('/permissions/templates', methods=['GET'])
+@admin_required
+def get_permission_templates():
+ """Get role template definitions and feature list."""
+ return jsonify({
+ 'features': User.PERMISSION_FEATURES,
+ 'templates': User.ROLE_PERMISSION_TEMPLATES,
+ }), 200
+
+
+# ============================================
+# Activity Dashboard Endpoints
+# ============================================
+
+@admin_bp.route('/activity/summary', methods=['GET'])
+@admin_required
+def get_activity_summary():
+ """Get activity summary: active users today, actions this week, top users."""
+ now = datetime.utcnow()
+ today_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
+ week_start = today_start - timedelta(days=7)
+
+ # Active users today (distinct users with audit log entries)
+ active_today = db.session.query(
+ func.count(func.distinct(AuditLog.user_id))
+ ).filter(
+ AuditLog.created_at >= today_start,
+ AuditLog.user_id.isnot(None)
+ ).scalar() or 0
+
+ # Actions this week
+ actions_this_week = db.session.query(
+ func.count(AuditLog.id)
+ ).filter(
+ AuditLog.created_at >= week_start
+ ).scalar() or 0
+
+ # Total users
+ total_users = User.query.filter_by(is_active=True).count()
+
+ # Top 5 most active users this week
+ top_users_query = db.session.query(
+ AuditLog.user_id,
+ func.count(AuditLog.id).label('action_count')
+ ).filter(
+ AuditLog.created_at >= week_start,
+ AuditLog.user_id.isnot(None)
+ ).group_by(AuditLog.user_id).order_by(
+ func.count(AuditLog.id).desc()
+ ).limit(5).all()
+
+ top_users = []
+ for user_id, count in top_users_query:
+ user = User.query.get(user_id)
+ if user:
+ top_users.append({
+ 'user_id': user_id,
+ 'username': user.username,
+ 'action_count': count
+ })
+
+ # Daily action counts for the past 7 days
+ daily_counts = []
+ for i in range(7):
+ day_start = today_start - timedelta(days=6 - i)
+ day_end = day_start + timedelta(days=1)
+ count = db.session.query(func.count(AuditLog.id)).filter(
+ AuditLog.created_at >= day_start,
+ AuditLog.created_at < day_end
+ ).scalar() or 0
+ daily_counts.append({
+ 'date': day_start.strftime('%Y-%m-%d'),
+ 'count': count
+ })
+
+ return jsonify({
+ 'active_users_today': active_today,
+ 'actions_this_week': actions_this_week,
+ 'total_users': total_users,
+ 'top_users': top_users,
+ 'daily_counts': daily_counts,
+ }), 200
+
+
+@admin_bp.route('/activity/feed', methods=['GET'])
+@admin_required
+def get_activity_feed():
+ """Get paginated activity feed with filters."""
+ page = request.args.get('page', 1, type=int)
+ per_page = min(request.args.get('per_page', 50, type=int), 100)
+ user_id = request.args.get('user_id', type=int)
+ action = request.args.get('action')
+ start_date = request.args.get('start_date')
+ end_date = request.args.get('end_date')
+
+ kwargs = {
+ 'page': page,
+ 'per_page': per_page,
+ 'user_id': user_id,
+ 'action': action,
+ }
+
+ if start_date:
+ try:
+ kwargs['start_date'] = datetime.fromisoformat(start_date)
+ except ValueError:
+ pass
+
+ if end_date:
+ try:
+ kwargs['end_date'] = datetime.fromisoformat(end_date)
+ except ValueError:
+ pass
+
+ pagination = AuditService.get_logs(**kwargs)
+
+ return jsonify({
+ 'logs': [log.to_dict() for log in pagination.items],
+ 'pagination': {
+ 'page': pagination.page,
+ 'per_page': pagination.per_page,
+ 'total': pagination.total,
+ 'pages': pagination.pages,
+ 'has_next': pagination.has_next,
+ 'has_prev': pagination.has_prev
+ }
+ }), 200
diff --git a/backend/app/api/api_analytics.py b/backend/app/api/api_analytics.py
new file mode 100644
index 0000000..e4ff45e
--- /dev/null
+++ b/backend/app/api/api_analytics.py
@@ -0,0 +1,59 @@
+"""API analytics endpoints."""
+from flask import Blueprint, jsonify, request
+from app.middleware.rbac import admin_required, auth_required, get_current_user
+from app.services.api_analytics_service import ApiAnalyticsService
+from app.services.api_key_service import ApiKeyService
+
+api_analytics_bp = Blueprint('api_analytics', __name__)
+
+
+@api_analytics_bp.route('/overview', methods=['GET'])
+@admin_required
+def overview():
+ """Get API usage overview."""
+ period = request.args.get('period', '24h')
+ return jsonify(ApiAnalyticsService.get_overview(period))
+
+
+@api_analytics_bp.route('/endpoints', methods=['GET'])
+@admin_required
+def endpoints():
+ """Get top endpoints by usage."""
+ period = request.args.get('period', '24h')
+ limit = request.args.get('limit', 20, type=int)
+ return jsonify({'endpoints': ApiAnalyticsService.get_endpoint_stats(period, limit)})
+
+
+@api_analytics_bp.route('/errors', methods=['GET'])
+@admin_required
+def errors():
+ """Get error breakdown."""
+ period = request.args.get('period', '24h')
+ return jsonify({'errors': ApiAnalyticsService.get_error_stats(period)})
+
+
+@api_analytics_bp.route('/timeseries', methods=['GET'])
+@admin_required
+def timeseries():
+ """Get time series data for charts."""
+ period = request.args.get('period', '24h')
+ interval = request.args.get('interval', 'hour')
+ return jsonify({'data': ApiAnalyticsService.get_time_series(period, interval)})
+
+
+@api_analytics_bp.route('/keys//usage', methods=['GET'])
+@auth_required
+def key_usage(key_id):
+ """Get usage stats for a specific API key."""
+ period = request.args.get('period', '24h')
+
+ # Allow key owners to view their own usage
+ user = get_current_user()
+ api_key = ApiKeyService.get_key(key_id)
+ if not api_key:
+ return jsonify({'error': 'API key not found'}), 404
+
+ if not user.is_admin and api_key.user_id != user.id:
+ return jsonify({'error': 'Access denied'}), 403
+
+ return jsonify(ApiAnalyticsService.get_key_usage(key_id, period))
diff --git a/backend/app/api/api_keys.py b/backend/app/api/api_keys.py
new file mode 100644
index 0000000..caf1af0
--- /dev/null
+++ b/backend/app/api/api_keys.py
@@ -0,0 +1,150 @@
+"""API Key management endpoints."""
+from datetime import datetime
+from flask import Blueprint, jsonify, request
+from flask_jwt_extended import jwt_required, get_jwt_identity
+from app.middleware.rbac import get_current_user
+from app.services.api_key_service import ApiKeyService
+from app.services.audit_service import AuditService
+from app.models.audit_log import AuditLog
+
+api_keys_bp = Blueprint('api_keys', __name__)
+
+
+@api_keys_bp.route('/', methods=['GET'])
+@jwt_required()
+def list_keys():
+ """List the current user's API keys."""
+ user = get_current_user()
+ if not user or not user.is_developer:
+ return jsonify({'error': 'Developer access required'}), 403
+
+ keys = ApiKeyService.list_keys(user.id)
+ return jsonify({'api_keys': [k.to_dict() for k in keys]})
+
+
+@api_keys_bp.route('/', methods=['POST'])
+@jwt_required()
+def create_key():
+ """Create a new API key."""
+ user = get_current_user()
+ if not user or not user.is_developer:
+ return jsonify({'error': 'Developer access required'}), 403
+
+ data = request.get_json() or {}
+ name = data.get('name')
+ if not name:
+ return jsonify({'error': 'Name is required'}), 400
+
+ scopes = data.get('scopes')
+ tier = data.get('tier', 'standard')
+ expires_at = None
+ if data.get('expires_at'):
+ try:
+ expires_at = datetime.fromisoformat(data['expires_at'].replace('Z', '+00:00'))
+ except (ValueError, AttributeError):
+ return jsonify({'error': 'Invalid expires_at format'}), 400
+
+ api_key, raw_key = ApiKeyService.create_key(
+ user_id=user.id,
+ name=name,
+ scopes=scopes,
+ tier=tier,
+ expires_at=expires_at,
+ )
+
+ AuditService.log(
+ AuditLog.ACTION_API_KEY_CREATE,
+ user_id=user.id,
+ target_type='api_key',
+ target_id=api_key.id,
+ details={'name': name, 'tier': tier}
+ )
+
+ result = api_key.to_dict()
+ result['raw_key'] = raw_key # Only exposed once at creation
+ return jsonify(result), 201
+
+
+@api_keys_bp.route('/', methods=['GET'])
+@jwt_required()
+def get_key(key_id):
+ """Get API key details."""
+ user = get_current_user()
+ if not user:
+ return jsonify({'error': 'User not found'}), 404
+
+ api_key = ApiKeyService.get_key(key_id, user.id)
+ if not api_key:
+ return jsonify({'error': 'API key not found'}), 404
+
+ return jsonify(api_key.to_dict())
+
+
+@api_keys_bp.route('/', methods=['PUT'])
+@jwt_required()
+def update_key(key_id):
+ """Update API key metadata."""
+ user = get_current_user()
+ if not user:
+ return jsonify({'error': 'User not found'}), 404
+
+ data = request.get_json() or {}
+ api_key = ApiKeyService.update_key(
+ key_id=key_id,
+ user_id=user.id,
+ name=data.get('name'),
+ scopes=data.get('scopes'),
+ tier=data.get('tier'),
+ )
+ if not api_key:
+ return jsonify({'error': 'API key not found'}), 404
+
+ return jsonify(api_key.to_dict())
+
+
+@api_keys_bp.route('/', methods=['DELETE'])
+@jwt_required()
+def revoke_key(key_id):
+ """Revoke an API key."""
+ user = get_current_user()
+ if not user:
+ return jsonify({'error': 'User not found'}), 404
+
+ api_key = ApiKeyService.revoke_key(key_id, user.id)
+ if not api_key:
+ return jsonify({'error': 'API key not found'}), 404
+
+ AuditService.log(
+ AuditLog.ACTION_API_KEY_REVOKE,
+ user_id=user.id,
+ target_type='api_key',
+ target_id=key_id,
+ details={'name': api_key.name}
+ )
+
+ return jsonify({'message': 'API key revoked'})
+
+
+@api_keys_bp.route('//rotate', methods=['POST'])
+@jwt_required()
+def rotate_key(key_id):
+ """Rotate an API key (revoke + recreate with same config)."""
+ user = get_current_user()
+ if not user:
+ return jsonify({'error': 'User not found'}), 404
+
+ new_key, raw_key = ApiKeyService.rotate_key(key_id, user.id)
+ if not new_key:
+ return jsonify({'error': 'API key not found'}), 404
+
+ AuditService.log(
+ AuditLog.ACTION_API_KEY_ROTATE,
+ user_id=user.id,
+ target_type='api_key',
+ target_id=new_key.id,
+ details={'old_key_id': key_id, 'name': new_key.name}
+ )
+
+ result = new_key.to_dict()
+ result['raw_key'] = raw_key
+ return jsonify(result)
diff --git a/backend/app/api/auth.py b/backend/app/api/auth.py
index 707c6b4..adc7d7c 100644
--- a/backend/app/api/auth.py
+++ b/backend/app/api/auth.py
@@ -21,9 +21,26 @@ def get_setup_status():
needs_setup = SettingsService.needs_setup()
registration_enabled = SettingsService.is_registration_enabled()
+ # SSO info for login page
+ from app.services import sso_service
+ sso_providers = sso_service.get_enabled_providers()
+ password_login_enabled = sso_service.is_password_login_allowed()
+
+ # Migration status
+ from app.services.migration_service import MigrationService
+ migration_status = MigrationService.get_status()
+
return jsonify({
'needs_setup': needs_setup,
- 'registration_enabled': registration_enabled
+ 'registration_enabled': registration_enabled,
+ 'sso_providers': sso_providers,
+ 'password_login_enabled': password_login_enabled,
+ 'needs_migration': migration_status['needs_migration'],
+ 'migration_info': {
+ 'pending_count': migration_status['pending_count'],
+ 'current_revision': migration_status['current_revision'],
+ 'head_revision': migration_status['head_revision'],
+ },
}), 200
@@ -35,9 +52,19 @@ def register():
if not data:
return jsonify({'error': 'No data provided'}), 400
+ invite_token = data.get('invite_token')
+ invitation = None
+
+ # Validate invitation if provided
+ if invite_token:
+ from app.services.invitation_service import InvitationService
+ invitation = InvitationService.validate_token(invite_token)
+ if not invitation:
+ return jsonify({'error': 'Invalid or expired invitation'}), 400
+
# Check if registration is allowed
is_first_user = User.query.count() == 0
- if not is_first_user and not SettingsService.is_registration_enabled():
+ if not is_first_user and not invitation and not SettingsService.is_registration_enabled():
return jsonify({'error': 'Registration is disabled'}), 403
email = data.get('email')
@@ -56,22 +83,52 @@ def register():
if len(password) < 8:
return jsonify({'error': 'Password must be at least 8 characters'}), 400
+ # Determine role and permissions from invitation or defaults
+ if is_first_user:
+ role = User.ROLE_ADMIN
+ elif invitation:
+ role = invitation.role
+ else:
+ role = User.ROLE_DEVELOPER
+
user = User(
email=email,
username=username,
- role=User.ROLE_ADMIN if is_first_user else User.ROLE_DEVELOPER
+ role=role
)
user.set_password(password)
+ # Apply custom permissions from invitation
+ if invitation and invitation.get_permissions():
+ user.set_permissions(invitation.get_permissions())
+
db.session.add(user)
db.session.commit()
+ # Mark invitation accepted
+ if invitation:
+ from app.services.invitation_service import InvitationService
+ InvitationService.accept_invitation(invite_token, user.id)
+
+ AuditService.log(
+ action=AuditLog.ACTION_INVITATION_ACCEPT,
+ user_id=user.id,
+ target_type='invitation',
+ target_id=invitation.id,
+ details={'role': role}
+ )
+
# Log the user creation
AuditService.log_user_action(
action=AuditLog.ACTION_USER_CREATE,
user_id=user.id,
target_user_id=user.id,
- details={'username': username, 'role': user.role, 'self_registration': True}
+ details={
+ 'username': username,
+ 'role': user.role,
+ 'self_registration': True,
+ 'via_invitation': invitation is not None
+ }
)
db.session.commit()
@@ -123,6 +180,11 @@ def complete_onboarding():
@auth_bp.route('/login', methods=['POST'])
@limiter.limit("5 per minute")
def login():
+ # Check if password login is disabled (SSO-only mode)
+ from app.services import sso_service
+ if not sso_service.is_password_login_allowed():
+ return jsonify({'error': 'Password login is disabled. Please use SSO.'}), 403
+
data = request.get_json()
if not data:
diff --git a/backend/app/api/cron.py b/backend/app/api/cron.py
index 2e827b3..94b361a 100644
--- a/backend/app/api/cron.py
+++ b/backend/app/api/cron.py
@@ -57,6 +57,25 @@ def create_job():
return jsonify(result), 400
+@cron_bp.route('/jobs/', methods=['PUT'])
+@admin_required
+def update_job(job_id):
+ """Update a cron job."""
+ data = request.get_json()
+
+ result = CronService.update_job(
+ job_id=job_id,
+ name=data.get('name'),
+ command=data.get('command'),
+ schedule=data.get('schedule'),
+ description=data.get('description')
+ )
+
+ if result.get('success'):
+ return jsonify(result)
+ return jsonify(result), 400
+
+
@cron_bp.route('/jobs/', methods=['DELETE'])
@admin_required
def delete_job(job_id):
diff --git a/backend/app/api/docs.py b/backend/app/api/docs.py
new file mode 100644
index 0000000..86e85ee
--- /dev/null
+++ b/backend/app/api/docs.py
@@ -0,0 +1,47 @@
+"""OpenAPI/Swagger documentation endpoints."""
+from flask import Blueprint, jsonify
+
+docs_bp = Blueprint('docs', __name__)
+
+
+@docs_bp.route('/', methods=['GET'])
+def swagger_ui():
+ """Serve Swagger UI."""
+ html = '''
+
+
+
+ ServerKit API Documentation
+
+
+
+
+
+
+
+
+'''
+ return html, 200, {'Content-Type': 'text/html'}
+
+
+@docs_bp.route('/openapi.json', methods=['GET'])
+def openapi_spec():
+ """Return the generated OpenAPI specification."""
+ from app.services.openapi_service import OpenAPIService
+ spec = OpenAPIService.generate_spec()
+ return jsonify(spec)
diff --git a/backend/app/api/email.py b/backend/app/api/email.py
new file mode 100644
index 0000000..57b9e7d
--- /dev/null
+++ b/backend/app/api/email.py
@@ -0,0 +1,427 @@
+"""Email Server API endpoints for managing mail services, domains, accounts, and DNS."""
+from flask import Blueprint, request, jsonify
+
+from ..middleware.rbac import admin_required, viewer_required
+from ..services.email_service import EmailService
+from ..services.dns_provider_service import DNSProviderService
+from ..services.spamassassin_service import SpamAssassinService
+from ..services.roundcube_service import RoundcubeService
+from ..services.postfix_service import PostfixService
+
+email_bp = Blueprint('email', __name__)
+
+
+# ── Status & Installation ──
+
+@email_bp.route('/status', methods=['GET'])
+@viewer_required
+def get_status():
+ """Get aggregate email server status."""
+ roundcube = RoundcubeService.get_status()
+ status = EmailService.get_status()
+ status['roundcube'] = roundcube
+ return jsonify(status), 200
+
+
+@email_bp.route('/install', methods=['POST'])
+@admin_required
+def install():
+ """Install and configure all email components."""
+ data = request.get_json() or {}
+ hostname = data.get('hostname')
+ result = EmailService.install_all(hostname)
+ return jsonify(result), 200 if result.get('success') else 500
+
+
+@email_bp.route('/service//', methods=['POST'])
+@admin_required
+def control_service(component, action):
+ """Start/stop/restart an email component."""
+ result = EmailService.control_service(component, action)
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+# ── Domains ──
+
+@email_bp.route('/domains', methods=['GET'])
+@viewer_required
+def list_domains():
+ """List all email domains."""
+ domains = EmailService.get_domains()
+ return jsonify({'domains': domains}), 200
+
+
+@email_bp.route('/domains', methods=['POST'])
+@admin_required
+def add_domain():
+ """Add an email domain."""
+ data = request.get_json()
+ if not data or not data.get('name'):
+ return jsonify({'success': False, 'error': 'Domain name is required'}), 400
+ result = EmailService.add_domain(
+ data['name'],
+ dns_provider_id=data.get('dns_provider_id'),
+ dns_zone_id=data.get('dns_zone_id'),
+ )
+ return jsonify(result), 201 if result.get('success') else 400
+
+
+@email_bp.route('/domains/', methods=['GET'])
+@viewer_required
+def get_domain(domain_id):
+ """Get domain details."""
+ result = EmailService.get_domain(domain_id)
+ if result.get('success'):
+ return jsonify(result), 200
+ return jsonify(result), 404
+
+
+@email_bp.route('/domains/', methods=['DELETE'])
+@admin_required
+def remove_domain(domain_id):
+ """Remove an email domain."""
+ result = EmailService.remove_domain(domain_id)
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+@email_bp.route('/domains//verify-dns', methods=['POST'])
+@viewer_required
+def verify_dns(domain_id):
+ """Verify DNS records for a domain."""
+ result = EmailService.verify_dns(domain_id)
+ return jsonify(result), 200
+
+
+@email_bp.route('/domains//deploy-dns', methods=['POST'])
+@admin_required
+def deploy_dns(domain_id):
+ """Deploy DNS records via provider API."""
+ from app.models.email import EmailDomain
+ domain = EmailDomain.query.get(domain_id)
+ if not domain:
+ return jsonify({'success': False, 'error': 'Domain not found'}), 404
+ if not domain.dns_provider_id or not domain.dns_zone_id:
+ return jsonify({'success': False, 'error': 'No DNS provider configured for this domain'}), 400
+ result = DNSProviderService.deploy_email_records(
+ domain.dns_provider_id,
+ domain.dns_zone_id,
+ domain.name,
+ domain.dkim_selector or 'default',
+ domain.dkim_public_key or '',
+ )
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+# ── Accounts ──
+
+@email_bp.route('/domains//accounts', methods=['GET'])
+@viewer_required
+def list_accounts(domain_id):
+ """List email accounts for a domain."""
+ accounts = EmailService.get_accounts(domain_id)
+ return jsonify({'accounts': accounts}), 200
+
+
+@email_bp.route('/domains//accounts', methods=['POST'])
+@admin_required
+def create_account(domain_id):
+ """Create an email account."""
+ data = request.get_json()
+ if not data:
+ return jsonify({'success': False, 'error': 'Request body required'}), 400
+ username = data.get('username')
+ password = data.get('password')
+ if not username or not password:
+ return jsonify({'success': False, 'error': 'Username and password are required'}), 400
+ result = EmailService.add_account(
+ domain_id,
+ username,
+ password,
+ quota_mb=data.get('quota_mb', 1024),
+ )
+ return jsonify(result), 201 if result.get('success') else 400
+
+
+@email_bp.route('/accounts/', methods=['GET'])
+@viewer_required
+def get_account(account_id):
+ """Get account details."""
+ from app.models.email import EmailAccount
+ account = EmailAccount.query.get(account_id)
+ if not account:
+ return jsonify({'success': False, 'error': 'Account not found'}), 404
+ return jsonify({'success': True, 'account': account.to_dict()}), 200
+
+
+@email_bp.route('/accounts/', methods=['PUT'])
+@admin_required
+def update_account(account_id):
+ """Update account settings."""
+ data = request.get_json() or {}
+ result = EmailService.update_account(
+ account_id,
+ quota_mb=data.get('quota_mb'),
+ is_active=data.get('is_active'),
+ )
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+@email_bp.route('/accounts/', methods=['DELETE'])
+@admin_required
+def delete_account(account_id):
+ """Delete an email account."""
+ result = EmailService.delete_account(account_id)
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+@email_bp.route('/accounts//password', methods=['POST'])
+@admin_required
+def change_password(account_id):
+ """Change account password."""
+ data = request.get_json()
+ if not data or not data.get('password'):
+ return jsonify({'success': False, 'error': 'New password is required'}), 400
+ result = EmailService.change_password(account_id, data['password'])
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+# ── Aliases ──
+
+@email_bp.route('/domains//aliases', methods=['GET'])
+@viewer_required
+def list_aliases(domain_id):
+ """List email aliases for a domain."""
+ aliases = EmailService.get_aliases(domain_id)
+ return jsonify({'aliases': aliases}), 200
+
+
+@email_bp.route('/domains//aliases', methods=['POST'])
+@admin_required
+def create_alias(domain_id):
+ """Create an email alias."""
+ data = request.get_json()
+ if not data or not data.get('source') or not data.get('destination'):
+ return jsonify({'success': False, 'error': 'Source and destination are required'}), 400
+ result = EmailService.add_alias(domain_id, data['source'], data['destination'])
+ return jsonify(result), 201 if result.get('success') else 400
+
+
+@email_bp.route('/aliases/', methods=['DELETE'])
+@admin_required
+def delete_alias(alias_id):
+ """Delete an email alias."""
+ result = EmailService.remove_alias(alias_id)
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+# ── Forwarding Rules ──
+
+@email_bp.route('/accounts//forwarding', methods=['GET'])
+@viewer_required
+def list_forwarding(account_id):
+ """List forwarding rules for an account."""
+ rules = EmailService.get_forwarding(account_id)
+ return jsonify({'rules': rules}), 200
+
+
+@email_bp.route('/accounts//forwarding', methods=['POST'])
+@admin_required
+def create_forwarding(account_id):
+ """Create a forwarding rule."""
+ data = request.get_json()
+ if not data or not data.get('destination'):
+ return jsonify({'success': False, 'error': 'Destination is required'}), 400
+ result = EmailService.add_forwarding(
+ account_id,
+ data['destination'],
+ keep_copy=data.get('keep_copy', True),
+ )
+ return jsonify(result), 201 if result.get('success') else 400
+
+
+@email_bp.route('/forwarding/', methods=['PUT'])
+@admin_required
+def update_forwarding(rule_id):
+ """Update a forwarding rule."""
+ data = request.get_json() or {}
+ result = EmailService.update_forwarding(
+ rule_id,
+ destination=data.get('destination'),
+ keep_copy=data.get('keep_copy'),
+ is_active=data.get('is_active'),
+ )
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+@email_bp.route('/forwarding/', methods=['DELETE'])
+@admin_required
+def delete_forwarding(rule_id):
+ """Delete a forwarding rule."""
+ result = EmailService.remove_forwarding(rule_id)
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+# ── DNS Providers ──
+
+@email_bp.route('/dns-providers', methods=['GET'])
+@viewer_required
+def list_dns_providers():
+ """List configured DNS providers."""
+ providers = DNSProviderService.list_providers()
+ return jsonify({'providers': providers}), 200
+
+
+@email_bp.route('/dns-providers', methods=['POST'])
+@admin_required
+def add_dns_provider():
+ """Add a DNS provider."""
+ data = request.get_json()
+ if not data or not data.get('name') or not data.get('provider') or not data.get('api_key'):
+ return jsonify({'success': False, 'error': 'Name, provider, and api_key are required'}), 400
+ result = DNSProviderService.add_provider(
+ name=data['name'],
+ provider=data['provider'],
+ api_key=data['api_key'],
+ api_secret=data.get('api_secret'),
+ api_email=data.get('api_email'),
+ is_default=data.get('is_default', False),
+ )
+ return jsonify(result), 201 if result.get('success') else 400
+
+
+@email_bp.route('/dns-providers/', methods=['DELETE'])
+@admin_required
+def remove_dns_provider(provider_id):
+ """Remove a DNS provider."""
+ result = DNSProviderService.remove_provider(provider_id)
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+@email_bp.route('/dns-providers//test', methods=['POST'])
+@admin_required
+def test_dns_provider(provider_id):
+ """Test DNS provider connection."""
+ result = DNSProviderService.test_connection(provider_id)
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+@email_bp.route('/dns-providers//zones', methods=['GET'])
+@viewer_required
+def list_dns_zones(provider_id):
+ """List DNS zones from a provider."""
+ result = DNSProviderService.list_zones(provider_id)
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+# ── SpamAssassin Config ──
+
+@email_bp.route('/spam/config', methods=['GET'])
+@viewer_required
+def get_spam_config():
+ """Get SpamAssassin configuration."""
+ result = SpamAssassinService.get_config()
+ return jsonify(result), 200
+
+
+@email_bp.route('/spam/config', methods=['PUT'])
+@admin_required
+def update_spam_config():
+ """Update SpamAssassin configuration."""
+ data = request.get_json()
+ if not data:
+ return jsonify({'success': False, 'error': 'Request body required'}), 400
+ result = SpamAssassinService.configure(data)
+ if result.get('success'):
+ SpamAssassinService.reload()
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+@email_bp.route('/spam/update-rules', methods=['POST'])
+@admin_required
+def update_spam_rules():
+ """Update SpamAssassin rules."""
+ result = SpamAssassinService.update_rules()
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+# ── Roundcube Webmail ──
+
+@email_bp.route('/webmail/status', methods=['GET'])
+@viewer_required
+def webmail_status():
+ """Get Roundcube webmail status."""
+ result = RoundcubeService.get_status()
+ return jsonify(result), 200
+
+
+@email_bp.route('/webmail/install', methods=['POST'])
+@admin_required
+def webmail_install():
+ """Install Roundcube webmail."""
+ data = request.get_json() or {}
+ result = RoundcubeService.install(
+ imap_host=data.get('imap_host', 'host.docker.internal'),
+ smtp_host=data.get('smtp_host', 'host.docker.internal'),
+ )
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+@email_bp.route('/webmail/service/', methods=['POST'])
+@admin_required
+def webmail_control(action):
+ """Start/stop/restart Roundcube."""
+ actions = {
+ 'start': RoundcubeService.start,
+ 'stop': RoundcubeService.stop,
+ 'restart': RoundcubeService.restart,
+ }
+ if action not in actions:
+ return jsonify({'success': False, 'error': 'Invalid action'}), 400
+ result = actions[action]()
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+@email_bp.route('/webmail/configure-proxy', methods=['POST'])
+@admin_required
+def webmail_configure_proxy():
+ """Configure Nginx reverse proxy for Roundcube."""
+ data = request.get_json()
+ if not data or not data.get('domain'):
+ return jsonify({'success': False, 'error': 'Domain is required'}), 400
+ result = RoundcubeService.configure_nginx_proxy(data['domain'])
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+# ── Mail Queue & Logs ──
+
+@email_bp.route('/queue', methods=['GET'])
+@viewer_required
+def get_queue():
+ """Get Postfix mail queue."""
+ result = PostfixService.get_queue()
+ return jsonify(result), 200
+
+
+@email_bp.route('/queue/flush', methods=['POST'])
+@admin_required
+def flush_queue():
+ """Flush the mail queue."""
+ result = PostfixService.flush_queue()
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+@email_bp.route('/queue/', methods=['DELETE'])
+@admin_required
+def delete_queue_item(queue_id):
+ """Delete a message from the queue."""
+ result = PostfixService.delete_from_queue(queue_id)
+ return jsonify(result), 200 if result.get('success') else 400
+
+
+@email_bp.route('/logs', methods=['GET'])
+@viewer_required
+def get_logs():
+ """Get mail logs."""
+ lines = request.args.get('lines', 100, type=int)
+ result = PostfixService.get_logs(lines)
+ return jsonify(result), 200
diff --git a/backend/app/api/event_subscriptions.py b/backend/app/api/event_subscriptions.py
new file mode 100644
index 0000000..c510e87
--- /dev/null
+++ b/backend/app/api/event_subscriptions.py
@@ -0,0 +1,228 @@
+"""Webhook event subscription endpoints."""
+from flask import Blueprint, jsonify, request
+from flask_jwt_extended import jwt_required, get_jwt_identity
+from app.middleware.rbac import get_current_user, auth_required
+from app.models.event_subscription import EventSubscription
+from app.services.event_service import EventService
+from app import db
+
+event_subscriptions_bp = Blueprint('event_subscriptions', __name__)
+
+
+@event_subscriptions_bp.route('/', methods=['GET'])
+@jwt_required()
+def list_subscriptions():
+    """List webhook subscriptions."""
+    user = get_current_user()
+    if not user or not user.is_developer:
+        return jsonify({'error': 'Developer access required'}), 403
+
+    # Admins see every subscription; developers only their own.
+    if user.is_admin:
+        subs = EventSubscription.query.order_by(EventSubscription.created_at.desc()).all()
+    else:
+        subs = EventSubscription.query.filter_by(user_id=user.id).order_by(
+            EventSubscription.created_at.desc()
+        ).all()
+
+    return jsonify({'subscriptions': [s.to_dict() for s in subs]})
+
+
+@event_subscriptions_bp.route('/', methods=['POST'])
+@jwt_required()
+def create_subscription():
+    """Create a new webhook subscription.
+
+    Requires name, url, and at least one event type. Returns 201 with the
+    subscription dict; the signing secret is included in the response only
+    at creation time.
+    """
+    user = get_current_user()
+    if not user or not user.is_developer:
+        return jsonify({'error': 'Developer access required'}), 403
+
+    data = request.get_json() or {}
+    name = data.get('name')
+    url = data.get('url')
+    events = data.get('events', [])
+
+    if not name or not url:
+        return jsonify({'error': 'Name and URL are required'}), 400
+    if not events:
+        return jsonify({'error': 'At least one event type is required'}), 400
+
+    sub = EventSubscription(
+        user_id=user.id,
+        name=name,
+        url=url,
+        retry_count=data.get('retry_count', 3),
+        timeout_seconds=data.get('timeout_seconds', 10),
+    )
+    sub.set_events(events)
+
+    if data.get('generate_secret', True):
+        sub.secret = EventSubscription.generate_secret()
+
+    custom_headers = data.get('headers')
+    if custom_headers:
+        sub.set_headers(custom_headers)
+
+    db.session.add(sub)
+    db.session.commit()
+
+    result = sub.to_dict()
+    # Expose secret once at creation
+    if sub.secret:
+        result['secret'] = sub.secret
+
+    return jsonify(result), 201
+
+
+@event_subscriptions_bp.route('/events', methods=['GET'])
+@auth_required()
+def list_events():
+    """List available event types."""
+    return jsonify({'events': EventService.get_available_events()})
+
+
+@event_subscriptions_bp.route('/<int:sub_id>', methods=['GET'])
+@jwt_required()
+def get_subscription(sub_id):
+    """Get subscription details."""
+    user = get_current_user()
+    if not user:
+        return jsonify({'error': 'User not found'}), 404
+
+    sub = EventSubscription.query.get(sub_id)
+    if not sub:
+        return jsonify({'error': 'Subscription not found'}), 404
+    # Non-admins may only access their own subscriptions.
+    if not user.is_admin and sub.user_id != user.id:
+        return jsonify({'error': 'Access denied'}), 403
+
+    return jsonify(sub.to_dict())
+
+
+@event_subscriptions_bp.route('/<int:sub_id>', methods=['PUT'])
+@jwt_required()
+def update_subscription(sub_id):
+    """Update a webhook subscription.
+
+    Only fields present in the JSON body are changed.
+    """
+    user = get_current_user()
+    if not user:
+        return jsonify({'error': 'User not found'}), 404
+
+    sub = EventSubscription.query.get(sub_id)
+    if not sub:
+        return jsonify({'error': 'Subscription not found'}), 404
+    if not user.is_admin and sub.user_id != user.id:
+        return jsonify({'error': 'Access denied'}), 403
+
+    data = request.get_json() or {}
+
+    if 'name' in data:
+        sub.name = data['name']
+    if 'url' in data:
+        sub.url = data['url']
+    if 'events' in data:
+        sub.set_events(data['events'])
+    if 'is_active' in data:
+        sub.is_active = data['is_active']
+    if 'headers' in data:
+        sub.set_headers(data['headers'])
+    if 'retry_count' in data:
+        sub.retry_count = data['retry_count']
+    if 'timeout_seconds' in data:
+        sub.timeout_seconds = data['timeout_seconds']
+
+    db.session.commit()
+    return jsonify(sub.to_dict())
+
+
+@event_subscriptions_bp.route('/<int:sub_id>', methods=['DELETE'])
+@jwt_required()
+def delete_subscription(sub_id):
+    """Delete a webhook subscription."""
+    user = get_current_user()
+    if not user:
+        return jsonify({'error': 'User not found'}), 404
+
+    sub = EventSubscription.query.get(sub_id)
+    if not sub:
+        return jsonify({'error': 'Subscription not found'}), 404
+    if not user.is_admin and sub.user_id != user.id:
+        return jsonify({'error': 'Access denied'}), 403
+
+    db.session.delete(sub)
+    db.session.commit()
+    return jsonify({'message': 'Subscription deleted'})
+
+
+@event_subscriptions_bp.route('/<int:sub_id>/test', methods=['POST'])
+@jwt_required()
+def test_subscription(sub_id):
+    """Send a test event to a subscription."""
+    user = get_current_user()
+    if not user:
+        return jsonify({'error': 'User not found'}), 404
+
+    sub = EventSubscription.query.get(sub_id)
+    if not sub:
+        return jsonify({'error': 'Subscription not found'}), 404
+    if not user.is_admin and sub.user_id != user.id:
+        return jsonify({'error': 'Access denied'}), 403
+
+    delivery = EventService.send_test(sub_id)
+    if not delivery:
+        return jsonify({'error': 'Failed to send test'}), 500
+
+    return jsonify(delivery.to_dict())
+
+
+@event_subscriptions_bp.route('/<int:sub_id>/deliveries', methods=['GET'])
+@jwt_required()
+def list_deliveries(sub_id):
+    """Get delivery history for a subscription (paginated)."""
+    user = get_current_user()
+    if not user:
+        return jsonify({'error': 'User not found'}), 404
+
+    sub = EventSubscription.query.get(sub_id)
+    if not sub:
+        return jsonify({'error': 'Subscription not found'}), 404
+    if not user.is_admin and sub.user_id != user.id:
+        return jsonify({'error': 'Access denied'}), 403
+
+    page = request.args.get('page', 1, type=int)
+    per_page = request.args.get('per_page', 50, type=int)
+
+    pagination = EventService.get_deliveries(sub_id, page, per_page)
+    return jsonify({
+        'deliveries': [d.to_dict() for d in pagination.items],
+        'total': pagination.total,
+        'page': page,
+        'per_page': per_page,
+        'pages': pagination.pages,
+    })
+
+
+@event_subscriptions_bp.route('/<int:sub_id>/deliveries/<int:delivery_id>/retry', methods=['POST'])
+@jwt_required()
+def retry_delivery(sub_id, delivery_id):
+    """Retry a failed delivery.
+
+    Resets the delivery to pending, triggers an immediate attempt, and
+    returns the refreshed delivery record.
+    """
+    user = get_current_user()
+    if not user:
+        return jsonify({'error': 'User not found'}), 404
+
+    sub = EventSubscription.query.get(sub_id)
+    if not sub:
+        return jsonify({'error': 'Subscription not found'}), 404
+    if not user.is_admin and sub.user_id != user.id:
+        return jsonify({'error': 'Access denied'}), 403
+
+    from app.models.event_subscription import EventDelivery
+    # Scope the lookup to this subscription so a delivery id from another
+    # subscription cannot be retried through this route.
+    delivery = EventDelivery.query.filter_by(id=delivery_id, subscription_id=sub_id).first()
+    if not delivery:
+        return jsonify({'error': 'Delivery not found'}), 404
+
+    delivery.status = EventDelivery.STATUS_PENDING
+    delivery.next_retry_at = None
+    db.session.commit()
+
+    EventService.deliver(delivery_id)
+
+    # Refresh
+    delivery = EventDelivery.query.get(delivery_id)
+    return jsonify(delivery.to_dict())
diff --git a/backend/app/api/invitations.py b/backend/app/api/invitations.py
new file mode 100644
index 0000000..9d09fe9
--- /dev/null
+++ b/backend/app/api/invitations.py
@@ -0,0 +1,140 @@
+"""Invitation API endpoints for team invitations."""
+from flask import Blueprint, request, jsonify
+from flask_jwt_extended import get_jwt_identity
+from app import db
+from app.models import AuditLog
+from app.middleware.rbac import admin_required
+from app.services.invitation_service import InvitationService
+from app.services.audit_service import AuditService
+
+invitations_bp = Blueprint('invitations', __name__)
+
+
+@invitations_bp.route('/', methods=['GET'])
+@admin_required
+def list_invitations():
+    """List all invitations, optionally filtered by status."""
+    status = request.args.get('status')
+    invitations = InvitationService.list_invitations(status=status)
+    return jsonify({
+        'invitations': [inv.to_dict() for inv in invitations]
+    }), 200
+
+
+@invitations_bp.route('/', methods=['POST'])
+@admin_required
+def create_invitation():
+    """Create a new invitation.
+
+    Email is optional; when provided, an invitation email is attempted and
+    the outcome is reported in 'email_sent'/'email_error' without failing
+    the request.
+    """
+    data = request.get_json()
+    current_user_id = get_jwt_identity()
+
+    if not data:
+        return jsonify({'error': 'No data provided'}), 400
+
+    email = data.get('email')  # Optional
+    role = data.get('role', 'developer')
+    permissions = data.get('permissions')
+    expires_in_days = data.get('expires_in_days', 7)
+
+    result = InvitationService.create_invitation(
+        email=email,
+        role=role,
+        permissions=permissions,
+        invited_by=current_user_id,
+        expires_in_days=expires_in_days
+    )
+
+    if not result['success']:
+        return jsonify({'error': result['error']}), 400
+
+    invitation = result['invitation']
+
+    # Build invite URL
+    base_url = request.host_url.rstrip('/')
+    invite_url = f"{base_url}/register?invite={invitation.token}"
+
+    # Try to send email if email provided
+    email_sent = False
+    email_error = None
+    if email:
+        email_result = InvitationService.send_invitation_email(invitation, base_url)
+        email_sent = email_result['success']
+        if not email_sent:
+            email_error = email_result.get('error')
+
+    # Audit log
+    AuditService.log(
+        action=AuditLog.ACTION_INVITATION_CREATE,
+        user_id=current_user_id,
+        target_type='invitation',
+        target_id=invitation.id,
+        details={'email': email, 'role': role, 'email_sent': email_sent}
+    )
+    db.session.commit()
+
+    return jsonify({
+        'message': 'Invitation created successfully',
+        'invitation': invitation.to_dict(),
+        'invite_url': invite_url,
+        'email_sent': email_sent,
+        'email_error': email_error,
+    }), 201
+
+
+@invitations_bp.route('/<int:invitation_id>', methods=['DELETE'])
+@admin_required
+def revoke_invitation(invitation_id):
+    """Revoke a pending invitation."""
+    current_user_id = get_jwt_identity()
+
+    result = InvitationService.revoke_invitation(invitation_id)
+    if not result['success']:
+        return jsonify({'error': result['error']}), 400
+
+    AuditService.log(
+        action=AuditLog.ACTION_INVITATION_REVOKE,
+        user_id=current_user_id,
+        target_type='invitation',
+        target_id=invitation_id,
+    )
+    db.session.commit()
+
+    return jsonify({'message': 'Invitation revoked'}), 200
+
+
+@invitations_bp.route('/resend/<int:invitation_id>', methods=['POST'])
+@admin_required
+def resend_invitation(invitation_id):
+    """Resend invitation email.
+
+    Only pending invitations that have an email address can be resent.
+    """
+    from app.models import Invitation
+    invitation = Invitation.query.get(invitation_id)
+    if not invitation:
+        return jsonify({'error': 'Invitation not found'}), 404
+
+    if invitation.status != Invitation.STATUS_PENDING:
+        return jsonify({'error': 'Can only resend pending invitations'}), 400
+
+    if not invitation.email:
+        return jsonify({'error': 'Invitation has no email address'}), 400
+
+    base_url = request.host_url.rstrip('/')
+    result = InvitationService.send_invitation_email(invitation, base_url)
+
+    if not result['success']:
+        return jsonify({'error': result.get('error', 'Failed to send email')}), 500
+
+    return jsonify({'message': 'Invitation email resent'}), 200
+
+
+@invitations_bp.route('/validate/<token>', methods=['GET'])
+def validate_invitation(token):
+    """Validate an invite token (public endpoint for registration page)."""
+    invitation = InvitationService.validate_token(token)
+    if not invitation:
+        return jsonify({'error': 'Invalid or expired invitation'}), 404
+
+    return jsonify({
+        'valid': True,
+        'email': invitation.email,
+        'role': invitation.role,
+    }), 200
diff --git a/backend/app/api/migrations.py b/backend/app/api/migrations.py
new file mode 100644
index 0000000..a5aea12
--- /dev/null
+++ b/backend/app/api/migrations.py
@@ -0,0 +1,54 @@
+from flask import Blueprint, jsonify, current_app
+from flask_jwt_extended import jwt_required, get_jwt_identity
+
+from app.models import User
+from app.services.migration_service import MigrationService
+
+migrations_bp = Blueprint('migrations', __name__)
+
+
+@migrations_bp.route('/status', methods=['GET'])
+def get_migration_status():
+    """Check if migrations are pending. No auth required (called before login)."""
+    status = MigrationService.get_status()
+    return jsonify(status), 200
+
+
+@migrations_bp.route('/backup', methods=['POST'])
+@jwt_required()
+def create_backup():
+    """Create a database backup before applying migrations. Admin only."""
+    user = User.query.get(get_jwt_identity())
+    if not user or user.role != User.ROLE_ADMIN:
+        return jsonify({'error': 'Admin access required'}), 403
+
+    result = MigrationService.create_backup(current_app)
+    if result['success']:
+        return jsonify(result), 200
+    return jsonify(result), 500
+
+
+@migrations_bp.route('/apply', methods=['POST'])
+@jwt_required()
+def apply_migrations():
+    """Apply all pending migrations. Admin only."""
+    user = User.query.get(get_jwt_identity())
+    if not user or user.role != User.ROLE_ADMIN:
+        return jsonify({'error': 'Admin access required'}), 403
+
+    result = MigrationService.apply_migrations(current_app)
+    if result['success']:
+        return jsonify(result), 200
+    return jsonify(result), 500
+
+
+@migrations_bp.route('/history', methods=['GET'])
+@jwt_required()
+def get_migration_history():
+    """Return all migration revisions. Admin only."""
+    user = User.query.get(get_jwt_identity())
+    if not user or user.role != User.ROLE_ADMIN:
+        return jsonify({'error': 'Admin access required'}), 403
+
+    history = MigrationService.get_migration_history(current_app)
+    return jsonify({'revisions': history}), 200
diff --git a/backend/app/api/sso.py b/backend/app/api/sso.py
new file mode 100644
index 0000000..2f58448
--- /dev/null
+++ b/backend/app/api/sso.py
@@ -0,0 +1,328 @@
+"""SSO / OAuth API blueprint."""
+from datetime import datetime
+from flask import Blueprint, request, jsonify, session
+from flask_jwt_extended import (
+ create_access_token, create_refresh_token, jwt_required, get_jwt_identity
+)
+from app import db
+from app.models import User, AuditLog
+from app.models.oauth_identity import OAuthIdentity
+from app.services import sso_service
+from app.services.settings_service import SettingsService
+from app.services.audit_service import AuditService
+
+sso_bp = Blueprint('sso', __name__)
+
+VALID_PROVIDERS = ('google', 'github', 'oidc', 'saml')
+
+
+# ------------------------------------------------------------------
+# Public endpoints (login flow)
+# ------------------------------------------------------------------
+
+@sso_bp.route('/providers', methods=['GET'])
+def list_providers():
+    """List enabled SSO providers + whether password login is available."""
+    return jsonify({
+        'providers': sso_service.get_enabled_providers(),
+        'password_login_enabled': sso_service.is_password_login_allowed(),
+    }), 200
+
+
+@sso_bp.route('/authorize/<provider>', methods=['GET'])
+def authorize(provider):
+    """Generate OAuth authorize URL (state + PKCE).
+
+    SAML is excluded here: it uses its own ACS flow, not OAuth authorize.
+    """
+    if provider not in VALID_PROVIDERS or provider == 'saml':
+        return jsonify({'error': f'Invalid OAuth provider: {provider}'}), 400
+
+    redirect_uri = request.args.get('redirect_uri', '')
+    if not redirect_uri:
+        return jsonify({'error': 'redirect_uri is required'}), 400
+
+    try:
+        auth_url, state = sso_service.generate_auth_url(provider, redirect_uri)
+        return jsonify({'auth_url': auth_url, 'state': state}), 200
+    except Exception as e:
+        return jsonify({'error': str(e)}), 400
+
+
+@sso_bp.route('/callback/<provider>', methods=['POST'])
+def callback(provider):
+    """Exchange authorization code for tokens, find/create user, return JWT.
+
+    ValueError from the service layer is treated as a policy rejection (403);
+    any other failure is logged and returned as a generic 500.
+    """
+    if provider not in VALID_PROVIDERS or provider == 'saml':
+        return jsonify({'error': f'Invalid OAuth provider: {provider}'}), 400
+
+    data = request.get_json() or {}
+    code = data.get('code', '')
+    state = data.get('state', '')
+    redirect_uri = data.get('redirect_uri', '')
+
+    if not code or not state:
+        return jsonify({'error': 'code and state are required'}), 400
+
+    try:
+        profile = sso_service.handle_oauth_callback(provider, code, state, redirect_uri)
+        user, is_new = sso_service.find_or_create_user(provider, profile)
+    except ValueError as e:
+        AuditLog.log(
+            action=AuditLog.ACTION_SSO_LOGIN_FAILED,
+            details={'provider': provider, 'error': str(e)},
+            ip_address=request.remote_addr,
+        )
+        db.session.commit()
+        return jsonify({'error': str(e)}), 403
+    except Exception as e:
+        AuditLog.log(
+            action=AuditLog.ACTION_SSO_LOGIN_FAILED,
+            details={'provider': provider, 'error': str(e)},
+            ip_address=request.remote_addr,
+        )
+        db.session.commit()
+        return jsonify({'error': 'SSO authentication failed'}), 500
+
+    return _complete_sso_login(user, provider, is_new)
+
+
+@sso_bp.route('/saml/callback', methods=['POST'])
+def saml_callback():
+    """SAML ACS endpoint (form POST from IdP)."""
+    try:
+        # Build the request context python3-saml needs to validate the response.
+        request_data = {
+            'https': request.scheme == 'https',
+            'http_host': request.host,
+            'script_name': request.path,
+            'acs_url': request.url,
+            'sp_entity_id': request.host_url.rstrip('/'),
+        }
+        profile = sso_service.handle_saml_callback(request.form, request_data)
+        user, is_new = sso_service.find_or_create_user('saml', profile)
+    except ValueError as e:
+        return jsonify({'error': str(e)}), 403
+    except Exception as e:
+        return jsonify({'error': 'SAML authentication failed'}), 500
+
+    return _complete_sso_login(user, 'saml', is_new)
+
+
+@sso_bp.route('/saml/metadata', methods=['GET'])
+def saml_metadata():
+    """Return SP metadata XML."""
+    try:
+        from onelogin.saml2.auth import OneLogin_Saml2_Auth
+        cfg = sso_service.get_provider_config('saml')
+        request_data = {
+            'https': request.scheme == 'https',
+            'http_host': request.host,
+            'script_name': request.path,
+            'acs_url': request.url_root.rstrip('/') + '/api/v1/sso/saml/callback',
+            'sp_entity_id': request.host_url.rstrip('/'),
+        }
+        settings = sso_service.get_saml_settings(cfg, request_data)
+        # python3-saml expects 'on'/'off' strings for the https flag.
+        saml_req = {
+            'https': 'on' if request_data.get('https') else 'off',
+            'http_host': request_data.get('http_host', ''),
+            'script_name': request_data.get('script_name', ''),
+        }
+        auth = OneLogin_Saml2_Auth(saml_req, settings)
+        metadata = auth.get_settings().get_sp_metadata()
+        from flask import Response
+        return Response(metadata, mimetype='application/xml')
+    except Exception as e:
+        return jsonify({'error': str(e)}), 500
+
+
+# ------------------------------------------------------------------
+# Authenticated endpoints (account linking)
+# ------------------------------------------------------------------
+
+@sso_bp.route('/identities', methods=['GET'])
+@jwt_required()
+def list_identities():
+    """Current user's linked OAuth identities."""
+    user_id = get_jwt_identity()
+    identities = OAuthIdentity.query.filter_by(user_id=user_id).all()
+    return jsonify({'identities': [i.to_dict() for i in identities]}), 200
+
+
+@sso_bp.route('/link/<provider>', methods=['POST'])
+@jwt_required()
+def link_provider(provider):
+    """Link an OAuth identity to the current user.
+
+    409 if the identity is already linked (to this or another account).
+    """
+    if provider not in VALID_PROVIDERS:
+        return jsonify({'error': f'Invalid provider: {provider}'}), 400
+
+    user_id = get_jwt_identity()
+    data = request.get_json() or {}
+    code = data.get('code', '')
+    state = data.get('state', '')
+    redirect_uri = data.get('redirect_uri', '')
+
+    if not code or not state:
+        return jsonify({'error': 'code and state are required'}), 400
+
+    try:
+        profile = sso_service.handle_oauth_callback(provider, code, state, redirect_uri)
+    except Exception as e:
+        return jsonify({'error': str(e)}), 400
+
+    # Check if this identity is already linked to another user
+    existing = OAuthIdentity.query.filter_by(
+        provider=provider,
+        provider_user_id=profile['provider_user_id'],
+    ).first()
+    if existing:
+        if existing.user_id == user_id:
+            return jsonify({'error': 'This identity is already linked to your account'}), 409
+        return jsonify({'error': 'This identity is already linked to another account'}), 409
+
+    identity = sso_service.link_identity(user_id, provider, profile, profile.get('_tokens', {}))
+    return jsonify({'identity': identity.to_dict()}), 201
+
+
+@sso_bp.route('/link/<provider>', methods=['DELETE'])
+@jwt_required()
+def unlink_provider(provider):
+    """Unlink an OAuth identity."""
+    if provider not in VALID_PROVIDERS:
+        return jsonify({'error': f'Invalid provider: {provider}'}), 400
+
+    user_id = get_jwt_identity()
+    try:
+        sso_service.unlink_identity(user_id, provider)
+        return jsonify({'message': f'{provider} identity unlinked'}), 200
+    except ValueError as e:
+        return jsonify({'error': str(e)}), 400
+
+
+# ------------------------------------------------------------------
+# Admin endpoints (SSO configuration)
+# ------------------------------------------------------------------
+
+@sso_bp.route('/admin/config', methods=['GET'])
+@jwt_required()
+def get_sso_config():
+    """All SSO settings (secrets redacted)."""
+    user = User.query.get(get_jwt_identity())
+    if not user or not user.is_admin:
+        return jsonify({'error': 'Admin access required'}), 403
+
+    config = {}
+    for key in SettingsService.DEFAULT_SETTINGS:
+        if not key.startswith('sso_'):
+            continue
+        val = SettingsService.get(key, SettingsService.DEFAULT_SETTINGS[key]['value'])
+        # Redact secrets: only the last 4 characters survive.
+        if 'secret' in key or 'cert' in key:
+            if val and isinstance(val, str) and len(val) > 4:
+                val = '****' + val[-4:]
+        config[key] = val
+    return jsonify({'config': config}), 200
+
+
+@sso_bp.route('/admin/config/<provider>', methods=['PUT'])
+@jwt_required()
+def update_provider_config(provider):
+    """Update a provider's SSO config.
+
+    Only keys present in DEFAULT_SETTINGS under the provider prefix are
+    accepted; redacted secret placeholders ('****…') are ignored so a
+    round-tripped config from GET does not overwrite real secrets.
+    """
+    user = User.query.get(get_jwt_identity())
+    if not user or not user.is_admin:
+        return jsonify({'error': 'Admin access required'}), 403
+
+    if provider not in VALID_PROVIDERS:
+        return jsonify({'error': f'Invalid provider: {provider}'}), 400
+
+    data = request.get_json() or {}
+    prefix = f'sso_{provider}_'
+    updated = []
+
+    for key, value in data.items():
+        full_key = f'{prefix}{key}'
+        if full_key not in SettingsService.DEFAULT_SETTINGS:
+            continue
+        # Skip unchanged redacted secrets
+        if ('secret' in key or 'cert' in key) and isinstance(value, str) and value.startswith('****'):
+            continue
+        SettingsService.set(full_key, value, user_id=user.id)
+        updated.append(key)
+
+    AuditLog.log(
+        action=AuditLog.ACTION_SETTINGS_UPDATE,
+        user_id=user.id,
+        details={'sso_provider': provider, 'updated_fields': updated},
+    )
+    db.session.commit()
+    return jsonify({'message': f'{provider} SSO config updated', 'updated': updated}), 200
+
+
+@sso_bp.route('/admin/test/<provider>', methods=['POST'])
+@jwt_required()
+def test_provider(provider):
+    """Test provider connectivity."""
+    user = User.query.get(get_jwt_identity())
+    if not user or not user.is_admin:
+        return jsonify({'error': 'Admin access required'}), 403
+
+    result = sso_service.test_provider_connectivity(provider)
+    return jsonify(result), 200 if result['ok'] else 400
+
+
+@sso_bp.route('/admin/general', methods=['PUT'])
+@jwt_required()
+def update_general_settings():
+    """Update general SSO settings (auto_provision, force_sso, etc.)."""
+    user = User.query.get(get_jwt_identity())
+    if not user or not user.is_admin:
+        return jsonify({'error': 'Admin access required'}), 403
+
+    data = request.get_json() or {}
+    # Whitelist of settings this endpoint may touch.
+    general_keys = ['sso_auto_provision', 'sso_default_role', 'sso_force_sso', 'sso_allowed_domains']
+    updated = []
+
+    for key in general_keys:
+        if key in data:
+            SettingsService.set(key, data[key], user_id=user.id)
+            updated.append(key)
+
+    AuditLog.log(
+        action=AuditLog.ACTION_SETTINGS_UPDATE,
+        user_id=user.id,
+        details={'sso_general': updated},
+    )
+    db.session.commit()
+    return jsonify({'message': 'SSO general settings updated', 'updated': updated}), 200
+
+
+# ------------------------------------------------------------------
+# Helper
+# ------------------------------------------------------------------
+
+def _complete_sso_login(user, provider, is_new):
+    """Issue JWT or trigger 2FA for an SSO-authenticated user."""
+    # Check 2FA
+    if user.totp_enabled:
+        # FIXME(review): expires_delta=False makes this temp token NEVER
+        # expire (flask-jwt-extended semantics). A 2FA-pending token should
+        # be short-lived (e.g. timedelta(minutes=10)) — confirm and fix.
+        temp_token = create_access_token(
+            identity=user.id,
+            additional_claims={'2fa_pending': True},
+            expires_delta=False,
+        )
+        return jsonify({
+            'requires_2fa': True,
+            'temp_token': temp_token,
+            'message': 'Two-factor authentication required',
+        }), 200
+
+    user.last_login_at = datetime.utcnow()
+    user.reset_failed_login()
+    db.session.commit()
+
+    AuditService.log_login(user.id, success=True, details={'provider': provider, 'is_new': is_new})
+    db.session.commit()
+
+    access_token = create_access_token(identity=user.id)
+    refresh_token = create_refresh_token(identity=user.id)
+
+    return jsonify({
+        'user': user.to_dict(),
+        'access_token': access_token,
+        'refresh_token': refresh_token,
+        'is_new_user': is_new,
+    }), 200
diff --git a/backend/app/middleware/api_analytics.py b/backend/app/middleware/api_analytics.py
new file mode 100644
index 0000000..aee428d
--- /dev/null
+++ b/backend/app/middleware/api_analytics.py
@@ -0,0 +1,115 @@
+"""API analytics middleware for logging request metrics."""
+import time
+import threading
+import logging
+from flask import g, request
+
+logger = logging.getLogger(__name__)
+
+# Buffer for batch inserts
+_log_buffer = []
+_buffer_lock = threading.Lock()
+
+
+def register_api_analytics(app):
+    """Register before/after request handlers for API analytics.
+
+    Metrics are appended to an in-process buffer under a lock; a background
+    thread (start_analytics_flush_thread) persists them.
+    """
+
+    @app.before_request
+    def record_request_start():
+        """Record request start time."""
+        if request.path.startswith('/api/'):
+            g.request_start_time = time.time()
+
+    @app.after_request
+    def record_request_metrics(response):
+        """Log API request metrics."""
+        if not request.path.startswith('/api/'):
+            return response
+
+        start_time = getattr(g, 'request_start_time', None)
+        if start_time is None:
+            return response
+
+        elapsed_ms = (time.time() - start_time) * 1000
+
+        api_key = getattr(g, 'api_key', None)
+        api_key_user = getattr(g, 'api_key_user', None)
+
+        # Get user_id from API key or JWT
+        user_id = None
+        if api_key_user:
+            user_id = api_key_user.id
+        else:
+            try:
+                from flask_jwt_extended import get_jwt_identity
+                user_id = get_jwt_identity()
+            except Exception:
+                pass
+
+        # Prefer proxy-forwarded client address when present.
+        ip_address = request.remote_addr
+        if request.headers.get('X-Forwarded-For'):
+            ip_address = request.headers.get('X-Forwarded-For').split(',')[0].strip()
+        elif request.headers.get('X-Real-IP'):
+            ip_address = request.headers.get('X-Real-IP')
+
+        log_entry = {
+            'api_key_id': api_key.id if api_key else None,
+            'user_id': user_id,
+            'method': request.method,
+            'endpoint': request.path,
+            'blueprint': request.blueprints[0] if request.blueprints else None,
+            'status_code': response.status_code,
+            'response_time_ms': round(elapsed_ms, 2),
+            'ip_address': ip_address,
+            'user_agent': (request.headers.get('User-Agent') or '')[:500],
+            'request_size': request.content_length or 0,
+            'response_size': response.content_length or 0,
+        }
+
+        with _buffer_lock:
+            _log_buffer.append(log_entry)
+
+        return response
+
+
+def start_analytics_flush_thread(app):
+    """Start background thread to flush analytics buffer to DB.
+
+    Daemon thread: it dies with the process, so no shutdown handling is
+    needed; a final partial buffer may be lost.
+    """
+
+    def flush_loop():
+        while True:
+            time.sleep(5)
+            try:
+                _flush_buffer(app)
+            except Exception as e:
+                logger.error(f'Analytics flush error: {e}')
+
+    thread = threading.Thread(
+        target=flush_loop,
+        daemon=True,
+        name='api-analytics-flush'
+    )
+    thread.start()
+    return thread
+
+
+def _flush_buffer(app):
+    """Flush buffered log entries to the database.
+
+    Snapshots and clears the buffer under the lock, then inserts outside it
+    so request handlers are never blocked on the DB commit.
+    """
+    with _buffer_lock:
+        if not _log_buffer:
+            return
+        entries = _log_buffer.copy()
+        _log_buffer.clear()
+
+    with app.app_context():
+        from app import db
+        from app.models.api_usage import ApiUsageLog
+
+        for entry in entries:
+            log = ApiUsageLog(**entry)
+            db.session.add(log)
+
+        try:
+            db.session.commit()
+        except Exception as e:
+            # NOTE(review): on failure the snapshot is dropped, so these
+            # entries are lost rather than re-queued — confirm this is intended.
+            db.session.rollback()
+            logger.error(f'Failed to flush analytics: {e}')
diff --git a/backend/app/middleware/api_key_auth.py b/backend/app/middleware/api_key_auth.py
new file mode 100644
index 0000000..6460188
--- /dev/null
+++ b/backend/app/middleware/api_key_auth.py
@@ -0,0 +1,36 @@
+"""API Key authentication middleware."""
+from flask import g, request, jsonify
+
+
+def register_api_key_auth(app):
+    """Register API key authentication as a before_request handler."""
+
+    @app.before_request
+    def authenticate_api_key():
+        """Check for X-API-Key header and validate."""
+        api_key_header = request.headers.get('X-API-Key')
+
+        if not api_key_header:
+            return  # No API key provided, fall through to JWT auth
+
+        from app.services.api_key_service import ApiKeyService
+        api_key = ApiKeyService.validate_key(api_key_header)
+
+        # A present-but-invalid key is rejected outright rather than
+        # falling back to JWT.
+        if not api_key:
+            return jsonify({'error': 'Invalid or expired API key'}), 401
+
+        # Record usage
+        ip_address = request.remote_addr
+        if request.headers.get('X-Forwarded-For'):
+            ip_address = request.headers.get('X-Forwarded-For').split(',')[0].strip()
+        elif request.headers.get('X-Real-IP'):
+            ip_address = request.headers.get('X-Real-IP')
+
+        api_key.record_usage(ip_address)
+
+        from app import db
+        db.session.commit()
+
+        # Store in g for downstream use
+        g.api_key = api_key
+        g.api_key_user = api_key.user
diff --git a/backend/app/middleware/rate_limit.py b/backend/app/middleware/rate_limit.py
new file mode 100644
index 0000000..111ffbb
--- /dev/null
+++ b/backend/app/middleware/rate_limit.py
@@ -0,0 +1,86 @@
+"""Enhanced rate limiting with per-tier limits and response headers."""
+from flask import g, request
+
+
+def get_rate_limit_key():
+    """Custom key function for rate limiting based on auth context.
+
+    Precedence: API key > JWT user > client IP.
+    """
+    # API key auth
+    api_key = getattr(g, 'api_key', None)
+    if api_key:
+        return f'apikey:{api_key.id}'
+
+    # JWT auth - try to get user id
+    try:
+        from flask_jwt_extended import get_jwt_identity
+        user_id = get_jwt_identity()
+        if user_id:
+            return f'user:{user_id}'
+    except Exception:
+        pass
+
+    # Fall back to IP
+    ip = request.remote_addr
+    if request.headers.get('X-Forwarded-For'):
+        ip = request.headers.get('X-Forwarded-For').split(',')[0].strip()
+    elif request.headers.get('X-Real-IP'):
+        ip = request.headers.get('X-Real-IP')
+    return f'ip:{ip}'
+
+
+def get_dynamic_limit():
+    """Return rate limit string based on auth context tier.
+
+    API keys use their per-tier setting; authenticated users get the
+    standard limit; anonymous traffic gets the unauthenticated limit.
+    """
+    from app.services.settings_service import SettingsService
+
+    # API key tier
+    api_key = getattr(g, 'api_key', None)
+    if api_key:
+        tier = api_key.tier or 'standard'
+        setting_key = f'rate_limit_{tier}'
+        return SettingsService.get(setting_key, _default_for_tier(tier))
+
+    # Authenticated user
+    api_key_user = getattr(g, 'api_key_user', None)
+    if api_key_user:
+        return SettingsService.get('rate_limit_standard', '100 per minute')
+
+    try:
+        from flask_jwt_extended import get_jwt_identity
+        user_id = get_jwt_identity()
+        if user_id:
+            return SettingsService.get('rate_limit_standard', '100 per minute')
+    except Exception:
+        pass
+
+    # Unauthenticated
+    return SettingsService.get('rate_limit_unauthenticated', '30 per minute')
+
+
+def _default_for_tier(tier):
+    """Return default rate limit for a tier (unknown tiers map to standard)."""
+    defaults = {
+        'standard': '100 per minute',
+        'elevated': '500 per minute',
+        'unlimited': '5000 per minute',
+    }
+    return defaults.get(tier, '100 per minute')
+
+
+def register_rate_limit_headers(app):
+    """Register after_request handler to add rate limit headers."""
+
+    @app.after_request
+    def add_rate_limit_headers(response):
+        # Flask-Limiter adds these headers automatically when configured,
+        # but we ensure they are present with standard names
+        # NOTE(review): reading a header and writing the same value back is
+        # a no-op — either map from Flask-Limiter's internal names here or
+        # drop this handler.
+        limit = response.headers.get('X-RateLimit-Limit')
+        remaining = response.headers.get('X-RateLimit-Remaining')
+        reset = response.headers.get('X-RateLimit-Reset')
+
+        if limit:
+            response.headers['X-RateLimit-Limit'] = limit
+        if remaining:
+            response.headers['X-RateLimit-Remaining'] = remaining
+        if reset:
+            response.headers['X-RateLimit-Reset'] = reset
+
+        return response
diff --git a/backend/app/middleware/rbac.py b/backend/app/middleware/rbac.py
index deb2bf6..acad1e4 100644
--- a/backend/app/middleware/rbac.py
+++ b/backend/app/middleware/rbac.py
@@ -1,18 +1,42 @@
"""Role-Based Access Control middleware and decorators."""
from functools import wraps
-from flask import jsonify
-from flask_jwt_extended import jwt_required, get_jwt_identity
+from flask import g, jsonify
+from flask_jwt_extended import jwt_required, get_jwt_identity, verify_jwt_in_request
from app.models import User
def get_current_user():
- """Get the current authenticated user."""
+ """Get the current authenticated user (via API key or JWT)."""
+ # Check API key auth first
+ api_key_user = getattr(g, 'api_key_user', None)
+ if api_key_user:
+ return api_key_user
+
+ # Fall back to JWT
user_id = get_jwt_identity()
if user_id:
return User.query.get(user_id)
return None
+def auth_required():
+ """
+ Decorator that accepts either API key or JWT authentication.
+ Use this instead of @jwt_required() to support both auth methods.
+ """
+ def decorator(fn):
+ @wraps(fn)
+ def wrapper(*args, **kwargs):
+ # If API key already authenticated via middleware, proceed
+ if getattr(g, 'api_key_user', None):
+ return fn(*args, **kwargs)
+ # Otherwise require JWT
+ verify_jwt_in_request()
+ return fn(*args, **kwargs)
+ return wrapper
+ return decorator
+
+
def require_role(*allowed_roles):
"""
Decorator that requires the user to have one of the specified roles.
@@ -24,7 +48,7 @@ def my_endpoint():
"""
def decorator(fn):
@wraps(fn)
- @jwt_required()
+ @auth_required()
def wrapper(*args, **kwargs):
user = get_current_user()
if not user:
@@ -48,7 +72,7 @@ def admin_only_endpoint():
...
"""
@wraps(fn)
- @jwt_required()
+ @auth_required()
def wrapper(*args, **kwargs):
user = get_current_user()
if not user:
@@ -71,7 +95,7 @@ def developer_endpoint():
...
"""
@wraps(fn)
- @jwt_required()
+ @auth_required()
def wrapper(*args, **kwargs):
user = get_current_user()
if not user:
@@ -84,6 +108,33 @@ def wrapper(*args, **kwargs):
return wrapper
+def permission_required(feature, level='read'):
+ """
+ Decorator that checks per-feature permissions.
+
+ Usage:
+ @permission_required('applications', 'write')
+ def create_app():
+ ...
+ """
+ def decorator(fn):
+ @wraps(fn)
+ @auth_required()
+ def wrapper(*args, **kwargs):
+ user = get_current_user()
+ if not user:
+ return jsonify({'error': 'User not found'}), 404
+ if not user.is_active:
+ return jsonify({'error': 'Account is deactivated'}), 403
+ if not user.has_permission(feature, level):
+ return jsonify({
+ 'error': f'Permission denied: {level} access to {feature} required'
+ }), 403
+ return fn(*args, **kwargs)
+ return wrapper
+ return decorator
+
+
def viewer_required(fn):
"""
Decorator that requires viewer role or higher (any valid role).
@@ -95,7 +146,7 @@ def viewer_endpoint():
...
"""
@wraps(fn)
- @jwt_required()
+ @auth_required()
def wrapper(*args, **kwargs):
user = get_current_user()
if not user:
diff --git a/backend/app/middleware/security.py b/backend/app/middleware/security.py
index 44780d3..a5303ac 100644
--- a/backend/app/middleware/security.py
+++ b/backend/app/middleware/security.py
@@ -23,8 +23,8 @@ def add_security_headers(response):
# Content Security Policy
csp_directives = [
"default-src 'self'",
- "script-src 'self' 'unsafe-inline' 'unsafe-eval'",
- "style-src 'self' 'unsafe-inline'",
+ "script-src 'self' 'unsafe-inline' 'unsafe-eval' https://unpkg.com",
+ "style-src 'self' 'unsafe-inline' https://unpkg.com",
"img-src 'self' data: https:",
"font-src 'self'",
"connect-src 'self' ws: wss:",
diff --git a/backend/app/models/__init__.py b/backend/app/models/__init__.py
index c1340a4..04a7d9b 100644
--- a/backend/app/models/__init__.py
+++ b/backend/app/models/__init__.py
@@ -15,6 +15,12 @@
from app.models.environment_activity import EnvironmentActivity
from app.models.promotion_job import PromotionJob
from app.models.sanitization_profile import SanitizationProfile
+from app.models.email import EmailDomain, EmailAccount, EmailAlias, EmailForwardingRule, DNSProviderConfig
+from app.models.oauth_identity import OAuthIdentity
+from app.models.api_key import ApiKey
+from app.models.api_usage import ApiUsageLog, ApiUsageSummary
+from app.models.event_subscription import EventSubscription, EventDelivery
+from app.models.invitation import Invitation
__all__ = [
'User', 'Application', 'Domain', 'EnvironmentVariable', 'EnvironmentVariableHistory',
@@ -22,5 +28,8 @@
'MetricsHistory', 'Workflow', 'GitWebhook', 'WebhookLog', 'GitDeployment',
'Server', 'ServerGroup', 'ServerMetrics', 'ServerCommand', 'AgentSession', 'SecurityAlert',
'WordPressSite', 'DatabaseSnapshot', 'SyncJob',
- 'EnvironmentActivity', 'PromotionJob', 'SanitizationProfile'
+ 'EnvironmentActivity', 'PromotionJob', 'SanitizationProfile',
+ 'EmailDomain', 'EmailAccount', 'EmailAlias', 'EmailForwardingRule', 'DNSProviderConfig',
+ 'OAuthIdentity', 'ApiKey', 'ApiUsageLog', 'ApiUsageSummary',
+ 'EventSubscription', 'EventDelivery', 'Invitation'
]
diff --git a/backend/app/models/api_key.py b/backend/app/models/api_key.py
new file mode 100644
index 0000000..4ece959
--- /dev/null
+++ b/backend/app/models/api_key.py
@@ -0,0 +1,113 @@
+"""API Key model for programmatic API access."""
+from datetime import datetime
+from app import db
+import hashlib
+import json
+import secrets
+
+
+class ApiKey(db.Model):
+ """API key for programmatic access to the ServerKit API."""
+ __tablename__ = 'api_keys'
+
+ id = db.Column(db.Integer, primary_key=True)
+ user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
+ name = db.Column(db.String(100), nullable=False)
+ key_prefix = db.Column(db.String(8), nullable=False)
+ key_hash = db.Column(db.String(256), unique=True, nullable=False)
+ scopes = db.Column(db.Text, nullable=True) # JSON array
+ tier = db.Column(db.String(20), default='standard') # standard | elevated | unlimited
+ is_active = db.Column(db.Boolean, default=True)
+ expires_at = db.Column(db.DateTime, nullable=True)
+ last_used_at = db.Column(db.DateTime, nullable=True)
+ last_used_ip = db.Column(db.String(45), nullable=True)
+ usage_count = db.Column(db.Integer, default=0)
+ created_at = db.Column(db.DateTime, default=datetime.utcnow)
+ revoked_at = db.Column(db.DateTime, nullable=True)
+
+ user = db.relationship('User', foreign_keys=[user_id])
+
+ TIER_STANDARD = 'standard'
+ TIER_ELEVATED = 'elevated'
+ TIER_UNLIMITED = 'unlimited'
+ VALID_TIERS = [TIER_STANDARD, TIER_ELEVATED, TIER_UNLIMITED]
+
+ @staticmethod
+ def generate_key():
+ """Generate a new API key. Returns (raw_key, prefix, hash)."""
+ raw = 'sk_' + secrets.token_hex(20)
+ prefix = raw[:8]
+ key_hash = hashlib.sha256(raw.encode()).hexdigest()
+ return raw, prefix, key_hash
+
+ @staticmethod
+ def hash_key(raw_key):
+ """Hash a raw API key."""
+ return hashlib.sha256(raw_key.encode()).hexdigest()
+
+ def get_scopes(self):
+ """Return parsed scopes list."""
+ if self.scopes:
+ try:
+ return json.loads(self.scopes)
+ except (json.JSONDecodeError, TypeError):
+ return []
+ return []
+
+ def set_scopes(self, scopes_list):
+ """Set scopes from a list."""
+ if scopes_list:
+ self.scopes = json.dumps(scopes_list)
+ else:
+ self.scopes = None
+
+ def is_expired(self):
+ """Check if the key has expired."""
+ if self.expires_at is None:
+ return False
+ return datetime.utcnow() > self.expires_at
+
+ def is_valid(self):
+ """Check if the key is active and not expired or revoked."""
+ return self.is_active and not self.is_expired() and self.revoked_at is None
+
+ def has_scope(self, required_scope):
+ """Check if this key has the required scope."""
+ scopes = self.get_scopes()
+ if not scopes or '*' in scopes:
+ return True
+ if required_scope in scopes:
+ return True
+ # Check wildcard resource match (e.g. 'apps:*' matches 'apps:read')
+ resource = required_scope.split(':')[0] if ':' in required_scope else required_scope
+ if f'{resource}:*' in scopes:
+ return True
+ return False
+
+ def record_usage(self, ip_address=None):
+ """Record a usage of this key."""
+ self.last_used_at = datetime.utcnow()
+ self.usage_count = (self.usage_count or 0) + 1
+ if ip_address:
+ self.last_used_ip = ip_address
+
+ def to_dict(self):
+ """Serialize key info (never expose hash)."""
+ return {
+ 'id': self.id,
+ 'user_id': self.user_id,
+ 'name': self.name,
+ 'key_prefix': self.key_prefix,
+ 'scopes': self.get_scopes(),
+ 'tier': self.tier,
+ 'is_active': self.is_active,
+ 'expires_at': self.expires_at.isoformat() if self.expires_at else None,
+ 'last_used_at': self.last_used_at.isoformat() if self.last_used_at else None,
+ 'last_used_ip': self.last_used_ip,
+ 'usage_count': self.usage_count,
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
+ 'revoked_at': self.revoked_at.isoformat() if self.revoked_at else None,
+ }
+
+ def __repr__(self):
+ return f'<ApiKey {self.key_prefix}... ({self.name})>'
diff --git a/backend/app/models/api_usage.py b/backend/app/models/api_usage.py
new file mode 100644
index 0000000..e746178
--- /dev/null
+++ b/backend/app/models/api_usage.py
@@ -0,0 +1,68 @@
+"""API usage tracking models."""
+from datetime import datetime
+from app import db
+
+
+class ApiUsageLog(db.Model):
+ """Raw API usage log for every request."""
+ __tablename__ = 'api_usage_logs'
+
+ id = db.Column(db.Integer, primary_key=True)
+ api_key_id = db.Column(db.Integer, db.ForeignKey('api_keys.id'), nullable=True)
+ user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
+ method = db.Column(db.String(10), nullable=False)
+ endpoint = db.Column(db.String(500), nullable=False)
+ blueprint = db.Column(db.String(100), nullable=True)
+ status_code = db.Column(db.Integer, nullable=False)
+ response_time_ms = db.Column(db.Float, nullable=True)
+ ip_address = db.Column(db.String(45), nullable=True)
+ user_agent = db.Column(db.String(500), nullable=True)
+ request_size = db.Column(db.Integer, nullable=True)
+ response_size = db.Column(db.Integer, nullable=True)
+ created_at = db.Column(db.DateTime, default=datetime.utcnow, index=True)
+
+ def to_dict(self):
+ return {
+ 'id': self.id,
+ 'api_key_id': self.api_key_id,
+ 'user_id': self.user_id,
+ 'method': self.method,
+ 'endpoint': self.endpoint,
+ 'blueprint': self.blueprint,
+ 'status_code': self.status_code,
+ 'response_time_ms': self.response_time_ms,
+ 'ip_address': self.ip_address,
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
+ }
+
+
+class ApiUsageSummary(db.Model):
+ """Aggregated API usage summary per hour."""
+ __tablename__ = 'api_usage_summaries'
+
+ id = db.Column(db.Integer, primary_key=True)
+ period_start = db.Column(db.DateTime, nullable=False, index=True)
+ api_key_id = db.Column(db.Integer, db.ForeignKey('api_keys.id'), nullable=True)
+ user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
+ endpoint = db.Column(db.String(500), nullable=True)
+ total_requests = db.Column(db.Integer, default=0)
+ success_count = db.Column(db.Integer, default=0)
+ client_error_count = db.Column(db.Integer, default=0)
+ server_error_count = db.Column(db.Integer, default=0)
+ avg_response_time_ms = db.Column(db.Float, nullable=True)
+ max_response_time_ms = db.Column(db.Float, nullable=True)
+
+ def to_dict(self):
+ return {
+ 'id': self.id,
+ 'period_start': self.period_start.isoformat() if self.period_start else None,
+ 'api_key_id': self.api_key_id,
+ 'user_id': self.user_id,
+ 'endpoint': self.endpoint,
+ 'total_requests': self.total_requests,
+ 'success_count': self.success_count,
+ 'client_error_count': self.client_error_count,
+ 'server_error_count': self.server_error_count,
+ 'avg_response_time_ms': self.avg_response_time_ms,
+ 'max_response_time_ms': self.max_response_time_ms,
+ }
diff --git a/backend/app/models/audit_log.py b/backend/app/models/audit_log.py
index b6dc0ab..9f58ef3 100644
--- a/backend/app/models/audit_log.py
+++ b/backend/app/models/audit_log.py
@@ -39,6 +39,19 @@ class AuditLog(db.Model):
ACTION_DEPLOY = 'app.deploy'
ACTION_BACKUP_CREATE = 'backup.create'
ACTION_BACKUP_RESTORE = 'backup.restore'
+ ACTION_SSO_LOGIN = 'sso.login'
+ ACTION_SSO_LOGIN_FAILED = 'sso.login_failed'
+ ACTION_SSO_PROVISION = 'sso.provision'
+ ACTION_SSO_LINK = 'sso.link'
+ ACTION_SSO_UNLINK = 'sso.unlink'
+ ACTION_API_KEY_CREATE = 'api_key.create'
+ ACTION_API_KEY_REVOKE = 'api_key.revoke'
+ ACTION_API_KEY_ROTATE = 'api_key.rotate'
+ ACTION_INVITATION_CREATE = 'invitation.create'
+ ACTION_INVITATION_REVOKE = 'invitation.revoke'
+ ACTION_INVITATION_ACCEPT = 'invitation.accept'
+ ACTION_USER_PERMISSIONS_UPDATE = 'user.permissions_update'
+ ACTION_USER_PERMISSIONS_RESET = 'user.permissions_reset'
def get_details(self):
"""Return parsed details JSON."""
diff --git a/backend/app/models/email.py b/backend/app/models/email.py
new file mode 100644
index 0000000..2cbcd36
--- /dev/null
+++ b/backend/app/models/email.py
@@ -0,0 +1,171 @@
+from datetime import datetime
+from app import db
+
+
+class EmailDomain(db.Model):
+ __tablename__ = 'email_domains'
+
+ id = db.Column(db.Integer, primary_key=True)
+ name = db.Column(db.String(255), unique=True, nullable=False, index=True)
+ is_active = db.Column(db.Boolean, default=True)
+
+ # DKIM
+ dkim_selector = db.Column(db.String(63), default='default')
+ dkim_private_key_path = db.Column(db.String(500))
+ dkim_public_key = db.Column(db.Text)
+
+ # SPF / DMARC
+ spf_record = db.Column(db.String(500))
+ dmarc_record = db.Column(db.String(500))
+
+ # DNS provider linkage
+ dns_provider_id = db.Column(db.Integer, db.ForeignKey('dns_provider_configs.id'), nullable=True)
+ dns_zone_id = db.Column(db.String(255))
+
+ created_at = db.Column(db.DateTime, default=datetime.utcnow)
+ updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
+
+ # Relationships
+ accounts = db.relationship('EmailAccount', backref='domain', lazy=True, cascade='all, delete-orphan')
+ aliases = db.relationship('EmailAlias', backref='domain', lazy=True, cascade='all, delete-orphan')
+
+ def to_dict(self):
+ return {
+ 'id': self.id,
+ 'name': self.name,
+ 'is_active': self.is_active,
+ 'dkim_selector': self.dkim_selector,
+ 'dkim_public_key': self.dkim_public_key,
+ 'spf_record': self.spf_record,
+ 'dmarc_record': self.dmarc_record,
+ 'dns_provider_id': self.dns_provider_id,
+ 'dns_zone_id': self.dns_zone_id,
+ 'accounts_count': len(self.accounts) if self.accounts else 0,
+ 'aliases_count': len(self.aliases) if self.aliases else 0,
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
+ 'updated_at': self.updated_at.isoformat() if self.updated_at else None,
+ }
+
+ def __repr__(self):
+ return f'<EmailDomain {self.name}>'
+
+
+class EmailAccount(db.Model):
+ __tablename__ = 'email_accounts'
+
+ id = db.Column(db.Integer, primary_key=True)
+ email = db.Column(db.String(255), unique=True, nullable=False, index=True)
+ username = db.Column(db.String(255), nullable=False)
+ password_hash = db.Column(db.String(500), nullable=False)
+ domain_id = db.Column(db.Integer, db.ForeignKey('email_domains.id'), nullable=False)
+ quota_mb = db.Column(db.Integer, default=1024)
+ quota_used_mb = db.Column(db.Integer, default=0)
+ is_active = db.Column(db.Boolean, default=True)
+ created_at = db.Column(db.DateTime, default=datetime.utcnow)
+ updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
+
+ forwarding_rules = db.relationship('EmailForwardingRule', backref='account', lazy=True, cascade='all, delete-orphan')
+
+ def to_dict(self):
+ return {
+ 'id': self.id,
+ 'email': self.email,
+ 'username': self.username,
+ 'domain_id': self.domain_id,
+ 'domain_name': self.domain.name if self.domain else None,
+ 'quota_mb': self.quota_mb,
+ 'quota_used_mb': self.quota_used_mb,
+ 'is_active': self.is_active,
+ 'forwarding_count': len(self.forwarding_rules) if self.forwarding_rules else 0,
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
+ 'updated_at': self.updated_at.isoformat() if self.updated_at else None,
+ }
+
+ def __repr__(self):
+ return f'<EmailAccount {self.email}>'
+
+
+class EmailAlias(db.Model):
+ __tablename__ = 'email_aliases'
+
+ id = db.Column(db.Integer, primary_key=True)
+ source = db.Column(db.String(255), nullable=False, index=True)
+ destination = db.Column(db.String(255), nullable=False)
+ domain_id = db.Column(db.Integer, db.ForeignKey('email_domains.id'), nullable=False)
+ is_active = db.Column(db.Boolean, default=True)
+ created_at = db.Column(db.DateTime, default=datetime.utcnow)
+
+ def to_dict(self):
+ return {
+ 'id': self.id,
+ 'source': self.source,
+ 'destination': self.destination,
+ 'domain_id': self.domain_id,
+ 'domain_name': self.domain.name if self.domain else None,
+ 'is_active': self.is_active,
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
+ }
+
+ def __repr__(self):
+ return f'<EmailAlias {self.source} -> {self.destination}>'
+
+
+class EmailForwardingRule(db.Model):
+ __tablename__ = 'email_forwarding_rules'
+
+ id = db.Column(db.Integer, primary_key=True)
+ account_id = db.Column(db.Integer, db.ForeignKey('email_accounts.id'), nullable=False)
+ destination = db.Column(db.String(255), nullable=False)
+ keep_copy = db.Column(db.Boolean, default=True)
+ is_active = db.Column(db.Boolean, default=True)
+ created_at = db.Column(db.DateTime, default=datetime.utcnow)
+
+ def to_dict(self):
+ return {
+ 'id': self.id,
+ 'account_id': self.account_id,
+ 'account_email': self.account.email if self.account else None,
+ 'destination': self.destination,
+ 'keep_copy': self.keep_copy,
+ 'is_active': self.is_active,
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
+ }
+
+ def __repr__(self):
+ return f'<EmailForwardingRule {self.account_id} -> {self.destination}>'
+
+
+class DNSProviderConfig(db.Model):
+ __tablename__ = 'dns_provider_configs'
+
+ id = db.Column(db.Integer, primary_key=True)
+ name = db.Column(db.String(100), nullable=False)
+ provider = db.Column(db.String(50), nullable=False) # 'cloudflare' | 'route53'
+ api_key = db.Column(db.String(500))
+ api_secret = db.Column(db.String(500))
+ api_email = db.Column(db.String(255))
+ is_default = db.Column(db.Boolean, default=False)
+ created_at = db.Column(db.DateTime, default=datetime.utcnow)
+
+ domains = db.relationship('EmailDomain', backref='dns_provider', lazy=True)
+
+ def to_dict(self, mask_secrets=True):
+ result = {
+ 'id': self.id,
+ 'name': self.name,
+ 'provider': self.provider,
+ 'api_email': self.api_email,
+ 'is_default': self.is_default,
+ 'domains_count': len(self.domains) if self.domains else 0,
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
+ }
+ if mask_secrets:
+ result['api_key'] = '****' + (self.api_key[-4:] if self.api_key and len(self.api_key) > 4 else '')
+ result['api_secret'] = '****' if self.api_secret else None
+ else:
+ result['api_key'] = self.api_key
+ result['api_secret'] = self.api_secret
+ return result
+
+ def __repr__(self):
+ return f'<DNSProviderConfig {self.name} ({self.provider})>'
diff --git a/backend/app/models/event_subscription.py b/backend/app/models/event_subscription.py
new file mode 100644
index 0000000..9d4b98d
--- /dev/null
+++ b/backend/app/models/event_subscription.py
@@ -0,0 +1,144 @@
+"""Event subscription and delivery models for webhook system."""
+from datetime import datetime
+from app import db
+import json
+import secrets
+
+
+class EventSubscription(db.Model):
+ """Webhook subscription for event notifications."""
+ __tablename__ = 'event_subscriptions'
+
+ id = db.Column(db.Integer, primary_key=True)
+ user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
+ name = db.Column(db.String(100), nullable=False)
+ url = db.Column(db.String(2048), nullable=False)
+ secret = db.Column(db.String(256), nullable=True)
+ events = db.Column(db.Text, nullable=False) # JSON array of event types
+ is_active = db.Column(db.Boolean, default=True)
+ headers = db.Column(db.Text, nullable=True) # JSON dict of custom headers
+ retry_count = db.Column(db.Integer, default=3)
+ timeout_seconds = db.Column(db.Integer, default=10)
+ created_at = db.Column(db.DateTime, default=datetime.utcnow)
+ updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
+
+ user = db.relationship('User', foreign_keys=[user_id])
+ deliveries = db.relationship('EventDelivery', back_populates='subscription',
+ lazy='dynamic', cascade='all, delete-orphan')
+
+ @staticmethod
+ def generate_secret():
+ """Generate a signing secret for HMAC."""
+ return 'whsec_' + secrets.token_hex(24)
+
+ def get_events(self):
+ """Return parsed events list."""
+ if self.events:
+ try:
+ return json.loads(self.events)
+ except (json.JSONDecodeError, TypeError):
+ return []
+ return []
+
+ def set_events(self, events_list):
+ """Set events from a list."""
+ self.events = json.dumps(events_list) if events_list else '[]'
+
+ def get_headers(self):
+ """Return parsed custom headers."""
+ if self.headers:
+ try:
+ return json.loads(self.headers)
+ except (json.JSONDecodeError, TypeError):
+ return {}
+ return {}
+
+ def set_headers(self, headers_dict):
+ """Set custom headers from a dict."""
+ self.headers = json.dumps(headers_dict) if headers_dict else None
+
+ def matches_event(self, event_type):
+ """Check if this subscription matches the given event type."""
+ events = self.get_events()
+ if '*' in events:
+ return True
+ if event_type in events:
+ return True
+ # Check wildcard category match (e.g. 'app.*' matches 'app.created')
+ category = event_type.split('.')[0] if '.' in event_type else event_type
+ if f'{category}.*' in events:
+ return True
+ return False
+
+ def to_dict(self):
+ return {
+ 'id': self.id,
+ 'user_id': self.user_id,
+ 'name': self.name,
+ 'url': self.url,
+ 'has_secret': bool(self.secret),
+ 'events': self.get_events(),
+ 'is_active': self.is_active,
+ 'headers': self.get_headers(),
+ 'retry_count': self.retry_count,
+ 'timeout_seconds': self.timeout_seconds,
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
+ 'updated_at': self.updated_at.isoformat() if self.updated_at else None,
+ }
+
+ def __repr__(self):
+ return f'<EventSubscription {self.id} {self.name}>'
+
+
+class EventDelivery(db.Model):
+ """Record of a webhook delivery attempt."""
+ __tablename__ = 'event_deliveries'
+
+ id = db.Column(db.Integer, primary_key=True)
+ subscription_id = db.Column(db.Integer, db.ForeignKey('event_subscriptions.id'), nullable=False)
+ event_type = db.Column(db.String(100), nullable=False)
+ payload = db.Column(db.Text, nullable=True) # JSON
+ status = db.Column(db.String(20), default='pending') # pending | success | failed
+ http_status = db.Column(db.Integer, nullable=True)
+ response_body = db.Column(db.String(1000), nullable=True)
+ attempts = db.Column(db.Integer, default=0)
+ next_retry_at = db.Column(db.DateTime, nullable=True)
+ delivered_at = db.Column(db.DateTime, nullable=True)
+ created_at = db.Column(db.DateTime, default=datetime.utcnow, index=True)
+ duration_ms = db.Column(db.Float, nullable=True)
+
+ subscription = db.relationship('EventSubscription', back_populates='deliveries')
+
+ STATUS_PENDING = 'pending'
+ STATUS_SUCCESS = 'success'
+ STATUS_FAILED = 'failed'
+
+ def get_payload(self):
+ if self.payload:
+ try:
+ return json.loads(self.payload)
+ except (json.JSONDecodeError, TypeError):
+ return {}
+ return {}
+
+ def set_payload(self, payload_dict):
+ self.payload = json.dumps(payload_dict) if payload_dict else None
+
+ def to_dict(self):
+ return {
+ 'id': self.id,
+ 'subscription_id': self.subscription_id,
+ 'event_type': self.event_type,
+ 'payload': self.get_payload(),
+ 'status': self.status,
+ 'http_status': self.http_status,
+ 'response_body': self.response_body,
+ 'attempts': self.attempts,
+ 'next_retry_at': self.next_retry_at.isoformat() if self.next_retry_at else None,
+ 'delivered_at': self.delivered_at.isoformat() if self.delivered_at else None,
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
+ 'duration_ms': self.duration_ms,
+ }
+
+ def __repr__(self):
+ return f'<EventDelivery {self.id} {self.event_type} ({self.status})>'
diff --git a/backend/app/models/invitation.py b/backend/app/models/invitation.py
new file mode 100644
index 0000000..4655b29
--- /dev/null
+++ b/backend/app/models/invitation.py
@@ -0,0 +1,72 @@
+from datetime import datetime
+from uuid import uuid4
+from app import db
+import json
+
+
+class Invitation(db.Model):
+ """Team invitation model for invite-based registration."""
+ __tablename__ = 'invitations'
+
+ STATUS_PENDING = 'pending'
+ STATUS_ACCEPTED = 'accepted'
+ STATUS_EXPIRED = 'expired'
+ STATUS_REVOKED = 'revoked'
+
+ id = db.Column(db.Integer, primary_key=True)
+ email = db.Column(db.String(255), nullable=True) # Nullable for link-only invites
+ token = db.Column(db.String(64), unique=True, nullable=False, index=True,
+ default=lambda: uuid4().hex)
+ role = db.Column(db.String(20), nullable=False, default='developer')
+ permissions = db.Column(db.Text, nullable=True) # JSON custom permissions
+ invited_by = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
+ expires_at = db.Column(db.DateTime, nullable=True)
+ accepted_at = db.Column(db.DateTime, nullable=True)
+ accepted_by = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
+ status = db.Column(db.String(20), default=STATUS_PENDING, nullable=False, index=True)
+ created_at = db.Column(db.DateTime, default=datetime.utcnow)
+
+ # Relationships
+ inviter = db.relationship('User', foreign_keys=[invited_by])
+ accepter = db.relationship('User', foreign_keys=[accepted_by])
+
+ @property
+ def is_expired(self):
+ if self.expires_at is None:
+ return False
+ return datetime.utcnow() > self.expires_at
+
+ @property
+ def is_valid(self):
+ return self.status == self.STATUS_PENDING and not self.is_expired
+
+ def get_permissions(self):
+ if self.permissions:
+ try:
+ return json.loads(self.permissions)
+ except (json.JSONDecodeError, TypeError):
+ return None
+ return None
+
+ def set_permissions(self, perms_dict):
+ self.permissions = json.dumps(perms_dict) if perms_dict else None
+
+ def to_dict(self):
+ return {
+ 'id': self.id,
+ 'email': self.email,
+ 'token': self.token,
+ 'role': self.role,
+ 'permissions': self.get_permissions(),
+ 'invited_by': self.invited_by,
+ 'inviter_username': self.inviter.username if self.inviter else None,
+ 'expires_at': self.expires_at.isoformat() if self.expires_at else None,
+ 'accepted_at': self.accepted_at.isoformat() if self.accepted_at else None,
+ 'accepted_by': self.accepted_by,
+ 'status': self.status,
+ 'is_expired': self.is_expired,
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
+ }
+
+ def __repr__(self):
+ return f'<Invitation {self.email or self.token[:8]} ({self.status})>'
diff --git a/backend/app/models/oauth_identity.py b/backend/app/models/oauth_identity.py
new file mode 100644
index 0000000..a27bbb7
--- /dev/null
+++ b/backend/app/models/oauth_identity.py
@@ -0,0 +1,38 @@
+from datetime import datetime
+from app import db
+
+
+class OAuthIdentity(db.Model):
+ """Links an external OAuth/SAML identity to a local user."""
+ __tablename__ = 'oauth_identities'
+ __table_args__ = (
+ db.UniqueConstraint('provider', 'provider_user_id', name='uq_provider_identity'),
+ )
+
+ id = db.Column(db.Integer, primary_key=True)
+ user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE'), nullable=False, index=True)
+ provider = db.Column(db.String(50), nullable=False) # google, github, oidc, saml
+ provider_user_id = db.Column(db.String(256), nullable=False)
+ provider_email = db.Column(db.String(256), nullable=True)
+ provider_display_name = db.Column(db.String(256), nullable=True)
+ access_token_encrypted = db.Column(db.Text, nullable=True)
+ refresh_token_encrypted = db.Column(db.Text, nullable=True)
+ token_expires_at = db.Column(db.DateTime, nullable=True)
+ created_at = db.Column(db.DateTime, default=datetime.utcnow)
+ updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
+ last_login_at = db.Column(db.DateTime, nullable=True)
+
+ user = db.relationship('User', backref=db.backref('oauth_identities', lazy='dynamic'))
+
+ def to_dict(self):
+ return {
+ 'id': self.id,
+ 'provider': self.provider,
+ 'provider_email': self.provider_email,
+ 'provider_display_name': self.provider_display_name,
+ 'created_at': self.created_at.isoformat() if self.created_at else None,
+ 'last_login_at': self.last_login_at.isoformat() if self.last_login_at else None,
+ }
+
+ def __repr__(self):
+ return f'<OAuthIdentity {self.provider}:{self.provider_user_id}>'
diff --git a/backend/app/models/user.py b/backend/app/models/user.py
index e5762a9..ab8fb24 100644
--- a/backend/app/models/user.py
+++ b/backend/app/models/user.py
@@ -16,8 +16,10 @@ class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(120), unique=True, nullable=False, index=True)
username = db.Column(db.String(80), unique=True, nullable=False, index=True)
- password_hash = db.Column(db.String(256), nullable=False)
+ password_hash = db.Column(db.String(256), nullable=True)
+ auth_provider = db.Column(db.String(50), default='local') # local, google, github, oidc, saml
role = db.Column(db.String(20), default='developer') # 'admin', 'developer', 'viewer'
+ permissions = db.Column(db.Text, nullable=True) # JSON per-feature read/write flags
is_active = db.Column(db.Boolean, default=True)
created_at = db.Column(db.DateTime, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
@@ -41,6 +43,51 @@ class User(db.Model):
LOCKOUT_DURATIONS = [5, 15, 60]
MAX_FAILED_ATTEMPTS = 5
+ # Per-feature permission system
+ PERMISSION_FEATURES = [
+ 'applications', 'databases', 'docker', 'domains', 'files',
+ 'monitoring', 'backups', 'security', 'email', 'git', 'cron',
+ 'terminal', 'users', 'settings', 'servers'
+ ]
+
+ ROLE_PERMISSION_TEMPLATES = {
+ 'admin': {f: {'read': True, 'write': True} for f in PERMISSION_FEATURES},
+ 'developer': {
+ 'applications': {'read': True, 'write': True},
+ 'databases': {'read': True, 'write': True},
+ 'docker': {'read': True, 'write': True},
+ 'domains': {'read': True, 'write': True},
+ 'files': {'read': True, 'write': True},
+ 'email': {'read': True, 'write': True},
+ 'git': {'read': True, 'write': True},
+ 'cron': {'read': True, 'write': True},
+ 'monitoring': {'read': True, 'write': False},
+ 'backups': {'read': True, 'write': False},
+ 'security': {'read': True, 'write': False},
+ 'terminal': {'read': True, 'write': False},
+ 'servers': {'read': True, 'write': False},
+ 'users': {'read': False, 'write': False},
+ 'settings': {'read': False, 'write': False},
+ },
+ 'viewer': {
+ 'applications': {'read': True, 'write': False},
+ 'databases': {'read': True, 'write': False},
+ 'docker': {'read': True, 'write': False},
+ 'domains': {'read': True, 'write': False},
+ 'files': {'read': True, 'write': False},
+ 'email': {'read': True, 'write': False},
+ 'git': {'read': True, 'write': False},
+ 'cron': {'read': True, 'write': False},
+ 'monitoring': {'read': True, 'write': False},
+ 'backups': {'read': True, 'write': False},
+ 'security': {'read': True, 'write': False},
+ 'terminal': {'read': False, 'write': False},
+ 'users': {'read': False, 'write': False},
+ 'settings': {'read': False, 'write': False},
+ 'servers': {'read': True, 'write': False},
+ },
+ }
+
@property
def is_locked(self):
"""Check if account is currently locked."""
@@ -69,6 +116,8 @@ def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
+ if not self.password_hash:
+ return False
return check_password_hash(self.password_hash, password)
@property
@@ -90,14 +139,44 @@ def has_role(self, *roles):
"""Check if user has any of the specified roles."""
return self.role in roles
+ @property
+ def has_password(self):
+ return self.password_hash is not None
+
+ def get_permissions(self):
+ """Return resolved permissions: custom if set, otherwise role template."""
+ if self.role == self.ROLE_ADMIN:
+ return self.ROLE_PERMISSION_TEMPLATES['admin']
+ if self.permissions:
+ try:
+ return json.loads(self.permissions)
+ except (json.JSONDecodeError, TypeError):
+ pass
+ return self.ROLE_PERMISSION_TEMPLATES.get(self.role, {})
+
+ def set_permissions(self, perms_dict):
+ """Store custom permissions as JSON."""
+ self.permissions = json.dumps(perms_dict) if perms_dict else None
+
+ def has_permission(self, feature, level='read'):
+ """Check if user has a specific permission. Admin always returns True."""
+ if self.role == self.ROLE_ADMIN:
+ return True
+ perms = self.get_permissions()
+ feature_perms = perms.get(feature, {})
+ return feature_perms.get(level, False)
+
def to_dict(self):
return {
'id': self.id,
'email': self.email,
'username': self.username,
'role': self.role,
+ 'permissions': self.get_permissions(),
'is_active': self.is_active,
'totp_enabled': self.totp_enabled,
+ 'auth_provider': self.auth_provider or 'local',
+ 'has_password': self.has_password,
'created_at': self.created_at.isoformat(),
'updated_at': self.updated_at.isoformat(),
'last_login_at': self.last_login_at.isoformat() if self.last_login_at else None,
diff --git a/backend/app/paths.py b/backend/app/paths.py
index d3d1fbb..fd2f163 100644
--- a/backend/app/paths.py
+++ b/backend/app/paths.py
@@ -17,3 +17,9 @@
DB_BACKUP_DIR = os.path.join(SERVERKIT_BACKUP_DIR, 'databases')
WP_BACKUP_DIR = os.path.join(SERVERKIT_BACKUP_DIR, 'wordpress')
SNAPSHOT_DIR = os.path.join(SERVERKIT_BACKUP_DIR, 'snapshots')
+
+# Email / Mail server paths
+VMAIL_DIR = os.environ.get('VMAIL_DIR', '/var/vmail')
+VMAIL_UID = 5000
+VMAIL_GID = 5000
+EMAIL_CONFIG_DIR = os.path.join(SERVERKIT_CONFIG_DIR, 'email')
diff --git a/backend/app/services/api_analytics_service.py b/backend/app/services/api_analytics_service.py
new file mode 100644
index 0000000..a7daec5
--- /dev/null
+++ b/backend/app/services/api_analytics_service.py
@@ -0,0 +1,250 @@
+"""Service for API usage analytics."""
+from datetime import datetime, timedelta
+from sqlalchemy import func, case
+from app import db
+from app.models.api_usage import ApiUsageLog, ApiUsageSummary
+
+
+class ApiAnalyticsService:
+ """Query and aggregate API usage data."""
+
+ @staticmethod
+ def get_overview(period='24h'):
+ """Get overall API usage stats for a period."""
+ since = _period_to_datetime(period)
+ query = ApiUsageLog.query.filter(ApiUsageLog.created_at >= since)
+
+ total = query.count()
+ if total == 0:
+ return {
+ 'total_requests': 0,
+ 'error_rate': 0,
+ 'avg_response_time_ms': 0,
+ 'success_count': 0,
+ 'client_error_count': 0,
+ 'server_error_count': 0,
+ }
+
+ stats = db.session.query(
+ func.count(ApiUsageLog.id).label('total'),
+ func.avg(ApiUsageLog.response_time_ms).label('avg_time'),
+ func.sum(case((ApiUsageLog.status_code < 400, 1), else_=0)).label('success'),
+ func.sum(case((ApiUsageLog.status_code.between(400, 499), 1), else_=0)).label('client_errors'),
+ func.sum(case((ApiUsageLog.status_code >= 500, 1), else_=0)).label('server_errors'),
+ ).filter(ApiUsageLog.created_at >= since).first()
+
+ total_req = stats.total or 0
+ error_count = (stats.client_errors or 0) + (stats.server_errors or 0)
+
+ return {
+ 'total_requests': total_req,
+ 'error_rate': round(error_count / total_req * 100, 2) if total_req > 0 else 0,
+ 'avg_response_time_ms': round(stats.avg_time or 0, 2),
+ 'success_count': stats.success or 0,
+ 'client_error_count': stats.client_errors or 0,
+ 'server_error_count': stats.server_errors or 0,
+ }
+
+ @staticmethod
+ def get_endpoint_stats(period='24h', limit=20):
+ """Get top endpoints by request count."""
+ since = _period_to_datetime(period)
+
+ results = db.session.query(
+ ApiUsageLog.endpoint,
+ ApiUsageLog.method,
+ func.count(ApiUsageLog.id).label('count'),
+ func.avg(ApiUsageLog.response_time_ms).label('avg_time'),
+ func.sum(case((ApiUsageLog.status_code >= 400, 1), else_=0)).label('errors'),
+ ).filter(
+ ApiUsageLog.created_at >= since
+ ).group_by(
+ ApiUsageLog.endpoint, ApiUsageLog.method
+ ).order_by(
+ func.count(ApiUsageLog.id).desc()
+ ).limit(limit).all()
+
+ return [{
+ 'endpoint': r.endpoint,
+ 'method': r.method,
+ 'count': r.count,
+ 'avg_response_time_ms': round(r.avg_time or 0, 2),
+ 'error_count': r.errors or 0,
+ } for r in results]
+
+ @staticmethod
+ def get_error_stats(period='24h'):
+ """Get error breakdown by status code."""
+ since = _period_to_datetime(period)
+
+ results = db.session.query(
+ ApiUsageLog.status_code,
+ ApiUsageLog.endpoint,
+ func.count(ApiUsageLog.id).label('count'),
+ ).filter(
+ ApiUsageLog.created_at >= since,
+ ApiUsageLog.status_code >= 400,
+ ).group_by(
+ ApiUsageLog.status_code, ApiUsageLog.endpoint
+ ).order_by(
+ func.count(ApiUsageLog.id).desc()
+ ).limit(50).all()
+
+ return [{
+ 'status_code': r.status_code,
+ 'endpoint': r.endpoint,
+ 'count': r.count,
+ } for r in results]
+
+ @staticmethod
+ def get_time_series(period='24h', interval='hour'):
+ """Get request counts over time for charting."""
+ since = _period_to_datetime(period)
+
+ # Use raw logs grouped by truncated time
+ if interval == 'hour':
+ trunc = func.strftime('%Y-%m-%d %H:00:00', ApiUsageLog.created_at)
+ elif interval == 'day':
+ trunc = func.strftime('%Y-%m-%d', ApiUsageLog.created_at)
+ else:
+ trunc = func.strftime('%Y-%m-%d %H:%M:00', ApiUsageLog.created_at)
+
+ results = db.session.query(
+ trunc.label('period'),
+ func.count(ApiUsageLog.id).label('count'),
+ func.sum(case((ApiUsageLog.status_code >= 400, 1), else_=0)).label('errors'),
+ func.avg(ApiUsageLog.response_time_ms).label('avg_time'),
+ ).filter(
+ ApiUsageLog.created_at >= since
+ ).group_by('period').order_by('period').all()
+
+ return [{
+ 'period': r.period,
+ 'count': r.count,
+ 'errors': r.errors or 0,
+ 'avg_response_time_ms': round(r.avg_time or 0, 2),
+ } for r in results]
+
+ @staticmethod
+ def get_key_usage(api_key_id, period='24h'):
+ """Get usage stats for a specific API key."""
+ since = _period_to_datetime(period)
+
+ stats = db.session.query(
+ func.count(ApiUsageLog.id).label('total'),
+ func.avg(ApiUsageLog.response_time_ms).label('avg_time'),
+ func.sum(case((ApiUsageLog.status_code >= 400, 1), else_=0)).label('errors'),
+ ).filter(
+ ApiUsageLog.api_key_id == api_key_id,
+ ApiUsageLog.created_at >= since,
+ ).first()
+
+ # Top endpoints for this key
+ endpoints = db.session.query(
+ ApiUsageLog.endpoint,
+ func.count(ApiUsageLog.id).label('count'),
+ ).filter(
+ ApiUsageLog.api_key_id == api_key_id,
+ ApiUsageLog.created_at >= since,
+ ).group_by(
+ ApiUsageLog.endpoint
+ ).order_by(
+ func.count(ApiUsageLog.id).desc()
+ ).limit(10).all()
+
+ return {
+ 'total_requests': stats.total or 0,
+ 'avg_response_time_ms': round(stats.avg_time or 0, 2),
+ 'error_count': stats.errors or 0,
+ 'top_endpoints': [{'endpoint': e.endpoint, 'count': e.count} for e in endpoints],
+ }
+
+ @staticmethod
+ def aggregate_hourly():
+ """Roll up raw logs into hourly summaries."""
+ # Find the latest summary period
+ latest = db.session.query(
+ func.max(ApiUsageSummary.period_start)
+ ).scalar()
+
+ if latest:
+ since = latest
+ else:
+ since = datetime.utcnow() - timedelta(days=7)
+
+ # Group raw logs by hour, endpoint, user, api_key
+ trunc = func.strftime('%Y-%m-%d %H:00:00', ApiUsageLog.created_at)
+
+ results = db.session.query(
+ trunc.label('period'),
+ ApiUsageLog.api_key_id,
+ ApiUsageLog.user_id,
+ ApiUsageLog.endpoint,
+ func.count(ApiUsageLog.id).label('total'),
+ func.sum(case((ApiUsageLog.status_code < 400, 1), else_=0)).label('success'),
+ func.sum(case((ApiUsageLog.status_code.between(400, 499), 1), else_=0)).label('client_errors'),
+ func.sum(case((ApiUsageLog.status_code >= 500, 1), else_=0)).label('server_errors'),
+ func.avg(ApiUsageLog.response_time_ms).label('avg_time'),
+ func.max(ApiUsageLog.response_time_ms).label('max_time'),
+ ).filter(
+ ApiUsageLog.created_at >= since
+ ).group_by(
+ 'period', ApiUsageLog.api_key_id, ApiUsageLog.user_id, ApiUsageLog.endpoint
+ ).all()
+
+ for r in results:
+ try:
+ period_start = datetime.strptime(r.period, '%Y-%m-%d %H:%M:%S')
+ except (ValueError, TypeError):
+ continue
+
+ # Check if summary already exists
+ existing = ApiUsageSummary.query.filter_by(
+ period_start=period_start,
+ api_key_id=r.api_key_id,
+ user_id=r.user_id,
+ endpoint=r.endpoint,
+ ).first()
+
+ if existing:  # NOTE(review): a summary written for the still-elapsing current hour is never refreshed — consider excluding the current hour from aggregation
+ continue
+
+ summary = ApiUsageSummary(
+ period_start=period_start,
+ api_key_id=r.api_key_id,
+ user_id=r.user_id,
+ endpoint=r.endpoint,
+ total_requests=r.total or 0,
+ success_count=r.success or 0,
+ client_error_count=r.client_errors or 0,
+ server_error_count=r.server_errors or 0,
+ avg_response_time_ms=round(r.avg_time or 0, 2),
+ max_response_time_ms=round(r.max_time or 0, 2),
+ )
+ db.session.add(summary)
+
+ db.session.commit()
+
+ @staticmethod
+ def cleanup_old_logs(days=30):
+ """Purge raw usage logs older than specified days."""
+ cutoff = datetime.utcnow() - timedelta(days=days)
+ deleted = ApiUsageLog.query.filter(ApiUsageLog.created_at < cutoff).delete()
+ db.session.commit()
+ return deleted
+
+
+def _period_to_datetime(period):
+ """Convert a period string to a datetime."""
+ now = datetime.utcnow()
+ if period == '1h':
+ return now - timedelta(hours=1)
+ elif period == '24h':
+ return now - timedelta(hours=24)
+ elif period == '7d':
+ return now - timedelta(days=7)
+ elif period == '30d':
+ return now - timedelta(days=30)
+ elif period == '90d':
+ return now - timedelta(days=90)
+ return now - timedelta(hours=24)
diff --git a/backend/app/services/api_key_service.py b/backend/app/services/api_key_service.py
new file mode 100644
index 0000000..29a0068
--- /dev/null
+++ b/backend/app/services/api_key_service.py
@@ -0,0 +1,126 @@
+"""Service for API key management."""
+from datetime import datetime
+from app import db
+from app.models.api_key import ApiKey
+
+
+class ApiKeyService:
+ """CRUD and validation for API keys."""
+
+ @staticmethod
+ def create_key(user_id, name, scopes=None, tier='standard', expires_at=None):
+ """Create a new API key. Returns (api_key_record, raw_key)."""
+ if tier not in ApiKey.VALID_TIERS:
+ tier = ApiKey.TIER_STANDARD
+
+ raw_key, prefix, key_hash = ApiKey.generate_key()
+
+ api_key = ApiKey(
+ user_id=user_id,
+ name=name,
+ key_prefix=prefix,
+ key_hash=key_hash,
+ tier=tier,
+ expires_at=expires_at,
+ )
+ api_key.set_scopes(scopes)
+
+ db.session.add(api_key)
+ db.session.commit()
+
+ return api_key, raw_key
+
+ @staticmethod
+ def validate_key(raw_key):
+ """Validate a raw API key. Returns ApiKey if valid, None otherwise."""
+ if not raw_key or not raw_key.startswith('sk_'):
+ return None
+
+ key_hash = ApiKey.hash_key(raw_key)
+ api_key = ApiKey.query.filter_by(key_hash=key_hash).first()
+
+ if not api_key:
+ return None
+ if not api_key.is_valid():
+ return None
+
+ return api_key
+
+ @staticmethod
+ def revoke_key(key_id, user_id):
+ """Revoke an API key."""
+ api_key = ApiKey.query.filter_by(id=key_id, user_id=user_id).first()
+ if not api_key:
+ return None
+
+ api_key.is_active = False
+ api_key.revoked_at = datetime.utcnow()
+ db.session.commit()
+ return api_key
+
+ @staticmethod
+ def list_keys(user_id):
+ """List all API keys for a user."""
+ return ApiKey.query.filter_by(user_id=user_id).order_by(
+ ApiKey.created_at.desc()
+ ).all()
+
+ @staticmethod
+ def get_key(key_id, user_id=None):
+ """Get a single API key by ID."""
+ query = ApiKey.query.filter_by(id=key_id)
+ if user_id:
+ query = query.filter_by(user_id=user_id)
+ return query.first()
+
+ @staticmethod
+ def update_key(key_id, user_id, name=None, scopes=None, tier=None):
+ """Update an API key's metadata."""
+ api_key = ApiKey.query.filter_by(id=key_id, user_id=user_id).first()
+ if not api_key:
+ return None
+
+ if name is not None:
+ api_key.name = name
+ if scopes is not None:
+ api_key.set_scopes(scopes)
+ if tier is not None and tier in ApiKey.VALID_TIERS:
+ api_key.tier = tier
+
+ db.session.commit()
+ return api_key
+
+ @staticmethod
+ def rotate_key(key_id, user_id):
+ """Rotate an API key: revoke old, create new with same config."""
+ old_key = ApiKey.query.filter_by(id=key_id, user_id=user_id).first()
+ if not old_key:
+ return None, None
+
+ # Capture config before revoking
+ name = old_key.name
+ scopes = old_key.get_scopes()
+ tier = old_key.tier
+ expires_at = old_key.expires_at
+
+ # Revoke old key
+ old_key.is_active = False
+ old_key.revoked_at = datetime.utcnow()
+
+ # Create new key with same config
+ new_key, raw_key = ApiKeyService.create_key(
+ user_id=user_id,
+ name=name,
+ scopes=scopes,
+ tier=tier,
+ expires_at=expires_at,
+ )
+
+ return new_key, raw_key
+
+ @staticmethod
+ def check_scope(api_key, required_scope):
+ """Check if an API key has the required scope."""
+ if not api_key:
+ return False
+ return api_key.has_scope(required_scope)
diff --git a/backend/app/services/audit_service.py b/backend/app/services/audit_service.py
index e0ee93d..8f44570 100644
--- a/backend/app/services/audit_service.py
+++ b/backend/app/services/audit_service.py
@@ -46,6 +46,14 @@ def log(action, user_id=None, target_type=None, target_id=None, details=None):
ip_address=ip_address,
user_agent=user_agent
)
+
+ # Emit webhook event for matching audit actions
+ try:
+ from app.services.event_service import EventService
+ EventService.emit_for_audit(action, target_type, target_id, details, user_id)
+ except Exception:
+ pass # Don't let event emission failures break audit logging
+
return log_entry
@staticmethod
diff --git a/backend/app/services/backup_service.py b/backend/app/services/backup_service.py
index 5104c9d..32f1d69 100644
--- a/backend/app/services/backup_service.py
+++ b/backend/app/services/backup_service.py
@@ -13,6 +13,7 @@
import schedule
from app import paths
+from app.utils.system import is_command_available
class BackupService:
@@ -145,6 +146,8 @@ def _backup_database_internal(cls, db_type: str, db_name: str,
try:
if db_type == 'mysql':
+ if not is_command_available('mysqldump'):
+ return {'success': False, 'error': 'mysqldump not installed'}
cmd = ['mysqldump']
if config.get('user'):
cmd.extend(['-u', config['user']])
@@ -161,6 +164,9 @@ def _backup_database_internal(cls, db_type: str, db_name: str,
return {'success': False, 'error': result.stderr}
elif db_type == 'postgresql':
+ if not is_command_available('pg_dump'):
+ return {'success': False, 'error': 'pg_dump not installed'}
+
env = os.environ.copy()
if config.get('password'):
env['PGPASSWORD'] = config['password']
@@ -335,6 +341,8 @@ def restore_database(cls, backup_path: str, db_type: str, db_name: str,
try:
if db_type == 'mysql':
+ if not is_command_available('mysql'):
+ return {'success': False, 'error': 'mysql client not installed'}
cmd = ['mysql']
if user:
cmd.extend(['-u', user])
@@ -348,6 +356,9 @@ def restore_database(cls, backup_path: str, db_type: str, db_name: str,
result = subprocess.run(cmd, stdin=f, capture_output=True, text=True)
elif db_type == 'postgresql':
+ if not is_command_available('psql'):
+ return {'success': False, 'error': 'psql client not installed'}
+
env = os.environ.copy()
if password:
env['PGPASSWORD'] = password
diff --git a/backend/app/services/cron_service.py b/backend/app/services/cron_service.py
index 875569c..289210f 100644
--- a/backend/app/services/cron_service.py
+++ b/backend/app/services/cron_service.py
@@ -499,10 +499,82 @@ def get_presets(cls) -> Dict:
"""Get available schedule presets."""
return {
'success': True,
- 'presets': [
- {'name': name, 'schedule': schedule, 'description': cls._describe_schedule(schedule)}
- for name, schedule in cls.PRESETS.items()
- ]
+ 'presets': cls.PRESETS
+ }
+
+ @classmethod
+ def update_job(cls, job_id: str, name: str = None, command: str = None,
+ schedule: str = None, description: str = None) -> Dict:
+ """Update an existing cron job."""
+ metadata = cls._load_jobs_metadata()
+
+ if job_id not in metadata.get('jobs', {}):
+ return {'success': False, 'error': 'Job not found'}
+
+ job_data = metadata['jobs'][job_id]
+ old_schedule = job_data.get('schedule', '')
+ old_command = job_data.get('command', '')
+
+ new_schedule = schedule or old_schedule
+ new_command = command or old_command
+
+ if schedule and not cls._validate_schedule(schedule):
+ return {'success': False, 'error': 'Invalid cron schedule format'}
+
+ # Update crontab on Linux if schedule or command changed
+ if cls.is_linux() and (new_schedule != old_schedule or new_command != old_command):
+ try:
+ result = subprocess.run(
+ ['crontab', '-l'],
+ capture_output=True,
+ text=True,
+ timeout=10
+ )
+
+ if result.returncode == 0:  # NOTE(review): if 'crontab -l' fails, the metadata below is still updated while the system crontab is not — confirm intended
+ old_line = f"{old_schedule} {old_command}"
+ new_line = f"{new_schedule} {new_command}"
+ lines = result.stdout.split('\n')
+ new_lines = []
+ for line in lines:
+ if old_line in line:
+ new_lines.append(new_line)
+ else:
+ new_lines.append(line)
+
+ new_crontab = '\n'.join(new_lines)
+ process = subprocess.Popen(
+ ['crontab', '-'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ text=True
+ )
+ stdout, stderr = process.communicate(input=new_crontab, timeout=10)
+
+ if process.returncode != 0:
+ return {'success': False, 'error': stderr or 'Failed to update crontab'}
+
+ except subprocess.SubprocessError as e:
+ return {'success': False, 'error': str(e)}
+
+ # Update metadata
+ if name is not None:
+ job_data['name'] = name
+ if command is not None:
+ job_data['command'] = command
+ if schedule is not None:
+ job_data['schedule'] = schedule
+ if description is not None:
+ job_data['description'] = description
+ job_data['updated_at'] = datetime.now().isoformat()
+
+ cls._save_jobs_metadata(metadata)
+
+ return {
+ 'success': True,
+ 'job_id': job_id,
+ 'message': 'Job updated successfully'
}
@classmethod
diff --git a/backend/app/services/deployment_service.py b/backend/app/services/deployment_service.py
index 29b21d6..4f9f69f 100644
--- a/backend/app/services/deployment_service.py
+++ b/backend/app/services/deployment_service.py
@@ -9,6 +9,7 @@
- Diff generation between deployments
"""
+import logging
import os
import subprocess
import json
@@ -26,6 +27,9 @@
from app import paths
+logger = logging.getLogger(__name__)
+
+
class DeploymentService:
"""Service for orchestrating deployments."""
@@ -543,8 +547,8 @@ def _generate_diff(cls, deployment: Deployment) -> None:
db.session.add(diff)
db.session.commit()
- except Exception:
- pass
+ except Exception as e:
+ logger.warning('Failed to generate deployment diff: %s', e)
@classmethod
def get_deployments(cls, app_id: int, limit: int = 20, offset: int = 0) -> List[Dict]:
diff --git a/backend/app/services/dkim_service.py b/backend/app/services/dkim_service.py
new file mode 100644
index 0000000..d705a7f
--- /dev/null
+++ b/backend/app/services/dkim_service.py
@@ -0,0 +1,270 @@
+"""OpenDKIM management service for DKIM signing."""
+import os
+import re
+import subprocess
+from typing import Dict
+
+from app.utils.system import PackageManager, ServiceControl, run_privileged
+
+
+class DKIMService:
+ """Service for managing OpenDKIM (DKIM email signing)."""
+
+ OPENDKIM_CONF = '/etc/opendkim.conf'
+ OPENDKIM_DIR = '/etc/opendkim'
+ OPENDKIM_KEYS_DIR = '/etc/opendkim/keys'
+ KEY_TABLE = '/etc/opendkim/KeyTable'
+ SIGNING_TABLE = '/etc/opendkim/SigningTable'
+ TRUSTED_HOSTS = '/etc/opendkim/TrustedHosts'
+
+ OPENDKIM_CONF_CONTENT = """# OpenDKIM configuration - Managed by ServerKit
+Syslog yes
+SyslogSuccess yes
+LogWhy yes
+Canonicalization relaxed/simple
+Mode sv
+SubDomains no
+AutoRestart yes
+AutoRestartRate 10/1M
+Background yes
+DNSTimeout 5
+SignatureAlgorithm rsa-sha256
+KeyTable refile:/etc/opendkim/KeyTable
+SigningTable refile:/etc/opendkim/SigningTable
+ExternalIgnoreList /etc/opendkim/TrustedHosts
+InternalHosts /etc/opendkim/TrustedHosts
+Socket inet:8891@localhost
+PidFile /run/opendkim/opendkim.pid
+OversignHeaders From
+UserID opendkim
+UMask 007
+"""
+
+ TRUSTED_HOSTS_DEFAULT = """# Trusted hosts - Managed by ServerKit
+127.0.0.1
+::1
+localhost
+"""
+
+ @classmethod
+ def get_status(cls) -> Dict:
+ """Get OpenDKIM installation and running status."""
+ installed = False
+ running = False
+ enabled = False
+ version = None
+ try:
+ result = subprocess.run(['which', 'opendkim'], capture_output=True, text=True)
+ installed = result.returncode == 0
+ if not installed:
+ installed = PackageManager.is_installed('opendkim')
+ if installed:
+ running = ServiceControl.is_active('opendkim')
+ enabled = ServiceControl.is_enabled('opendkim')
+ result = subprocess.run(['opendkim', '-V'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
+ match = re.search(r'OpenDKIM\s+Filter\s+v(\S+)', result.stdout)
+ if match:
+ version = match.group(1)
+ except (subprocess.SubprocessError, FileNotFoundError):
+ pass
+
+ return {
+ 'installed': installed,
+ 'running': running,
+ 'enabled': enabled,
+ 'version': version,
+ }
+
+ @classmethod
+ def install(cls) -> Dict:
+ """Install OpenDKIM."""
+ try:
+ result = PackageManager.install(['opendkim', 'opendkim-tools'], timeout=300)
+ if result.returncode != 0:
+ return {'success': False, 'error': result.stderr or 'Failed to install OpenDKIM'}
+
+ # Create directories
+ run_privileged(['mkdir', '-p', cls.OPENDKIM_KEYS_DIR])
+ run_privileged(['chown', '-R', 'opendkim:opendkim', cls.OPENDKIM_DIR])
+
+ # Create config files
+ run_privileged(['tee', cls.OPENDKIM_CONF], input=cls.OPENDKIM_CONF_CONTENT)
+ run_privileged(['tee', cls.TRUSTED_HOSTS], input=cls.TRUSTED_HOSTS_DEFAULT)
+ run_privileged(['touch', cls.KEY_TABLE])
+ run_privileged(['touch', cls.SIGNING_TABLE])
+
+ # Set permissions
+ run_privileged(['chown', '-R', 'opendkim:opendkim', cls.OPENDKIM_DIR])
+ run_privileged(['chmod', '700', cls.OPENDKIM_KEYS_DIR])
+
+ # Create PID directory
+ run_privileged(['mkdir', '-p', '/run/opendkim'])
+ run_privileged(['chown', 'opendkim:opendkim', '/run/opendkim'])
+
+ # Add postfix to opendkim group
+ run_privileged(['usermod', '-aG', 'opendkim', 'postfix'])
+
+ ServiceControl.enable('opendkim')
+ ServiceControl.start('opendkim', timeout=30)
+
+ return {'success': True, 'message': 'OpenDKIM installed successfully'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def generate_key(cls, domain: str, selector: str = 'default') -> Dict:
+ """Generate DKIM key pair for a domain."""
+ try:
+ key_dir = os.path.join(cls.OPENDKIM_KEYS_DIR, domain)
+ run_privileged(['mkdir', '-p', key_dir])
+
+ # Generate key
+ result = run_privileged([
+ 'opendkim-genkey',
+ '-s', selector,
+ '-d', domain,
+ '-D', key_dir,
+ '-b', '2048',
+ ])
+ if result.returncode != 0:
+ return {'success': False, 'error': result.stderr or 'Key generation failed'}
+
+ # Set permissions
+ run_privileged(['chown', '-R', 'opendkim:opendkim', key_dir])
+ run_privileged(['chmod', '600', os.path.join(key_dir, f'{selector}.private')])
+
+ # Read the public key TXT record
+ txt_file = os.path.join(key_dir, f'{selector}.txt')
+ result = run_privileged(['cat', txt_file])
+ public_key_record = result.stdout.strip() if result.returncode == 0 else ''
+
+ # Extract just the key value from the TXT record
+ key_match = re.search(r'p=([A-Za-z0-9+/=\s]+)', public_key_record)
+ public_key = key_match.group(1).replace(' ', '').replace('\n', '').replace('\t', '').replace('"', '') if key_match else ''
+
+ return {
+ 'success': True,
+ 'domain': domain,
+ 'selector': selector,
+ 'private_key_path': os.path.join(key_dir, f'{selector}.private'),
+ 'public_key': public_key,
+ 'dns_record': public_key_record,
+ 'dns_name': f'{selector}._domainkey.{domain}',
+ 'dns_value': f'v=DKIM1; k=rsa; p={public_key}',
+ }
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def add_domain(cls, domain: str, selector: str = 'default') -> Dict:
+ """Add domain to KeyTable and SigningTable."""
+ try:
+ key_path = os.path.join(cls.OPENDKIM_KEYS_DIR, domain, f'{selector}.private')
+
+ # Add to KeyTable
+ key_entry = f'{selector}._domainkey.{domain} {domain}:{selector}:{key_path}\n'
+ result = run_privileged(['cat', cls.KEY_TABLE])
+ if domain not in (result.stdout or ''):
+ run_privileged(['tee', '-a', cls.KEY_TABLE], input=key_entry)
+
+ # Add to SigningTable
+ signing_entry = f'*@{domain} {selector}._domainkey.{domain}\n'
+ result = run_privileged(['cat', cls.SIGNING_TABLE])
+ if domain not in (result.stdout or ''):
+ run_privileged(['tee', '-a', cls.SIGNING_TABLE], input=signing_entry)
+
+ # Add to TrustedHosts
+ result = run_privileged(['cat', cls.TRUSTED_HOSTS])
+ if domain not in (result.stdout or ''):
+ run_privileged(['tee', '-a', cls.TRUSTED_HOSTS], input=f'*.{domain}\n')
+
+ # Reload OpenDKIM
+ ServiceControl.restart('opendkim', timeout=30)
+
+ return {'success': True, 'message': f'Domain {domain} added to DKIM'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def remove_domain(cls, domain: str) -> Dict:
+ """Remove domain from DKIM configuration."""
+ try:
+ # Remove from KeyTable
+ result = run_privileged(['cat', cls.KEY_TABLE])
+ lines = [l for l in (result.stdout or '').splitlines() if domain not in l]
+ run_privileged(['tee', cls.KEY_TABLE], input='\n'.join(lines) + '\n')
+
+ # Remove from SigningTable
+ result = run_privileged(['cat', cls.SIGNING_TABLE])
+ lines = [l for l in (result.stdout or '').splitlines() if domain not in l]
+ run_privileged(['tee', cls.SIGNING_TABLE], input='\n'.join(lines) + '\n')
+
+ # Remove from TrustedHosts
+ result = run_privileged(['cat', cls.TRUSTED_HOSTS])
+ lines = [l for l in (result.stdout or '').splitlines() if domain not in l]
+ run_privileged(['tee', cls.TRUSTED_HOSTS], input='\n'.join(lines) + '\n')
+
+ # Remove key files
+ key_dir = os.path.join(cls.OPENDKIM_KEYS_DIR, domain)
+ run_privileged(['rm', '-rf', key_dir])
+
+ ServiceControl.restart('opendkim', timeout=30)
+
+ return {'success': True, 'message': f'Domain {domain} removed from DKIM'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def get_dns_record(cls, domain: str, selector: str = 'default') -> Dict:
+ """Get the DKIM DNS TXT record content for a domain."""
+ try:
+ txt_file = os.path.join(cls.OPENDKIM_KEYS_DIR, domain, f'{selector}.txt')
+ result = run_privileged(['cat', txt_file])
+ if result.returncode != 0:
+ return {'success': False, 'error': 'DKIM key not found'}
+
+ record = result.stdout.strip()
+ key_match = re.search(r'p=([A-Za-z0-9+/=\s"]+)', record)
+ public_key = ''
+ if key_match:
+ public_key = key_match.group(1).replace(' ', '').replace('\n', '').replace('\t', '').replace('"', '')
+
+ return {
+ 'success': True,
+ 'dns_name': f'{selector}._domainkey.{domain}',
+ 'dns_value': f'v=DKIM1; k=rsa; p={public_key}',
+ 'raw_record': record,
+ }
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def verify_key(cls, domain: str, selector: str = 'default') -> Dict:
+ """Verify DKIM key configuration."""
+ try:
+ result = run_privileged([
+ 'opendkim-testkey',
+ '-d', domain,
+ '-s', selector,
+ '-vvv',
+ ])
+ success = result.returncode == 0
+ output = (result.stdout or '') + (result.stderr or '')
+ return {
+ 'success': success,
+ 'verified': success and 'key OK' in output,
+ 'output': output,
+ }
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def reload(cls) -> Dict:
+ """Reload OpenDKIM."""
+ try:
+ result = ServiceControl.restart('opendkim', timeout=30)
+ if result.returncode == 0:
+ return {'success': True, 'message': 'OpenDKIM restarted'}
+ return {'success': False, 'error': result.stderr or 'Restart failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
diff --git a/backend/app/services/dns_provider_service.py b/backend/app/services/dns_provider_service.py
new file mode 100644
index 0000000..5e51d7b
--- /dev/null
+++ b/backend/app/services/dns_provider_service.py
@@ -0,0 +1,376 @@
+"""DNS Provider service for managing DKIM/SPF/DMARC records via Cloudflare and Route53."""
+import logging
+from typing import Dict, List, Optional
+
+import requests
+
+from app import db
+from app.models.email import DNSProviderConfig
+
+logger = logging.getLogger(__name__)
+
+
class DNSProviderService:
    """Service for managing DNS records via Cloudflare and Route53 APIs.

    Every public method returns a plain dict shaped like
    ``{'success': bool, ...}`` so route handlers can serialize it directly.
    Provider credentials are loaded from ``DNSProviderConfig`` rows.
    """

    @classmethod
    def list_providers(cls) -> List[Dict]:
        """List all configured DNS providers (secrets masked)."""
        return [p.to_dict(mask_secrets=True) for p in DNSProviderConfig.query.all()]

    @classmethod
    def get_provider(cls, provider_id: int) -> Optional[DNSProviderConfig]:
        """Get a DNS provider config by ID, or None if it does not exist."""
        return DNSProviderConfig.query.get(provider_id)

    @classmethod
    def add_provider(cls, name: str, provider: str, api_key: str,
                     api_secret: str = None, api_email: str = None,
                     is_default: bool = False) -> Dict:
        """Add a new DNS provider configuration.

        Args:
            name: Human-readable label for this configuration.
            provider: Either 'cloudflare' or 'route53'.
            api_key: Cloudflare API token/global key, or AWS access key ID.
            api_secret: AWS secret access key (Route53 only).
            api_email: Cloudflare account email (legacy global-key auth only).
            is_default: If True, clear the default flag on all other rows.
        """
        if provider not in ('cloudflare', 'route53'):
            return {'success': False, 'error': 'Provider must be cloudflare or route53'}
        try:
            if is_default:
                # Only one configuration may carry the default flag at a time.
                DNSProviderConfig.query.filter_by(is_default=True).update({'is_default': False})

            config = DNSProviderConfig(
                name=name,
                provider=provider,
                api_key=api_key,
                api_secret=api_secret,
                api_email=api_email,
                is_default=is_default,
            )
            db.session.add(config)
            db.session.commit()
            return {'success': True, 'provider': config.to_dict(), 'message': 'DNS provider added'}
        except Exception as e:
            db.session.rollback()
            return {'success': False, 'error': str(e)}

    @classmethod
    def remove_provider(cls, provider_id: int) -> Dict:
        """Remove a DNS provider configuration."""
        try:
            config = DNSProviderConfig.query.get(provider_id)
            if not config:
                return {'success': False, 'error': 'Provider not found'}
            db.session.delete(config)
            db.session.commit()
            return {'success': True, 'message': 'DNS provider removed'}
        except Exception as e:
            db.session.rollback()
            return {'success': False, 'error': str(e)}

    @classmethod
    def test_connection(cls, provider_id: int) -> Dict:
        """Test DNS provider API connectivity using the stored credentials."""
        config = DNSProviderConfig.query.get(provider_id)
        if not config:
            return {'success': False, 'error': 'Provider not found'}

        if config.provider == 'cloudflare':
            return cls._test_cloudflare(config)
        elif config.provider == 'route53':
            return cls._test_route53(config)
        return {'success': False, 'error': 'Unknown provider'}

    @classmethod
    def list_zones(cls, provider_id: int) -> Dict:
        """List DNS zones (Cloudflare zones / Route53 hosted zones)."""
        config = DNSProviderConfig.query.get(provider_id)
        if not config:
            return {'success': False, 'error': 'Provider not found'}

        if config.provider == 'cloudflare':
            return cls._cloudflare_list_zones(config)
        elif config.provider == 'route53':
            return cls._route53_list_zones(config)
        return {'success': False, 'error': 'Unknown provider'}

    @classmethod
    def set_record(cls, provider_id: int, zone_id: str, record_type: str,
                   name: str, value: str, ttl: int = 3600) -> Dict:
        """Create or update (upsert) a DNS record.

        For MX records, pass ``value`` in zone-file form
        ('10 mail.example.com'); the Cloudflare backend splits the leading
        priority into the separate field its API requires.
        """
        config = DNSProviderConfig.query.get(provider_id)
        if not config:
            return {'success': False, 'error': 'Provider not found'}

        if config.provider == 'cloudflare':
            return cls._cloudflare_set_record(config, zone_id, record_type, name, value, ttl)
        elif config.provider == 'route53':
            return cls._route53_set_record(config, zone_id, record_type, name, value, ttl)
        return {'success': False, 'error': 'Unknown provider'}

    @classmethod
    def delete_record(cls, provider_id: int, zone_id: str, record_type: str, name: str) -> Dict:
        """Delete a DNS record (treated as success if already absent)."""
        config = DNSProviderConfig.query.get(provider_id)
        if not config:
            return {'success': False, 'error': 'Provider not found'}

        if config.provider == 'cloudflare':
            return cls._cloudflare_delete_record(config, zone_id, record_type, name)
        elif config.provider == 'route53':
            return cls._route53_delete_record(config, zone_id, record_type, name)
        return {'success': False, 'error': 'Unknown provider'}

    @classmethod
    def deploy_email_records(cls, provider_id: int, zone_id: str, domain: str,
                             selector: str, dkim_public_key: str,
                             server_ip: str = None) -> Dict:
        """Deploy DKIM, SPF, DMARC, and MX records for an email domain.

        Returns per-record results plus an aggregate ``success`` flag that is
        True only when every record was set.
        """
        results = {}

        # DKIM: selector._domainkey TXT record carrying the public key.
        dkim_name = f'{selector}._domainkey.{domain}'
        dkim_value = f'v=DKIM1; k=rsa; p={dkim_public_key}'
        results['dkim'] = cls.set_record(provider_id, zone_id, 'TXT', dkim_name, dkim_value)

        # SPF: include the server IP explicitly when known.
        spf_value = 'v=spf1 mx a ~all'
        if server_ip:
            spf_value = f'v=spf1 mx a ip4:{server_ip} ~all'
        results['spf'] = cls.set_record(provider_id, zone_id, 'TXT', domain, spf_value)

        # DMARC: quarantine policy with aggregate reports to dmarc@<domain>.
        dmarc_name = f'_dmarc.{domain}'
        dmarc_value = f'v=DMARC1; p=quarantine; rua=mailto:dmarc@{domain}; pct=100'
        results['dmarc'] = cls.set_record(provider_id, zone_id, 'TXT', dmarc_name, dmarc_value)

        # MX: zone-file form; the provider backends handle priority splitting.
        results['mx'] = cls.set_record(provider_id, zone_id, 'MX', domain, f'10 mail.{domain}')

        all_ok = all(r.get('success') for r in results.values())
        return {
            'success': all_ok,
            'results': results,
            'message': 'All DNS records deployed' if all_ok else 'Some records failed',
        }

    # ── Cloudflare Implementation ──

    @classmethod
    def _cloudflare_headers(cls, config: DNSProviderConfig) -> Dict:
        """Build Cloudflare API headers.

        Uses legacy email + global-key auth when an email is configured,
        otherwise Bearer-token auth.
        """
        if config.api_email:
            return {
                'X-Auth-Email': config.api_email,
                'X-Auth-Key': config.api_key,
                'Content-Type': 'application/json',
            }
        return {
            'Authorization': f'Bearer {config.api_key}',
            'Content-Type': 'application/json',
        }

    @classmethod
    def _test_cloudflare(cls, config: DNSProviderConfig) -> Dict:
        """Test Cloudflare API connection via the token-verify endpoint."""
        try:
            resp = requests.get(
                'https://api.cloudflare.com/client/v4/user/tokens/verify',
                headers=cls._cloudflare_headers(config),
                timeout=15,
            )
            data = resp.json()
            if data.get('success'):
                return {'success': True, 'message': 'Cloudflare connection successful'}
            return {'success': False, 'error': data.get('errors', [{}])[0].get('message', 'Unknown error')}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def _cloudflare_list_zones(cls, config: DNSProviderConfig) -> Dict:
        """List Cloudflare zones (first page, up to 50)."""
        try:
            resp = requests.get(
                'https://api.cloudflare.com/client/v4/zones?per_page=50',
                headers=cls._cloudflare_headers(config),
                timeout=15,
            )
            data = resp.json()
            if not data.get('success'):
                return {'success': False, 'error': 'Failed to list zones'}
            zones = [{'id': z['id'], 'name': z['name'], 'status': z['status']}
                     for z in data.get('result', [])]
            return {'success': True, 'zones': zones}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def _cloudflare_set_record(cls, config: DNSProviderConfig, zone_id: str,
                               record_type: str, name: str, value: str, ttl: int) -> Dict:
        """Create or update a Cloudflare DNS record (upsert semantics)."""
        try:
            headers = cls._cloudflare_headers(config)
            base = f'https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records'

            # Look up an existing record of the same type/name. Use params=
            # so the record name is URL-encoded properly.
            resp = requests.get(
                base,
                params={'type': record_type, 'name': name},
                headers=headers, timeout=15,
            )
            existing = resp.json().get('result', [])

            payload = {'type': record_type, 'name': name, 'content': value, 'ttl': ttl}
            if record_type == 'MX':
                # Cloudflare requires MX priority as a separate integer field;
                # accept zone-file form "10 mail.example.com" in `value`.
                parts = value.split(None, 1)
                if len(parts) == 2 and parts[0].isdigit():
                    payload['priority'] = int(parts[0])
                    payload['content'] = parts[1]

            if existing:
                # Update the first matching record in place.
                resp = requests.put(
                    f'{base}/{existing[0]["id"]}',
                    headers=headers, json=payload, timeout=15,
                )
            else:
                resp = requests.post(base, headers=headers, json=payload, timeout=15)

            data = resp.json()
            if data.get('success'):
                return {'success': True, 'message': f'{record_type} record set for {name}'}
            return {'success': False, 'error': data.get('errors', [{}])[0].get('message', 'Unknown error')}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def _cloudflare_delete_record(cls, config: DNSProviderConfig, zone_id: str,
                                  record_type: str, name: str) -> Dict:
        """Delete all Cloudflare DNS records matching type + name."""
        try:
            headers = cls._cloudflare_headers(config)
            base = f'https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records'

            resp = requests.get(
                base,
                params={'type': record_type, 'name': name},
                headers=headers, timeout=15,
            )
            existing = resp.json().get('result', [])

            if not existing:
                # Idempotent delete: absence is success.
                return {'success': True, 'message': 'Record not found (already deleted)'}

            for record in existing:
                requests.delete(f'{base}/{record["id"]}', headers=headers, timeout=15)

            return {'success': True, 'message': f'{record_type} record deleted for {name}'}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    # ── Route53 Implementation ──

    @classmethod
    def _get_route53_client(cls, config: DNSProviderConfig):
        """Get a boto3 Route53 client from the stored AWS credentials.

        Raises:
            RuntimeError: if boto3 is not installed.
        """
        try:
            import boto3
        except ImportError:
            raise RuntimeError('boto3 is required for Route53 integration. Install with: pip install boto3')

        return boto3.client(
            'route53',
            aws_access_key_id=config.api_key,
            aws_secret_access_key=config.api_secret,
        )

    @classmethod
    def _test_route53(cls, config: DNSProviderConfig) -> Dict:
        """Test Route53 API connection with a minimal list call."""
        try:
            client = cls._get_route53_client(config)
            client.list_hosted_zones(MaxItems='1')
            return {'success': True, 'message': 'Route53 connection successful'}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def _route53_list_zones(cls, config: DNSProviderConfig) -> Dict:
        """List Route53 hosted zones, normalized to the Cloudflare shape."""
        try:
            client = cls._get_route53_client(config)
            resp = client.list_hosted_zones()
            zones = [
                {
                    # Route53 zone IDs come prefixed with '/hostedzone/'.
                    'id': z['Id'].replace('/hostedzone/', ''),
                    'name': z['Name'].rstrip('.'),
                    'status': 'active',
                }
                for z in resp.get('HostedZones', [])
            ]
            return {'success': True, 'zones': zones}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def _route53_set_record(cls, config: DNSProviderConfig, zone_id: str,
                            record_type: str, name: str, value: str, ttl: int) -> Dict:
        """Create or update a Route53 DNS record via an UPSERT change."""
        try:
            client = cls._get_route53_client(config)
            # Route53 record names are FQDNs with a trailing dot.
            fqdn = name if name.endswith('.') else f'{name}.'

            if record_type == 'TXT':
                # Route53 TXT values must be quoted, and each quoted string is
                # limited to 255 characters. DKIM public keys routinely exceed
                # that, so split long values into multiple quoted chunks
                # (resolvers concatenate the chunks back together).
                chunks = [value[i:i + 255] for i in range(0, len(value), 255)] or ['']
                resource_record = {'Value': ' '.join(f'"{chunk}"' for chunk in chunks)}
            else:
                resource_record = {'Value': value}

            client.change_resource_record_sets(
                HostedZoneId=zone_id,
                ChangeBatch={
                    'Changes': [{
                        'Action': 'UPSERT',
                        'ResourceRecordSet': {
                            'Name': fqdn,
                            'Type': record_type,
                            'TTL': ttl,
                            'ResourceRecords': [resource_record],
                        }
                    }]
                }
            )
            return {'success': True, 'message': f'{record_type} record set for {name}'}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def _route53_delete_record(cls, config: DNSProviderConfig, zone_id: str,
                               record_type: str, name: str) -> Dict:
        """Delete a Route53 DNS record.

        Route53 DELETE changes must echo the record's current value, so the
        record set is fetched first.
        """
        try:
            client = cls._get_route53_client(config)
            fqdn = name if name.endswith('.') else f'{name}.'

            resp = client.list_resource_record_sets(
                HostedZoneId=zone_id,
                StartRecordName=fqdn,
                StartRecordType=record_type,
                MaxItems='1',
            )
            records = resp.get('ResourceRecordSets', [])
            matching = [r for r in records if r['Name'] == fqdn and r['Type'] == record_type]

            if not matching:
                # Idempotent delete: absence is success.
                return {'success': True, 'message': 'Record not found (already deleted)'}

            record = matching[0]
            client.change_resource_record_sets(
                HostedZoneId=zone_id,
                ChangeBatch={
                    'Changes': [{
                        'Action': 'DELETE',
                        'ResourceRecordSet': record,
                    }]
                }
            )
            return {'success': True, 'message': f'{record_type} record deleted for {name}'}
        except Exception as e:
            return {'success': False, 'error': str(e)}
diff --git a/backend/app/services/dovecot_service.py b/backend/app/services/dovecot_service.py
new file mode 100644
index 0000000..55cf3d2
--- /dev/null
+++ b/backend/app/services/dovecot_service.py
@@ -0,0 +1,351 @@
+"""Dovecot IMAP/POP3 server management service."""
+import os
+import re
+import subprocess
+from typing import Dict, Optional
+
+from app.utils.system import PackageManager, ServiceControl, run_privileged
+from app import paths
+
+
class DovecotService:
    """Service for managing Dovecot (IMAP/POP3 server).

    Mailboxes are virtual: accounts live in a passwd-file
    (``/etc/dovecot/users``) and mail is stored as Maildir under the vmail
    directory, owned by the vmail uid/gid from ``app.paths``.
    """

    DOVECOT_CONF_DIR = '/etc/dovecot'
    DOVECOT_CONF = '/etc/dovecot/dovecot.conf'
    DOVECOT_AUTH_CONF = '/etc/dovecot/conf.d/10-auth.conf'
    DOVECOT_MAIL_CONF = '/etc/dovecot/conf.d/10-mail.conf'
    DOVECOT_MASTER_CONF = '/etc/dovecot/conf.d/10-master.conf'
    DOVECOT_SSL_CONF = '/etc/dovecot/conf.d/10-ssl.conf'
    DOVECOT_PASSWD_FILE = '/etc/dovecot/users'
    AUTH_PASSWDFILE_CONF = '/etc/dovecot/conf.d/auth-passwdfile.conf.ext'

    # Template for 10-mail.conf; {vmail_*} placeholders filled by .format(),
    # doubled braces are literal Dovecot config braces.
    MAIL_CONF_CONTENT = """# Dovecot mail configuration - Managed by ServerKit
mail_location = maildir:{vmail_dir}/%d/%n/Maildir
namespace inbox {{
  inbox = yes
  separator = /
}}
mail_uid = {vmail_uid}
mail_gid = {vmail_gid}
mail_privileged_group = vmail
first_valid_uid = {vmail_uid}
last_valid_uid = {vmail_uid}
"""

    # 10-auth.conf: plaintext auth disabled; includes the passwd-file config.
    AUTH_CONF_CONTENT = """# Dovecot auth configuration - Managed by ServerKit
disable_plaintext_auth = yes
auth_mechanisms = plain login
!include auth-passwdfile.conf.ext
"""

    # auth-passwdfile.conf.ext template: passwd-file passdb + static userdb.
    AUTH_PASSWDFILE_CONTENT = """# Password file auth - Managed by ServerKit
passdb {{
  driver = passwd-file
  args = scheme=SHA512-CRYPT /etc/dovecot/users
}}
userdb {{
  driver = static
  args = uid={vmail_uid} gid={vmail_gid} home={vmail_dir}/%d/%n
}}
"""

    # 10-master.conf: written verbatim (no .format()), so single braces here.
    # Plain IMAP/POP3 listeners are disabled (port = 0); only the TLS ports
    # 993/995 are exposed. LMTP and SASL auth sockets are shared with Postfix.
    MASTER_CONF_CONTENT = """# Dovecot master configuration - Managed by ServerKit
service imap-login {
  inet_listener imap {
    port = 0
  }
  inet_listener imaps {
    port = 993
    ssl = yes
  }
}
service pop3-login {
  inet_listener pop3 {
    port = 0
  }
  inet_listener pop3s {
    port = 995
    ssl = yes
  }
}
service lmtp {
  unix_listener /var/spool/postfix/private/dovecot-lmtp {
    mode = 0600
    user = postfix
    group = postfix
  }
}
service auth {
  unix_listener /var/spool/postfix/private/auth {
    mode = 0666
    user = postfix
    group = postfix
  }
  unix_listener auth-userdb {
    mode = 0600
    user = vmail
    group = vmail
  }
  user = dovecot
}
service auth-worker {
  user = vmail
}
"""

    # 10-ssl.conf template; the '<' prefix tells Dovecot to read from file.
    SSL_CONF_CONTENT = """# Dovecot SSL configuration - Managed by ServerKit
ssl = required
ssl_cert = <{tls_cert}
ssl_key = <{tls_key}
ssl_min_protocol = TLSv1.2
ssl_prefer_server_ciphers = yes
"""

    @classmethod
    def get_status(cls) -> Dict:
        """Get Dovecot installation and running status.

        Returns a dict with ``installed``, ``running``, ``enabled`` and
        ``version`` (None when the version cannot be parsed).
        """
        installed = False
        running = False
        enabled = False
        version = None
        try:
            result = subprocess.run(['which', 'dovecot'], capture_output=True, text=True)
            installed = result.returncode == 0
            if not installed:
                # Binary not on PATH; fall back to the package database.
                installed = PackageManager.is_installed('dovecot-core') or PackageManager.is_installed('dovecot')
            if installed:
                running = ServiceControl.is_active('dovecot')
                enabled = ServiceControl.is_enabled('dovecot')
                result = subprocess.run(['dovecot', '--version'], capture_output=True, text=True)
                version_match = re.search(r'(\d+\.\d+\.\d+)', result.stdout)
                if version_match:
                    version = version_match.group(1)
        except (subprocess.SubprocessError, FileNotFoundError):
            # Best-effort probe: report whatever state was determined so far.
            pass

        return {
            'installed': installed,
            'running': running,
            'enabled': enabled,
            'version': version,
        }

    @classmethod
    def install(cls) -> Dict:
        """Install Dovecot packages and prepare the passwd file."""
        try:
            manager = PackageManager.detect()
            if manager == 'apt':
                packages = ['dovecot-core', 'dovecot-imapd', 'dovecot-pop3d', 'dovecot-lmtpd', 'dovecot-sieve']
            else:
                # Non-apt distros typically ship a single dovecot package.
                packages = ['dovecot']

            result = PackageManager.install(packages, timeout=300)
            if result.returncode != 0:
                return {'success': False, 'error': result.stderr or 'Failed to install Dovecot'}

            # Create the (empty) virtual-user passwd file with tight perms.
            # NOTE(review): assumes the 'vmail' user/group already exists at
            # this point — confirm it is created by an earlier setup step.
            run_privileged(['touch', cls.DOVECOT_PASSWD_FILE])
            run_privileged(['chown', 'vmail:dovecot', cls.DOVECOT_PASSWD_FILE])
            run_privileged(['chmod', '640', cls.DOVECOT_PASSWD_FILE])

            ServiceControl.enable('dovecot')

            return {'success': True, 'message': 'Dovecot installed successfully'}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def configure(cls, tls_cert: str = None, tls_key: str = None) -> Dict:
        """Write Dovecot configuration files and restart the service.

        Args:
            tls_cert: TLS certificate path; defaults to the snakeoil cert.
            tls_key: TLS key path; defaults to the snakeoil key.
        """
        try:
            cert = tls_cert or '/etc/ssl/certs/ssl-cert-snakeoil.pem'
            key = tls_key or '/etc/ssl/private/ssl-cert-snakeoil.key'

            # 10-mail.conf — mail location and vmail ownership.
            mail_conf = cls.MAIL_CONF_CONTENT.format(
                vmail_dir=paths.VMAIL_DIR,
                vmail_uid=paths.VMAIL_UID,
                vmail_gid=paths.VMAIL_GID,
            )
            run_privileged(['tee', cls.DOVECOT_MAIL_CONF], input=mail_conf)

            # 10-auth.conf — auth mechanisms and passwd-file include.
            run_privileged(['tee', cls.DOVECOT_AUTH_CONF], input=cls.AUTH_CONF_CONTENT)

            # auth-passwdfile.conf.ext — passdb/userdb definitions.
            auth_passwd = cls.AUTH_PASSWDFILE_CONTENT.format(
                vmail_uid=paths.VMAIL_UID,
                vmail_gid=paths.VMAIL_GID,
                vmail_dir=paths.VMAIL_DIR,
            )
            run_privileged(['tee', cls.AUTH_PASSWDFILE_CONF], input=auth_passwd)

            # 10-master.conf — listeners and Postfix-shared sockets.
            run_privileged(['tee', cls.DOVECOT_MASTER_CONF], input=cls.MASTER_CONF_CONTENT)

            # 10-ssl.conf — TLS cert/key and protocol floor.
            ssl_conf = cls.SSL_CONF_CONTENT.format(tls_cert=cert, tls_key=key)
            run_privileged(['tee', cls.DOVECOT_SSL_CONF], input=ssl_conf)

            # Restart (not reload) so listener changes take effect.
            ServiceControl.restart('dovecot', timeout=30)

            return {'success': True, 'message': 'Dovecot configured successfully'}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def create_mailbox(cls, email: str, password: str, domain: str, username: str, quota_mb: int = 1024) -> Dict:
        """Create a virtual mailbox with a passwd-file entry and Maildir.

        Returns the generated ``password_hash`` on success so callers (e.g.
        EmailService.add_account) can persist it instead of a sentinel.
        """
        try:
            # Hash the password with doveadm.
            # NOTE(review): the plaintext password appears in the doveadm
            # argv and may be visible in the process list briefly — consider
            # feeding it via stdin if doveadm supports it in this deployment.
            result = run_privileged(['doveadm', 'pw', '-s', 'SHA512-CRYPT', '-p', password])
            if result.returncode != 0:
                return {'success': False, 'error': 'Failed to hash password'}

            password_hash = result.stdout.strip()

            # Build passwd-file entry
            # Format: user@domain:{scheme}hash:uid:gid:home::userdb_quota_rule=*:storage=NM
            entry = f'{email}:{password_hash}:{paths.VMAIL_UID}:{paths.VMAIL_GID}:{paths.VMAIL_DIR}/{domain}/{username}::userdb_quota_rule=*:storage={quota_mb}M'

            # Append to passwd file
            run_privileged(['tee', '-a', cls.DOVECOT_PASSWD_FILE], input=entry + '\n')

            # Create the Maildir skeleton (cur/new/tmp) owned by vmail.
            maildir = os.path.join(paths.VMAIL_DIR, domain, username, 'Maildir')
            run_privileged(['mkdir', '-p', f'{maildir}/cur', f'{maildir}/new', f'{maildir}/tmp'])
            run_privileged(['chown', '-R', f'{paths.VMAIL_UID}:{paths.VMAIL_GID}',
                            os.path.join(paths.VMAIL_DIR, domain, username)])

            return {
                'success': True,
                'message': f'Mailbox {email} created',
                # Expose the hash so callers can store it (EmailService reads
                # this key; previously it always fell back to a sentinel).
                'password_hash': password_hash,
            }
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def delete_mailbox(cls, email: str, domain: str, username: str, remove_files: bool = False) -> Dict:
        """Delete a virtual mailbox.

        Removes the passwd-file entry; Maildir contents are removed only
        when ``remove_files`` is True.
        """
        try:
            # Rewrite the passwd file without this account's line.
            result = run_privileged(['cat', cls.DOVECOT_PASSWD_FILE])
            lines = (result.stdout or '').splitlines()
            kept = [line for line in lines if not line.startswith(f'{email}:')]
            run_privileged(['tee', cls.DOVECOT_PASSWD_FILE], input='\n'.join(kept) + '\n')

            # Optionally remove the on-disk Maildir.
            if remove_files:
                mailbox_path = os.path.join(paths.VMAIL_DIR, domain, username)
                run_privileged(['rm', '-rf', mailbox_path])

            return {'success': True, 'message': f'Mailbox {email} deleted'}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def change_password(cls, email: str, new_password: str) -> Dict:
        """Change a mailbox password by rewriting its passwd-file entry."""
        try:
            # Generate the new SHA512-CRYPT hash.
            result = run_privileged(['doveadm', 'pw', '-s', 'SHA512-CRYPT', '-p', new_password])
            if result.returncode != 0:
                return {'success': False, 'error': 'Failed to hash password'}

            new_hash = result.stdout.strip()

            # Read the current passwd file.
            result = run_privileged(['cat', cls.DOVECOT_PASSWD_FILE])
            lines = (result.stdout or '').splitlines()

            updated = False
            new_lines = []
            for line in lines:
                if line.startswith(f'{email}:'):
                    # Field 2 of the colon-separated entry is the hash.
                    parts = line.split(':')
                    parts[1] = new_hash
                    new_lines.append(':'.join(parts))
                    updated = True
                else:
                    new_lines.append(line)

            if not updated:
                return {'success': False, 'error': f'Account {email} not found'}

            run_privileged(['tee', cls.DOVECOT_PASSWD_FILE], input='\n'.join(new_lines) + '\n')

            return {'success': True, 'message': 'Password changed'}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def set_quota(cls, email: str, quota_mb: int) -> Dict:
        """Update a mailbox quota in its passwd-file entry."""
        try:
            result = run_privileged(['cat', cls.DOVECOT_PASSWD_FILE])
            lines = (result.stdout or '').splitlines()

            updated = False
            new_lines = []
            for line in lines:
                if line.startswith(f'{email}:'):
                    # Rewrite the storage figure in the userdb quota rule.
                    line = re.sub(r'userdb_quota_rule=\*:storage=\d+M',
                                  f'userdb_quota_rule=*:storage={quota_mb}M', line)
                    new_lines.append(line)
                    updated = True
                else:
                    new_lines.append(line)

            if not updated:
                return {'success': False, 'error': f'Account {email} not found'}

            run_privileged(['tee', cls.DOVECOT_PASSWD_FILE], input='\n'.join(new_lines) + '\n')

            return {'success': True, 'message': f'Quota set to {quota_mb}MB'}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def get_quota_usage(cls, email: str) -> Dict:
        """Get mailbox quota usage via ``doveadm quota get``.

        Returns storage figures in MB and the message count; limits of '-'
        (unlimited) are reported as 0.
        """
        try:
            result = run_privileged(['doveadm', 'quota', 'get', '-u', email])
            if result.returncode != 0:
                return {'success': False, 'error': 'Failed to get quota'}

            # Parse the whitespace-separated doveadm output rows.
            usage = {'storage_used': 0, 'storage_limit': 0, 'message_count': 0}
            for line in result.stdout.splitlines():
                parts = line.split()
                if len(parts) >= 4 and parts[0] == 'STORAGE':
                    usage['storage_used'] = int(parts[1]) // 1024  # KB to MB
                    usage['storage_limit'] = int(parts[2]) // 1024 if parts[2] != '-' else 0
                elif len(parts) >= 4 and parts[0] == 'MESSAGE':
                    usage['message_count'] = int(parts[1])

            return {'success': True, **usage}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def reload(cls) -> Dict:
        """Reload Dovecot configuration without dropping connections."""
        try:
            result = ServiceControl.reload('dovecot', timeout=30)
            if result.returncode == 0:
                return {'success': True, 'message': 'Dovecot reloaded'}
            return {'success': False, 'error': result.stderr or 'Reload failed'}
        except Exception as e:
            return {'success': False, 'error': str(e)}

    @classmethod
    def restart(cls) -> Dict:
        """Restart Dovecot (full stop/start)."""
        try:
            result = ServiceControl.restart('dovecot', timeout=30)
            if result.returncode == 0:
                return {'success': True, 'message': 'Dovecot restarted'}
            return {'success': False, 'error': result.stderr or 'Restart failed'}
        except Exception as e:
            return {'success': False, 'error': str(e)}
diff --git a/backend/app/services/email_service.py b/backend/app/services/email_service.py
new file mode 100644
index 0000000..f31f93b
--- /dev/null
+++ b/backend/app/services/email_service.py
@@ -0,0 +1,546 @@
+"""Email service orchestrator — delegates to sub-services for each component."""
+import logging
+import subprocess
+from typing import Dict, List, Optional
+
+from app import db
+from app.models.email import EmailDomain, EmailAccount, EmailAlias, EmailForwardingRule
+from app.services.postfix_service import PostfixService
+from app.services.dovecot_service import DovecotService
+from app.services.dkim_service import DKIMService
+from app.services.spamassassin_service import SpamAssassinService
+from app.services.roundcube_service import RoundcubeService
+from app.utils.system import PackageManager, ServiceControl
+
+logger = logging.getLogger(__name__)
+
+
class EmailService:
    """High-level email service that coordinates Postfix, Dovecot, DKIM, and SpamAssassin."""

    # Maps the public component names accepted by control_service() to the
    # sub-service that manages each daemon; the key is also passed directly
    # to ServiceControl as the service name.
    COMPONENTS = {
        'postfix': PostfixService,
        'dovecot': DovecotService,
        'opendkim': DKIMService,
        'spamassassin': SpamAssassinService,
    }
+
+ # ── Status ──
+
+ @classmethod
+ def get_status(cls) -> Dict:
+ """Get aggregate email server status."""
+ postfix = PostfixService.get_status()
+ dovecot = DovecotService.get_status()
+ dkim = DKIMService.get_status()
+ spam = SpamAssassinService.get_status()
+
+ all_installed = postfix['installed'] and dovecot['installed']
+ all_running = postfix.get('running', False) and dovecot.get('running', False)
+
+ return {
+ 'installed': all_installed,
+ 'running': all_running,
+ 'postfix': postfix,
+ 'dovecot': dovecot,
+ 'dkim': dkim,
+ 'spamassassin': spam,
+ }
+
+ # ── Installation ──
+
+ @classmethod
+ def install_all(cls, hostname: str = None) -> Dict:
+ """Install and configure all email components."""
+ results = {}
+
+ # 1. Install Postfix
+ results['postfix'] = PostfixService.install(hostname=hostname)
+ if not results['postfix'].get('success'):
+ return {'success': False, 'error': 'Postfix installation failed', 'results': results}
+
+ # 2. Configure Postfix
+ postfix_config = PostfixService.configure(hostname=hostname)
+ if not postfix_config.get('success'):
+ logger.warning(f"Postfix configuration warning: {postfix_config.get('error')}")
+
+ # 3. Install Dovecot
+ results['dovecot'] = DovecotService.install()
+ if not results['dovecot'].get('success'):
+ return {'success': False, 'error': 'Dovecot installation failed', 'results': results}
+
+ # 4. Configure Dovecot
+ dovecot_config = DovecotService.configure()
+ if not dovecot_config.get('success'):
+ logger.warning(f"Dovecot configuration warning: {dovecot_config.get('error')}")
+
+ # 5. Install OpenDKIM
+ results['dkim'] = DKIMService.install()
+ if not results['dkim'].get('success'):
+ logger.warning(f"DKIM installation warning: {results['dkim'].get('error')}")
+
+ # 6. Install SpamAssassin
+ results['spamassassin'] = SpamAssassinService.install()
+ if not results['spamassassin'].get('success'):
+ logger.warning(f"SpamAssassin installation warning: {results['spamassassin'].get('error')}")
+
+ all_ok = all(r.get('success') for r in results.values())
+ return {
+ 'success': all_ok,
+ 'message': 'All email components installed' if all_ok else 'Some components had issues',
+ 'results': results,
+ }
+
+ # ── Service Control ──
+
+ @classmethod
+ def control_service(cls, component: str, action: str) -> Dict:
+ """Start/stop/restart an email component."""
+ if component not in cls.COMPONENTS and component != 'roundcube':
+ return {'success': False, 'error': f'Unknown component: {component}'}
+
+ if action not in ('start', 'stop', 'restart', 'reload'):
+ return {'success': False, 'error': f'Invalid action: {action}'}
+
+ try:
+ if component == 'roundcube':
+ actions = {
+ 'start': RoundcubeService.start,
+ 'stop': RoundcubeService.stop,
+ 'restart': RoundcubeService.restart,
+ 'reload': RoundcubeService.restart,
+ }
+ return actions[action]()
+
+ service_name = component
+ if action == 'start':
+ result = ServiceControl.start(service_name, timeout=30)
+ elif action == 'stop':
+ result = ServiceControl.stop(service_name, timeout=30)
+ elif action == 'restart':
+ result = ServiceControl.restart(service_name, timeout=30)
+ elif action == 'reload':
+ result = ServiceControl.reload(service_name, timeout=30)
+
+ if result.returncode == 0:
+ return {'success': True, 'message': f'{component} {action} successful'}
+ return {'success': False, 'error': result.stderr or f'{action} failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ # ── Domains ──
+
+ @classmethod
+ def get_domains(cls) -> List[Dict]:
+ """List all email domains."""
+ domains = EmailDomain.query.all()
+ return [d.to_dict() for d in domains]
+
+ @classmethod
+ def add_domain(cls, name: str, dns_provider_id: int = None, dns_zone_id: str = None) -> Dict:
+ """Add an email domain."""
+ try:
+ existing = EmailDomain.query.filter_by(name=name).first()
+ if existing:
+ return {'success': False, 'error': f'Domain {name} already exists'}
+
+ domain = EmailDomain(
+ name=name,
+ dns_provider_id=dns_provider_id,
+ dns_zone_id=dns_zone_id,
+ )
+ db.session.add(domain)
+ db.session.commit()
+
+ # Add to Postfix virtual domains
+ PostfixService.add_domain(name)
+
+ # Generate DKIM key
+ dkim_result = DKIMService.generate_key(name)
+ if dkim_result.get('success'):
+ domain.dkim_selector = 'default'
+ domain.dkim_private_key_path = dkim_result.get('private_key_path')
+ domain.dkim_public_key = dkim_result.get('public_key')
+ domain.spf_record = f'v=spf1 mx a ~all'
+ domain.dmarc_record = f'v=DMARC1; p=quarantine; rua=mailto:dmarc@{name}; pct=100'
+ DKIMService.add_domain(name)
+ db.session.commit()
+
+ return {'success': True, 'domain': domain.to_dict(), 'message': f'Domain {name} added'}
+ except Exception as e:
+ db.session.rollback()
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def get_domain(cls, domain_id: int) -> Dict:
+ """Get domain details."""
+ domain = EmailDomain.query.get(domain_id)
+ if not domain:
+ return {'success': False, 'error': 'Domain not found'}
+ return {'success': True, 'domain': domain.to_dict()}
+
+ @classmethod
+ def remove_domain(cls, domain_id: int) -> Dict:
+ """Remove an email domain and all its accounts/aliases."""
+ try:
+ domain = EmailDomain.query.get(domain_id)
+ if not domain:
+ return {'success': False, 'error': 'Domain not found'}
+
+ domain_name = domain.name
+
+ # Remove accounts from Dovecot/Postfix
+ for account in domain.accounts:
+ DovecotService.delete_mailbox(account.email, domain_name, account.username, remove_files=True)
+ PostfixService.remove_mailbox(account.email)
+
+ # Remove aliases from Postfix
+ for alias in domain.aliases:
+ PostfixService.remove_alias(alias.source)
+
+ # Remove from DKIM
+ DKIMService.remove_domain(domain_name)
+
+ # Remove from Postfix virtual domains
+ PostfixService.remove_domain(domain_name)
+
+ # Delete from database
+ db.session.delete(domain)
+ db.session.commit()
+
+ return {'success': True, 'message': f'Domain {domain_name} removed'}
+ except Exception as e:
+ db.session.rollback()
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def verify_dns(cls, domain_id: int) -> Dict:
+ """Verify DNS records for a domain."""
+ domain = EmailDomain.query.get(domain_id)
+ if not domain:
+ return {'success': False, 'error': 'Domain not found'}
+
+ results = {}
+ name = domain.name
+
+ # Check MX record
+ try:
+ result = subprocess.run(['dig', '+short', 'MX', name], capture_output=True, text=True, timeout=10)
+ results['mx'] = {
+ 'found': bool(result.stdout.strip()),
+ 'value': result.stdout.strip() or None,
+ }
+ except Exception:
+ results['mx'] = {'found': False, 'error': 'DNS lookup failed'}
+
+ # Check SPF record
+ try:
+ result = subprocess.run(['dig', '+short', 'TXT', name], capture_output=True, text=True, timeout=10)
+ spf_found = 'v=spf1' in (result.stdout or '')
+ results['spf'] = {
+ 'found': spf_found,
+ 'value': result.stdout.strip() or None,
+ }
+ except Exception:
+ results['spf'] = {'found': False, 'error': 'DNS lookup failed'}
+
+ # Check DKIM record
+ selector = domain.dkim_selector or 'default'
+ try:
+ dkim_name = f'{selector}._domainkey.{name}'
+ result = subprocess.run(['dig', '+short', 'TXT', dkim_name], capture_output=True, text=True, timeout=10)
+ dkim_found = 'v=DKIM1' in (result.stdout or '')
+ results['dkim'] = {
+ 'found': dkim_found,
+ 'value': result.stdout.strip() or None,
+ 'name': dkim_name,
+ }
+ except Exception:
+ results['dkim'] = {'found': False, 'error': 'DNS lookup failed'}
+
+ # Check DMARC record
+ try:
+ dmarc_name = f'_dmarc.{name}'
+ result = subprocess.run(['dig', '+short', 'TXT', dmarc_name], capture_output=True, text=True, timeout=10)
+ dmarc_found = 'v=DMARC1' in (result.stdout or '')
+ results['dmarc'] = {
+ 'found': dmarc_found,
+ 'value': result.stdout.strip() or None,
+ }
+ except Exception:
+ results['dmarc'] = {'found': False, 'error': 'DNS lookup failed'}
+
+ all_ok = all(r.get('found') for r in results.values())
+ return {
+ 'success': True,
+ 'domain': name,
+ 'all_verified': all_ok,
+ 'records': results,
+ }
+
+ # ── Accounts ──
+
+ @classmethod
+ def get_accounts(cls, domain_id: int) -> List[Dict]:
+ """List email accounts for a domain."""
+ accounts = EmailAccount.query.filter_by(domain_id=domain_id).all()
+ return [a.to_dict() for a in accounts]
+
+ @classmethod
+ def add_account(cls, domain_id: int, username: str, password: str, quota_mb: int = 1024) -> Dict:
+ """Create an email account."""
+ try:
+ domain = EmailDomain.query.get(domain_id)
+ if not domain:
+ return {'success': False, 'error': 'Domain not found'}
+
+ email = f'{username}@{domain.name}'
+
+ existing = EmailAccount.query.filter_by(email=email).first()
+ if existing:
+ return {'success': False, 'error': f'Account {email} already exists'}
+
+ # Create mailbox in Dovecot
+ dovecot_result = DovecotService.create_mailbox(email, password, domain.name, username, quota_mb)
+ if not dovecot_result.get('success'):
+ return dovecot_result
+
+ # Add to Postfix virtual mailboxes
+ PostfixService.add_mailbox(email, domain.name, username)
+
+ # Get password hash for storage
+ password_hash = dovecot_result.get('password_hash', 'stored_in_dovecot')
+
+ account = EmailAccount(
+ email=email,
+ username=username,
+ password_hash=password_hash,
+ domain_id=domain_id,
+ quota_mb=quota_mb,
+ )
+ db.session.add(account)
+ db.session.commit()
+
+ return {'success': True, 'account': account.to_dict(), 'message': f'Account {email} created'}
+ except Exception as e:
+ db.session.rollback()
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def update_account(cls, account_id: int, quota_mb: int = None, is_active: bool = None) -> Dict:
+ """Update account settings."""
+ try:
+ account = EmailAccount.query.get(account_id)
+ if not account:
+ return {'success': False, 'error': 'Account not found'}
+
+ if quota_mb is not None:
+ account.quota_mb = quota_mb
+ DovecotService.set_quota(account.email, quota_mb)
+
+ if is_active is not None:
+ account.is_active = is_active
+
+ db.session.commit()
+ return {'success': True, 'account': account.to_dict(), 'message': 'Account updated'}
+ except Exception as e:
+ db.session.rollback()
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def delete_account(cls, account_id: int) -> Dict:
+ """Delete an email account."""
+ try:
+ account = EmailAccount.query.get(account_id)
+ if not account:
+ return {'success': False, 'error': 'Account not found'}
+
+ email = account.email
+ domain_name = account.domain.name if account.domain else ''
+ username = account.username
+
+ # Remove from Dovecot
+ DovecotService.delete_mailbox(email, domain_name, username, remove_files=True)
+
+ # Remove from Postfix
+ PostfixService.remove_mailbox(email)
+
+ db.session.delete(account)
+ db.session.commit()
+
+ return {'success': True, 'message': f'Account {email} deleted'}
+ except Exception as e:
+ db.session.rollback()
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def change_password(cls, account_id: int, new_password: str) -> Dict:
+ """Change an account password."""
+ try:
+ account = EmailAccount.query.get(account_id)
+ if not account:
+ return {'success': False, 'error': 'Account not found'}
+
+ result = DovecotService.change_password(account.email, new_password)
+ if not result.get('success'):
+ return result
+
+ account.password_hash = 'updated_in_dovecot'
+ db.session.commit()
+
+ return {'success': True, 'message': 'Password changed successfully'}
+ except Exception as e:
+ db.session.rollback()
+ return {'success': False, 'error': str(e)}
+
+ # ── Aliases ──
+
+ @classmethod
+ def get_aliases(cls, domain_id: int) -> List[Dict]:
+ """List email aliases for a domain."""
+ aliases = EmailAlias.query.filter_by(domain_id=domain_id).all()
+ return [a.to_dict() for a in aliases]
+
+ @classmethod
+ def add_alias(cls, domain_id: int, source: str, destination: str) -> Dict:
+ """Create an email alias."""
+ try:
+ domain = EmailDomain.query.get(domain_id)
+ if not domain:
+ return {'success': False, 'error': 'Domain not found'}
+
+ # Add @ domain if not already qualified
+ if '@' not in source:
+ source = f'{source}@{domain.name}'
+
+ alias = EmailAlias(
+ source=source,
+ destination=destination,
+ domain_id=domain_id,
+ )
+ db.session.add(alias)
+ db.session.commit()
+
+ # Add to Postfix virtual aliases
+ PostfixService.add_alias(source, destination)
+
+ return {'success': True, 'alias': alias.to_dict(), 'message': f'Alias {source} created'}
+ except Exception as e:
+ db.session.rollback()
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def remove_alias(cls, alias_id: int) -> Dict:
+ """Delete an email alias."""
+ try:
+ alias = EmailAlias.query.get(alias_id)
+ if not alias:
+ return {'success': False, 'error': 'Alias not found'}
+
+ PostfixService.remove_alias(alias.source)
+
+ db.session.delete(alias)
+ db.session.commit()
+
+ return {'success': True, 'message': 'Alias removed'}
+ except Exception as e:
+ db.session.rollback()
+ return {'success': False, 'error': str(e)}
+
+ # ── Forwarding Rules ──
+
+ @classmethod
+ def get_forwarding(cls, account_id: int) -> List[Dict]:
+ """List forwarding rules for an account."""
+ rules = EmailForwardingRule.query.filter_by(account_id=account_id).all()
+ return [r.to_dict() for r in rules]
+
+ @classmethod
+ def add_forwarding(cls, account_id: int, destination: str, keep_copy: bool = True) -> Dict:
+ """Create a forwarding rule."""
+ try:
+ account = EmailAccount.query.get(account_id)
+ if not account:
+ return {'success': False, 'error': 'Account not found'}
+
+ rule = EmailForwardingRule(
+ account_id=account_id,
+ destination=destination,
+ keep_copy=keep_copy,
+ )
+ db.session.add(rule)
+ db.session.commit()
+
+ # Update Postfix aliases for forwarding
+ cls._sync_forwarding_aliases(account)
+
+ return {'success': True, 'rule': rule.to_dict(), 'message': 'Forwarding rule created'}
+ except Exception as e:
+ db.session.rollback()
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def update_forwarding(cls, rule_id: int, destination: str = None,
+ keep_copy: bool = None, is_active: bool = None) -> Dict:
+ """Update a forwarding rule."""
+ try:
+ rule = EmailForwardingRule.query.get(rule_id)
+ if not rule:
+ return {'success': False, 'error': 'Forwarding rule not found'}
+
+ if destination is not None:
+ rule.destination = destination
+ if keep_copy is not None:
+ rule.keep_copy = keep_copy
+ if is_active is not None:
+ rule.is_active = is_active
+
+ db.session.commit()
+
+ # Re-sync Postfix aliases
+ cls._sync_forwarding_aliases(rule.account)
+
+ return {'success': True, 'rule': rule.to_dict(), 'message': 'Forwarding rule updated'}
+ except Exception as e:
+ db.session.rollback()
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def remove_forwarding(cls, rule_id: int) -> Dict:
+ """Delete a forwarding rule."""
+ try:
+ rule = EmailForwardingRule.query.get(rule_id)
+ if not rule:
+ return {'success': False, 'error': 'Forwarding rule not found'}
+
+ account = rule.account
+ db.session.delete(rule)
+ db.session.commit()
+
+ # Re-sync Postfix aliases
+ cls._sync_forwarding_aliases(account)
+
+ return {'success': True, 'message': 'Forwarding rule removed'}
+ except Exception as e:
+ db.session.rollback()
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def _sync_forwarding_aliases(cls, account: EmailAccount):
+ """Sync forwarding rules to Postfix virtual aliases for an account."""
+ active_rules = [r for r in account.forwarding_rules if r.is_active]
+
+ if not active_rules:
+ # Remove any forwarding alias
+ PostfixService.remove_alias(account.email)
+ return
+
+ # Build destination list
+ destinations = [r.destination for r in active_rules]
+ # If any rule wants to keep a copy, include the original mailbox
+ if any(r.keep_copy for r in active_rules):
+ destinations.append(account.email)
+
+ # Remove old alias and add new one
+ PostfixService.remove_alias(account.email)
+ PostfixService.add_alias(account.email, ', '.join(destinations))
diff --git a/backend/app/services/event_service.py b/backend/app/services/event_service.py
new file mode 100644
index 0000000..9acf7de
--- /dev/null
+++ b/backend/app/services/event_service.py
@@ -0,0 +1,268 @@
+"""Service for event emission and webhook delivery."""
+import hashlib
+import hmac
+import json
+import logging
+import time
+import threading
+import uuid
+from datetime import datetime, timedelta
+
+import requests as http_requests
+
+from app import db
+from app.models.event_subscription import EventSubscription, EventDelivery
+
+logger = logging.getLogger(__name__)
+
# Mapping from audit log actions to event types.
# Audit actions not listed here simply do not generate webhook events.
AUDIT_TO_EVENT = {
    'app.create': 'app.created',
    'app.update': 'app.updated',
    'app.delete': 'app.deleted',
    'app.start': 'app.started',
    'app.stop': 'app.stopped',
    'app.restart': 'app.restarted',
    'app.deploy': 'app.deployed',
    'backup.create': 'backup.created',
    'backup.restore': 'backup.restored',
    'user.create': 'user.created',
    'user.login': 'user.login',
    'api_key.create': 'api_key.created',
    'api_key.revoke': 'api_key.revoked',
}

# Available events catalog: static list of subscribable event types,
# served to clients via EventService.get_available_events().
EVENT_CATALOG = [
    {'type': 'app.created', 'category': 'Applications', 'description': 'An application was created'},
    {'type': 'app.updated', 'category': 'Applications', 'description': 'An application was updated'},
    {'type': 'app.deleted', 'category': 'Applications', 'description': 'An application was deleted'},
    {'type': 'app.started', 'category': 'Applications', 'description': 'An application was started'},
    {'type': 'app.stopped', 'category': 'Applications', 'description': 'An application was stopped'},
    {'type': 'app.restarted', 'category': 'Applications', 'description': 'An application was restarted'},
    {'type': 'app.deployed', 'category': 'Applications', 'description': 'An application was deployed'},
    {'type': 'container.started', 'category': 'Docker', 'description': 'A container was started'},
    {'type': 'container.stopped', 'category': 'Docker', 'description': 'A container was stopped'},
    {'type': 'backup.created', 'category': 'Backups', 'description': 'A backup was created'},
    {'type': 'backup.restored', 'category': 'Backups', 'description': 'A backup was restored'},
    {'type': 'user.created', 'category': 'Users', 'description': 'A user was created'},
    {'type': 'user.login', 'category': 'Users', 'description': 'A user logged in'},
    {'type': 'security.alert', 'category': 'Security', 'description': 'A security alert was triggered'},
    {'type': 'ssl.expiring', 'category': 'SSL', 'description': 'An SSL certificate is expiring soon'},
    {'type': 'domain.created', 'category': 'Domains', 'description': 'A domain was created'},
    {'type': 'domain.deleted', 'category': 'Domains', 'description': 'A domain was deleted'},
    {'type': 'api_key.created', 'category': 'API', 'description': 'An API key was created'},
    {'type': 'api_key.revoked', 'category': 'API', 'description': 'An API key was revoked'},
]
+
+
class EventService:
    """Service for emitting events and delivering webhooks."""

    @staticmethod
    def get_available_events():
        """Return the static catalog of subscribable event types."""
        return EVENT_CATALOG

    @staticmethod
    def emit(event_type, payload, user_id=None):
        """Emit an event to all matching active subscriptions.

        One pending EventDelivery row is created per matching subscription,
        then each delivery is dispatched on a daemon thread.

        Args:
            event_type: Dotted event name, e.g. 'app.created'.
            payload: JSON-serializable event payload.
            user_id: Optional id of the acting user (informational only here).
        """
        subscriptions = EventSubscription.query.filter_by(is_active=True).all()
        matching = [s for s in subscriptions if s.matches_event(event_type)]

        if not matching:
            return

        deliveries = []
        for sub in matching:
            delivery = EventDelivery(
                subscription_id=sub.id,
                event_type=event_type,
                status=EventDelivery.STATUS_PENDING,
            )
            delivery.set_payload(payload)
            db.session.add(delivery)
            deliveries.append(delivery)

        db.session.commit()

        # Capture the concrete app object while we still have a context.
        # `current_app` is unbound inside the worker threads, so resolving
        # it there would raise RuntimeError and silently drop the webhook.
        try:
            from flask import current_app
            app = current_app._get_current_object()
        except RuntimeError:
            app = None

        # Dispatch exactly the rows created above (ids were populated by the
        # commit). Re-querying for the "latest pending" delivery per
        # subscription could race with other concurrent emits of the same
        # event type and dispatch the wrong row.
        for delivery in deliveries:
            thread = threading.Thread(
                target=EventService._deliver_in_thread,
                args=(delivery.id, app),
                daemon=True,
            )
            thread.start()

    @staticmethod
    def emit_for_audit(action, target_type, target_id, details, user_id):
        """Translate an audit-log action into an event and emit it.

        Unmapped actions are ignored; emission errors are logged, never raised,
        so audit logging itself can never fail because of webhooks.
        """
        event_type = AUDIT_TO_EVENT.get(action)
        if not event_type:
            return

        payload = {
            'event': event_type,
            'timestamp': datetime.utcnow().isoformat(),
            'target_type': target_type,
            'target_id': target_id,
            'user_id': user_id,
            'details': details or {},
        }

        try:
            EventService.emit(event_type, payload, user_id)
        except Exception as e:
            logger.error(f'Failed to emit event {event_type}: {e}')

    @staticmethod
    def _deliver_in_thread(delivery_id, app=None):
        """Run one delivery inside an app context on a background thread.

        Args:
            delivery_id: Id of the EventDelivery row to deliver.
            app: Flask application captured by the caller. Worker threads
                have no app context of their own, so callers should always
                pass it; the `current_app` fallback only works when invoked
                from a thread that already has a context.
        """
        if app is None:
            from flask import current_app
            try:
                app = current_app._get_current_object()
            except RuntimeError:
                # No app object available - we cannot touch the database.
                logger.error('No Flask app available; dropping webhook delivery %s', delivery_id)
                return

        with app.app_context():
            EventService.deliver(delivery_id)

    @staticmethod
    def deliver(delivery_id):
        """Deliver one webhook: POST the payload and record the outcome.

        Signs the body with HMAC-SHA256 when the subscription has a secret,
        merges the subscription's custom headers, and schedules a retry on
        a non-2xx response or a network error.
        """
        delivery = EventDelivery.query.get(delivery_id)
        if not delivery:
            return

        subscription = delivery.subscription
        if not subscription or not subscription.is_active:
            # Subscription deleted or disabled since the event was queued.
            delivery.status = EventDelivery.STATUS_FAILED
            db.session.commit()
            return

        payload = delivery.get_payload()
        payload_json = json.dumps(payload)
        delivery_uuid = str(uuid.uuid4())

        headers = {
            'Content-Type': 'application/json',
            'X-ServerKit-Event': delivery.event_type,
            'X-ServerKit-Delivery': delivery_uuid,
            'User-Agent': 'ServerKit-Webhooks/1.0',
        }

        # HMAC signature if secret is set (lets receivers verify authenticity).
        if subscription.secret:
            signature = hmac.new(
                subscription.secret.encode(),
                payload_json.encode(),
                hashlib.sha256,
            ).hexdigest()
            headers['X-ServerKit-Signature'] = f'sha256={signature}'

        # Subscription-specific headers may override the defaults.
        custom_headers = subscription.get_headers()
        if custom_headers:
            headers.update(custom_headers)

        delivery.attempts = (delivery.attempts or 0) + 1
        start_time = time.time()

        try:
            resp = http_requests.post(  # nosec B113
                subscription.url,
                data=payload_json,
                headers=headers,
                timeout=subscription.timeout_seconds or 10,
            )
            elapsed_ms = (time.time() - start_time) * 1000

            delivery.http_status = resp.status_code
            delivery.response_body = resp.text[:1000] if resp.text else None
            delivery.duration_ms = round(elapsed_ms, 2)

            if 200 <= resp.status_code < 300:
                delivery.status = EventDelivery.STATUS_SUCCESS
                delivery.delivered_at = datetime.utcnow()
            else:
                _schedule_retry(delivery, subscription)

        except Exception as e:
            elapsed_ms = (time.time() - start_time) * 1000
            delivery.duration_ms = round(elapsed_ms, 2)
            delivery.response_body = str(e)[:1000]
            _schedule_retry(delivery, subscription)

        db.session.commit()

    @staticmethod
    def retry_failed():
        """Re-deliver pending deliveries whose scheduled retry time has arrived."""
        now = datetime.utcnow()
        pending = EventDelivery.query.filter(
            EventDelivery.status == EventDelivery.STATUS_PENDING,
            EventDelivery.next_retry_at <= now,
            EventDelivery.attempts > 0,
        ).all()

        for delivery in pending:
            EventService.deliver(delivery.id)

    @staticmethod
    def send_test(subscription_id):
        """Send a synchronous 'test.ping' event to one subscription.

        Returns the EventDelivery record (with its outcome populated), or
        None when the subscription does not exist.
        """
        sub = EventSubscription.query.get(subscription_id)
        if not sub:
            return None

        delivery = EventDelivery(
            subscription_id=sub.id,
            event_type='test.ping',
            status=EventDelivery.STATUS_PENDING,
        )
        delivery.set_payload({
            'event': 'test.ping',
            'timestamp': datetime.utcnow().isoformat(),
            'message': 'This is a test webhook from ServerKit',
        })
        db.session.add(delivery)
        db.session.commit()

        EventService.deliver(delivery.id)
        return delivery

    @staticmethod
    def get_deliveries(subscription_id, page=1, per_page=50):
        """Return a paginated, newest-first delivery history for a subscription."""
        return EventDelivery.query.filter_by(
            subscription_id=subscription_id
        ).order_by(
            EventDelivery.created_at.desc()
        ).paginate(page=page, per_page=per_page, error_out=False)

    @staticmethod
    def cleanup_old_deliveries(days=30):
        """Delete delivery records older than `days`; returns the number removed."""
        cutoff = datetime.utcnow() - timedelta(days=days)
        deleted = EventDelivery.query.filter(EventDelivery.created_at < cutoff).delete()
        db.session.commit()
        return deleted
+
+
def _schedule_retry(delivery, subscription):
    """Schedule a retry with exponential backoff, or fail after the retry budget."""
    allowed = subscription.retry_count or 3
    if delivery.attempts >= allowed:
        # Retry budget exhausted - mark the delivery permanently failed.
        delivery.status = EventDelivery.STATUS_FAILED
        return
    # Exponential backoff: 10s, 30s, 90s
    backoff = 10 * (3 ** (delivery.attempts - 1))
    delivery.next_retry_at = datetime.utcnow() + timedelta(seconds=backoff)
    delivery.status = EventDelivery.STATUS_PENDING
diff --git a/backend/app/services/file_service.py b/backend/app/services/file_service.py
index 051a2e0..ab02a7f 100644
--- a/backend/app/services/file_service.py
+++ b/backend/app/services/file_service.py
@@ -483,14 +483,37 @@ def _format_size(size: int) -> str:
size /= 1024
return f"{size:.1f} PB"
+ # Virtual/pseudo filesystem types to hide from disk usage
+ _VIRTUAL_FSTYPES = {
+ 'squashfs', 'tmpfs', 'devtmpfs', 'devfs', 'overlay', 'aufs',
+ 'proc', 'sysfs', 'cgroup', 'cgroup2', 'debugfs', 'tracefs',
+ 'securityfs', 'pstore', 'efivarfs', 'bpf', 'fusectl',
+ 'configfs', 'hugetlbfs', 'mqueue', 'ramfs', 'nsfs',
+ }
+
+ # Mount-point prefixes that are always noise
+ _SKIP_MOUNT_PREFIXES = ('/snap/', '/var/lib/docker/', '/run/')
+
@classmethod
def get_all_disk_mounts(cls) -> Dict:
- """Get disk usage for all mount points."""
+ """Get disk usage for all physical mount points, deduplicated by device."""
try:
partitions = psutil.disk_partitions(all=False)
mounts = []
+ seen_devices = set()
for partition in partitions:
+ # Skip virtual/pseudo filesystems
+ if partition.fstype in cls._VIRTUAL_FSTYPES:
+ continue
+ # Skip noisy mount prefixes (snaps, docker layers, etc.)
+ if any(partition.mountpoint.startswith(p) for p in cls._SKIP_MOUNT_PREFIXES):
+ continue
+ # Deduplicate: keep only the shortest mount path per device
+ if partition.device in seen_devices:
+ continue
+ seen_devices.add(partition.device)
+
try:
usage = psutil.disk_usage(partition.mountpoint)
mounts.append({
diff --git a/backend/app/services/ftp_service.py b/backend/app/services/ftp_service.py
index ac29575..90df625 100644
--- a/backend/app/services/ftp_service.py
+++ b/backend/app/services/ftp_service.py
@@ -4,7 +4,7 @@
import subprocess
import re
-from app.utils.system import PackageManager, ServiceControl, run_privileged
+from app.utils.system import PackageManager, ServiceControl, run_privileged, privileged_cmd
try:
import pwd
except ImportError:
@@ -285,6 +285,9 @@ def _update_proftpd_config(cls, settings: Dict) -> Dict:
@classmethod
def list_users(cls) -> Dict:
"""List FTP users."""
+ if pwd is None:
+ return {'success': False, 'error': 'User management requires Linux'}
+
try:
users = []
@@ -340,6 +343,9 @@ def list_users(cls) -> Dict:
@classmethod
def create_user(cls, username: str, password: str = None, home_dir: str = None) -> Dict:
"""Create a new FTP user."""
+ if pwd is None:
+ return {'success': False, 'error': 'User management requires Linux'}
+
# Validate username
if not re.match(r'^[a-z][a-z0-9_-]{2,31}$', username):
return {'success': False, 'error': 'Invalid username. Use lowercase letters, numbers, underscore, hyphen. 3-32 chars.'}
@@ -361,26 +367,29 @@ def create_user(cls, username: str, password: str = None, home_dir: str = None)
try:
# Create user with restricted shell
- result = subprocess.run([
- 'sudo', 'useradd',
+ result = run_privileged([
+ 'useradd',
'-m', # Create home directory
'-d', home_dir,
'-s', '/usr/sbin/nologin', # No shell access
'-c', f'FTP User {username}',
username
- ], capture_output=True, text=True)
+ ])
if result.returncode != 0:
return {'success': False, 'error': result.stderr or 'Failed to create user'}
# Set password
proc = subprocess.Popen(
- ['sudo', 'chpasswd'],
+ privileged_cmd(['chpasswd']),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
- proc.communicate(input=f'{username}:{password}'.encode())
+ stdout, stderr = proc.communicate(input=f'{username}:{password}'.encode())
+
+ if proc.returncode != 0:
+ return {'success': False, 'error': stderr.decode() or 'Failed to set password'}
# Add to vsftpd userlist if it exists
if os.path.exists(cls.VSFTPD_USER_LIST):
@@ -388,7 +397,7 @@ def create_user(cls, username: str, password: str = None, home_dir: str = None)
f.write(f'{username}\n')
# Set proper permissions on home directory
- subprocess.run(['sudo', 'chmod', '755', home_dir], capture_output=True)
+ run_privileged(['chmod', '755', home_dir])
return {
'success': True,
@@ -404,6 +413,9 @@ def create_user(cls, username: str, password: str = None, home_dir: str = None)
@classmethod
def delete_user(cls, username: str, delete_home: bool = False) -> Dict:
"""Delete an FTP user."""
+ if pwd is None:
+ return {'success': False, 'error': 'User management requires Linux'}
+
try:
# Check if user exists
try:
@@ -412,12 +424,12 @@ def delete_user(cls, username: str, delete_home: bool = False) -> Dict:
return {'success': False, 'error': 'User not found'}
# Delete user
- cmd = ['sudo', 'userdel']
+ cmd = ['userdel']
if delete_home:
cmd.append('-r')
cmd.append(username)
- result = subprocess.run(cmd, capture_output=True, text=True)
+ result = run_privileged(cmd)
if result.returncode != 0:
return {'success': False, 'error': result.stderr or 'Failed to delete user'}
@@ -437,6 +449,9 @@ def delete_user(cls, username: str, delete_home: bool = False) -> Dict:
@classmethod
def change_password(cls, username: str, new_password: str = None) -> Dict:
"""Change FTP user password."""
+ if pwd is None:
+ return {'success': False, 'error': 'User management requires Linux'}
+
try:
# Check if user exists
try:
@@ -450,7 +465,7 @@ def change_password(cls, username: str, new_password: str = None) -> Dict:
# Set password
proc = subprocess.Popen(
- ['sudo', 'chpasswd'],
+ privileged_cmd(['chpasswd']),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
@@ -537,6 +552,9 @@ def get_logs(cls, lines: int = 100) -> Dict:
@classmethod
def toggle_user(cls, username: str, enabled: bool) -> Dict:
"""Enable or disable an FTP user."""
+ if pwd is None:
+ return {'success': False, 'error': 'User management requires Linux'}
+
try:
# Check if user exists
try:
@@ -551,9 +569,8 @@ def toggle_user(cls, username: str, enabled: bool) -> Dict:
# Change shell to /bin/false to disable
shell = '/bin/false'
- result = subprocess.run(
- ['sudo', 'usermod', '-s', shell, username],
- capture_output=True, text=True
+ result = run_privileged(
+ ['usermod', '-s', shell, username]
)
if result.returncode != 0:
@@ -573,9 +590,8 @@ def toggle_user(cls, username: str, enabled: bool) -> Dict:
def disconnect_session(cls, pid: int) -> Dict:
"""Disconnect an active FTP session by PID."""
try:
- result = subprocess.run(
- ['sudo', 'kill', str(pid)],
- capture_output=True, text=True
+ result = run_privileged(
+ ['kill', str(pid)]
)
if result.returncode != 0:
diff --git a/backend/app/services/invitation_service.py b/backend/app/services/invitation_service.py
new file mode 100644
index 0000000..e6d5aca
--- /dev/null
+++ b/backend/app/services/invitation_service.py
@@ -0,0 +1,180 @@
+"""Service for managing team invitations."""
+import smtplib
+from datetime import datetime, timedelta
+from email.mime.text import MIMEText
+from email.mime.multipart import MIMEMultipart
+from app import db
+from app.models import User, Invitation
+
+
class InvitationService:
    """Stateless service for invitation operations."""

    @staticmethod
    def create_invitation(email=None, role='developer', permissions=None,
                          invited_by=None, expires_in_days=7):
        """Create a new invitation.

        Args:
            email: Optional invitee address; when given, at most one live
                pending invitation per address is allowed.
            role: Must be one of User.VALID_ROLES.
            permissions: Optional permission payload stored on the invite.
            invited_by: Id of the inviting user.
            expires_in_days: Days until expiry; 0/None means never expires.

        Returns:
            {'success': True, 'invitation': Invitation} or
            {'success': False, 'error': ...}.
        """
        if role not in User.VALID_ROLES:
            return {'success': False, 'error': f'Invalid role: {role}'}

        # Refuse a second live pending invite for the same address.
        if email:
            existing = Invitation.query.filter_by(
                email=email, status=Invitation.STATUS_PENDING
            ).first()
            if existing and not existing.is_expired:
                return {'success': False, 'error': 'A pending invitation already exists for this email'}

        expires_at = None
        if expires_in_days and expires_in_days > 0:
            expires_at = datetime.utcnow() + timedelta(days=expires_in_days)

        invitation = Invitation(
            email=email,
            role=role,
            invited_by=invited_by,
            expires_at=expires_at,
        )
        if permissions:
            invitation.set_permissions(permissions)

        db.session.add(invitation)
        db.session.commit()

        return {'success': True, 'invitation': invitation}

    @staticmethod
    def validate_token(token):
        """Validate an invite token. Returns the Invitation if usable, else None.

        A pending invitation past its expiry date is lazily flipped to
        STATUS_EXPIRED (and persisted) as a side effect.
        """
        invitation = Invitation.query.filter_by(token=token).first()
        if not invitation:
            return None

        # Auto-expire if past date
        if invitation.status == Invitation.STATUS_PENDING and invitation.is_expired:
            invitation.status = Invitation.STATUS_EXPIRED
            db.session.commit()
            return None

        if invitation.status != Invitation.STATUS_PENDING:
            return None

        return invitation

    @staticmethod
    def accept_invitation(token, user_id):
        """Mark a valid pending invitation as accepted by `user_id`."""
        invitation = InvitationService.validate_token(token)
        if not invitation:
            return {'success': False, 'error': 'Invalid or expired invitation'}

        invitation.status = Invitation.STATUS_ACCEPTED
        invitation.accepted_at = datetime.utcnow()
        invitation.accepted_by = user_id
        db.session.commit()

        return {'success': True, 'invitation': invitation}

    @staticmethod
    def revoke_invitation(invitation_id):
        """Revoke a pending invitation; other statuses are rejected."""
        invitation = Invitation.query.get(invitation_id)
        if not invitation:
            return {'success': False, 'error': 'Invitation not found'}

        if invitation.status != Invitation.STATUS_PENDING:
            return {'success': False, 'error': 'Only pending invitations can be revoked'}

        invitation.status = Invitation.STATUS_REVOKED
        db.session.commit()

        return {'success': True}

    @staticmethod
    def list_invitations(status=None):
        """List invitations newest-first, optionally filtered by status."""
        query = Invitation.query.order_by(Invitation.created_at.desc())
        if status:
            query = query.filter_by(status=status)
        return query.all()

    @staticmethod
    def send_invitation_email(invitation, base_url):
        """Send the invitation email via the configured SMTP server.

        Args:
            invitation: Invitation carrying a non-empty email address.
            base_url: Public base URL used to build the accept link.

        Returns:
            {'success': True} or {'success': False, 'error': ...}.
        """
        if not invitation.email:
            return {'success': False, 'error': 'No email address on invitation'}

        try:
            from app.services.notification_service import NotificationService
            config = NotificationService.get_config()
            email_config = config.get('email', {})
        except Exception:
            return {'success': False, 'error': 'Could not load email configuration'}

        if not email_config.get('smtp_host') or not email_config.get('from_email'):
            return {'success': False, 'error': 'SMTP not configured'}

        invite_url = f"{base_url}/register?invite={invitation.token}"

        msg = MIMEMultipart('alternative')
        msg['Subject'] = 'You have been invited to ServerKit'
        msg['From'] = email_config['from_email']
        msg['To'] = invitation.email

        text = (
            f"You have been invited to join ServerKit as a {invitation.role}.\n\n"
            f"Click the link below to create your account:\n{invite_url}\n\n"
        )
        if invitation.expires_at:
            text += f"This invitation expires on {invitation.expires_at.strftime('%Y-%m-%d %H:%M UTC')}.\n"

        # Minimal HTML alternative mirroring the plain-text part.
        # NOTE(review): reconstructed — the original markup was corrupted
        # (tags stripped, leaving unterminated string literals).
        html = (
            f"<h2>You're invited to ServerKit</h2>"
            f"<p>You have been invited to join as a {invitation.role}.</p>"
            f"<p><a href=\"{invite_url}\">Accept Invitation</a></p>"
        )
        if invitation.expires_at:
            html += f"<p>Expires {invitation.expires_at.strftime('%Y-%m-%d %H:%M UTC')}</p>"

        msg.attach(MIMEText(text, 'plain'))
        msg.attach(MIMEText(html, 'html'))

        server = None
        try:
            smtp_port = int(email_config.get('smtp_port', 587))
            use_tls = email_config.get('smtp_tls', True)

            if use_tls:
                server = smtplib.SMTP(email_config['smtp_host'], smtp_port)
                server.starttls()
            else:
                server = smtplib.SMTP_SSL(email_config['smtp_host'], smtp_port)

            if email_config.get('smtp_user') and email_config.get('smtp_password'):
                server.login(email_config['smtp_user'], email_config['smtp_password'])

            server.sendmail(email_config['from_email'], [invitation.email], msg.as_string())
            return {'success': True}
        except Exception as e:
            return {'success': False, 'error': str(e)}
        finally:
            # Always close the connection, even when the send failed.
            if server is not None:
                try:
                    server.quit()
                except Exception:
                    pass

    @staticmethod
    def cleanup_expired():
        """Batch-mark expired pending invitations; returns the count updated."""
        now = datetime.utcnow()
        expired = Invitation.query.filter(
            Invitation.status == Invitation.STATUS_PENDING,
            Invitation.expires_at.isnot(None),
            Invitation.expires_at < now
        ).all()

        count = 0
        for inv in expired:
            inv.status = Invitation.STATUS_EXPIRED
            count += 1

        if count:
            db.session.commit()
        return count
diff --git a/backend/app/services/log_service.py b/backend/app/services/log_service.py
index 441b4a9..43553fb 100644
--- a/backend/app/services/log_service.py
+++ b/backend/app/services/log_service.py
@@ -7,6 +7,7 @@
import queue
from app import paths
+from app.utils.system import run_privileged, privileged_cmd, is_command_available, sourced_result
class LogService:
@@ -73,74 +74,116 @@ def get_log_files(cls) -> List[Dict]:
@classmethod
def read_log(cls, filepath: str, lines: int = 100, from_end: bool = True) -> Dict:
- """Read lines from a log file."""
+ """Read lines from a log file. Falls back to Python I/O when tail/head are unavailable."""
if not cls.is_path_allowed(filepath):
return {'success': False, 'error': 'Access denied: path not in allowed directories'}
if not os.path.exists(filepath):
return {'success': False, 'error': 'Log file not found'}
- try:
- if from_end:
- # Use tail to get last N lines
- result = subprocess.run(
- ['sudo', 'tail', '-n', str(lines), filepath],
- capture_output=True,
- text=True,
- timeout=30
- )
- else:
- # Use head to get first N lines
- result = subprocess.run(
- ['sudo', 'head', '-n', str(lines), filepath],
- capture_output=True,
- text=True,
+ tool = 'tail' if from_end else 'head'
+
+ if is_command_available(tool):
+ try:
+ result = run_privileged(
+ [tool, '-n', str(lines), filepath],
timeout=30
)
- if result.returncode == 0:
- log_lines = result.stdout.split('\n')
- return {
- 'success': True,
- 'lines': log_lines,
- 'count': len(log_lines),
- 'filepath': filepath
- }
+ if result.returncode == 0:
+ log_lines = result.stdout.split('\n')
+ return {**sourced_result(log_lines, tool, tool), 'filepath': filepath}
+ else:
+ return {'success': False, 'error': result.stderr}
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ # Fallback: Python file I/O
+ try:
+ with open(filepath, 'r', errors='replace') as f:
+ all_lines = f.readlines()
+
+ if from_end:
+ log_lines = [l.rstrip('\n') for l in all_lines[-lines:]]
else:
- return {'success': False, 'error': result.stderr}
+ log_lines = [l.rstrip('\n') for l in all_lines[:lines]]
+ return {**sourced_result(log_lines, 'python', 'direct file read'), 'filepath': filepath}
+
+ except PermissionError:
+ return {'success': False, 'error': f'Permission denied reading {filepath}'}
except Exception as e:
return {'success': False, 'error': str(e)}
@classmethod
def search_log(cls, filepath: str, pattern: str, lines: int = 100) -> Dict:
- """Search log file for pattern."""
+ """Search log file for pattern. Falls back to Python regex when grep is unavailable."""
if not cls.is_path_allowed(filepath):
return {'success': False, 'error': 'Access denied: path not in allowed directories'}
if not os.path.exists(filepath):
return {'success': False, 'error': 'Log file not found'}
+ if is_command_available('grep'):
+ try:
+ result = run_privileged(
+ ['grep', '-i', '-m', str(lines), pattern, filepath],
+ timeout=60
+ )
+
+ # grep returns 1 if no matches (not an error)
+ if result.returncode in [0, 1]:
+ matches = result.stdout.split('\n') if result.stdout else []
+ return {
+ 'success': True,
+ 'matches': [m for m in matches if m],
+ 'count': len([m for m in matches if m]),
+ 'pattern': pattern
+ }
+ else:
+ return {'success': False, 'error': result.stderr}
+
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ # Fallback: Python regex search
+ import re
try:
- result = subprocess.run(
- ['sudo', 'grep', '-i', '-m', str(lines), pattern, filepath],
- capture_output=True,
- text=True,
- timeout=60
- )
+ regex = re.compile(pattern, re.IGNORECASE)
+ matches = []
+ with open(filepath, 'r', errors='replace') as f:
+ for line in f:
+ if regex.search(line):
+ matches.append(line.rstrip('\n'))
+ if len(matches) >= lines:
+ break
- # grep returns 1 if no matches (not an error)
- if result.returncode in [0, 1]:
- matches = result.stdout.split('\n') if result.stdout else []
- return {
- 'success': True,
- 'matches': [m for m in matches if m],
- 'count': len([m for m in matches if m]),
- 'pattern': pattern
- }
- else:
- return {'success': False, 'error': result.stderr}
+ return {
+ 'success': True,
+ 'matches': matches,
+ 'count': len(matches),
+ 'pattern': pattern
+ }
+ except re.error:
+ # Pattern might be a plain string, not valid regex — use substring match
+ matches = []
+ with open(filepath, 'r', errors='replace') as f:
+ for line in f:
+ if pattern.lower() in line.lower():
+ matches.append(line.rstrip('\n'))
+ if len(matches) >= lines:
+ break
+
+ return {
+ 'success': True,
+ 'matches': matches,
+ 'count': len(matches),
+ 'pattern': pattern
+ }
+ except PermissionError:
+ return {'success': False, 'error': f'Permission denied reading {filepath}'}
except Exception as e:
return {'success': False, 'error': str(e)}
@@ -184,13 +227,7 @@ def get_docker_app_logs(cls, app_name: str, app_dir: str, lines: int = 100) -> D
if result.returncode == 0:
log_lines = result.stdout.split('\n') if result.stdout else []
- return {
- 'success': True,
- 'lines': log_lines,
- 'count': len(log_lines),
- 'source': 'docker',
- 'app_dir': app_dir
- }
+ return {**sourced_result(log_lines, 'docker', 'Docker Compose'), 'app_dir': app_dir}
else:
# Try with docker-compose (older syntax) as fallback
result = subprocess.run(
@@ -202,13 +239,7 @@ def get_docker_app_logs(cls, app_name: str, app_dir: str, lines: int = 100) -> D
)
if result.returncode == 0:
log_lines = result.stdout.split('\n') if result.stdout else []
- return {
- 'success': True,
- 'lines': log_lines,
- 'count': len(log_lines),
- 'source': 'docker',
- 'app_dir': app_dir
- }
+ return {**sourced_result(log_lines, 'docker', 'Docker Compose (legacy)'), 'app_dir': app_dir}
return {'success': False, 'error': result.stderr or 'Failed to get Docker logs'}
except FileNotFoundError:
@@ -221,9 +252,24 @@ def get_docker_app_logs(cls, app_name: str, app_dir: str, lines: int = 100) -> D
@classmethod
def get_journalctl_logs(cls, unit: str = None, lines: int = 100,
since: str = None, priority: str = None) -> Dict:
- """Get logs from systemd journal."""
+ """Get system logs, trying journalctl → syslog → Windows Event Log."""
+ if is_command_available('journalctl'):
+ return cls._read_journalctl(unit, lines, since, priority)
+
+ syslog_path = cls._find_syslog()
+ if syslog_path:
+ return cls._read_syslog(syslog_path, unit, lines)
+
+ if os.name == 'nt':
+ return cls._read_windows_eventlog(lines)
+
+ return {'success': False, 'error': 'No system log source available — journalctl, syslog, and Windows Event Log are all unavailable'}
+
+ @classmethod
+ def _read_journalctl(cls, unit: str, lines: int, since: str, priority: str) -> Dict:
+ """Read logs from systemd journal."""
try:
- cmd = ['sudo', 'journalctl', '-n', str(lines), '--no-pager', '-o', 'short-iso']
+ cmd = ['journalctl', '-n', str(lines), '--no-pager', '-o', 'short-iso']
if unit:
cmd.extend(['-u', unit])
@@ -232,18 +278,72 @@ def get_journalctl_logs(cls, unit: str = None, lines: int = 100,
if priority:
cmd.extend(['-p', priority])
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)
+ result = run_privileged(cmd, timeout=60)
if result.returncode == 0:
log_lines = result.stdout.split('\n')
- return {
- 'success': True,
- 'lines': log_lines,
- 'count': len(log_lines)
- }
+ return sourced_result(log_lines, 'journalctl', 'systemd journal')
+ else:
+ return {'success': False, 'error': result.stderr}
+
+ except FileNotFoundError:
+ return {'success': False, 'error': 'journalctl command not found'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @staticmethod
+ def _find_syslog() -> Optional[str]:
+ """Return the first existing syslog path, or None."""
+ for path in ['/var/log/syslog', '/var/log/messages']:
+ if os.path.exists(path):
+ return path
+ return None
+
+ @classmethod
+ def _read_syslog(cls, filepath: str, service: str, lines: int) -> Dict:
+ """Read system logs from a syslog file, optionally filtering by service."""
+ try:
+ if service:
+ result = run_privileged(
+ ['bash', '-c', f'grep -i {subprocess.list2cmdline([service])} {subprocess.list2cmdline([filepath])} | tail -n {int(lines)}'],
+ timeout=60,
+ )
+ else:
+ result = run_privileged(
+ ['tail', '-n', str(lines), filepath],
+ timeout=60,
+ )
+
+ if result.returncode == 0 or (service and result.returncode == 1):
+ log_lines = result.stdout.split('\n') if result.stdout else []
+ return sourced_result(log_lines, 'syslog', filepath)
+ else:
+ return {'success': False, 'error': result.stderr}
+
+ except FileNotFoundError:
+ return {'success': False, 'error': 'Required commands not found'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @staticmethod
+ def _read_windows_eventlog(lines: int) -> Dict:
+ """Read system logs from Windows Event Log via wevtutil."""
+ try:
+ result = subprocess.run(
+ ['wevtutil', 'qe', 'System', f'/c:{int(lines)}', '/f:text', '/rd:true'],
+ capture_output=True,
+ text=True,
+ timeout=60,
+ )
+
+ if result.returncode == 0:
+ log_lines = result.stdout.split('\n') if result.stdout else []
+ return sourced_result(log_lines, 'eventlog', 'Windows Event Log')
else:
return {'success': False, 'error': result.stderr}
+ except FileNotFoundError:
+ return {'success': False, 'error': 'wevtutil command not found'}
except Exception as e:
return {'success': False, 'error': str(e)}
@@ -257,10 +357,8 @@ def clear_log(cls, filepath: str) -> Dict:
return {'success': False, 'error': 'Log file not found'}
try:
- result = subprocess.run(
- ['sudo', 'truncate', '-s', '0', filepath],
- capture_output=True,
- text=True
+ result = run_privileged(
+ ['truncate', '-s', '0', filepath]
)
if result.returncode == 0:
@@ -268,6 +366,8 @@ def clear_log(cls, filepath: str) -> Dict:
else:
return {'success': False, 'error': result.stderr}
+ except FileNotFoundError:
+ return {'success': False, 'error': 'truncate command not found'}
except Exception as e:
return {'success': False, 'error': str(e)}
@@ -275,10 +375,8 @@ def clear_log(cls, filepath: str) -> Dict:
def rotate_logs(cls) -> Dict:
"""Trigger log rotation."""
try:
- result = subprocess.run(
- ['sudo', 'logrotate', '-f', '/etc/logrotate.conf'],
- capture_output=True,
- text=True,
+ result = run_privileged(
+ ['logrotate', '-f', '/etc/logrotate.conf'],
timeout=120
)
@@ -286,6 +384,8 @@ def rotate_logs(cls) -> Dict:
'success': result.returncode == 0,
'message': 'Logs rotated' if result.returncode == 0 else result.stderr
}
+ except FileNotFoundError:
+ return {'success': False, 'error': 'logrotate command not found'}
except Exception as e:
return {'success': False, 'error': str(e)}
@@ -302,7 +402,7 @@ def tail_log(cls, filepath: str, callback, stop_event: threading.Event = None):
try:
process = subprocess.Popen(
- ['sudo', 'tail', '-f', '-n', '0', filepath],
+ privileged_cmd(['tail', '-f', '-n', '0', filepath]),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
diff --git a/backend/app/services/migration_service.py b/backend/app/services/migration_service.py
new file mode 100644
index 0000000..a8f438a
--- /dev/null
+++ b/backend/app/services/migration_service.py
@@ -0,0 +1,254 @@
+"""Database migration service using Flask-Migrate (Alembic)."""
+
+import os
+import logging
+import shutil
+from datetime import datetime
+
+from alembic import command
+from alembic.config import Config as AlembicConfig
+from alembic.script import ScriptDirectory
+from alembic.migration import MigrationContext
+from sqlalchemy import inspect as sa_inspect, text
+
+logger = logging.getLogger(__name__)
+
+
+class MigrationService:
+ """Handles database migration detection, backup, and execution."""
+
+ _needs_migration = False
+ _current_revision = None
+ _head_revision = None
+ _pending_migrations = []
+
+ @classmethod
+ def _get_alembic_config(cls, app):
+ """Build an Alembic config pointing at the migrations directory."""
+ migrations_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'migrations')
+ cfg = AlembicConfig()
+ cfg.set_main_option('script_location', migrations_dir)
+ cfg.set_main_option('sqlalchemy.url', app.config['SQLALCHEMY_DATABASE_URI'])
+ return cfg
+
+ @classmethod
+ def _fix_missing_columns(cls, db):
+ """Add columns that may be missing from existing tables.
+
+ Runs raw SQL before any ORM queries to prevent crashes when models
+ reference columns that don't exist in the database yet.
+ """
+ inspector = sa_inspect(db.engine)
+ existing_tables = inspector.get_table_names()
+
+ if 'users' in existing_tables:
+ cols = {c['name'] for c in inspector.get_columns('users')}
+ if 'auth_provider' not in cols:
+ logger.info('Adding missing column: users.auth_provider')
+ with db.engine.begin() as conn:
+ conn.execute(text(
+ "ALTER TABLE users ADD COLUMN auth_provider VARCHAR(50) DEFAULT 'local'"
+ ))
+
+ @classmethod
+ def check_and_prepare(cls, app):
+ """Called on startup to detect migration state.
+
+ Three scenarios:
+ 1. Fresh install (no DB / no tables) -> upgrade head to create everything
+        2. Existing install (tables exist, no alembic_version) -> upgrade head to align schema, then check
+ 3. Normal state (alembic_version exists) -> compare current vs head
+ """
+ from app import db
+
+ try:
+ # Fix any missing columns before ORM queries can fail
+ cls._fix_missing_columns(db)
+
+ cfg = cls._get_alembic_config(app)
+ script = ScriptDirectory.from_config(cfg)
+ head = script.get_current_head()
+ cls._head_revision = head
+
+ inspector = sa_inspect(db.engine)
+ existing_tables = inspector.get_table_names()
+ has_alembic = 'alembic_version' in existing_tables
+
+ # Count real application tables (exclude alembic_version)
+ app_tables = [t for t in existing_tables if t != 'alembic_version']
+
+ if not app_tables and not has_alembic:
+ # Scenario 1: Fresh install — create everything via Alembic
+ logger.info('Fresh install detected — running alembic upgrade head')
+ with app.app_context():
+ command.upgrade(cfg, 'head')
+ cls._needs_migration = False
+ cls._current_revision = head
+ cls._pending_migrations = []
+ return
+
+ if app_tables and not has_alembic:
+ # Scenario 2: Existing install upgrading to Alembic
+ # Run upgrade so migrations can add missing columns to existing tables
+ logger.info('Existing install detected — running alembic upgrade head')
+ with app.app_context():
+ command.upgrade(cfg, 'head')
+
+            # Scenario 3 (or after the upgrade above): compare current vs head
+ with db.engine.connect() as conn:
+ context = MigrationContext.configure(conn)
+ current_heads = context.get_current_heads()
+ cls._current_revision = current_heads[0] if current_heads else None
+
+ if cls._current_revision != head:
+ # Calculate pending migrations
+ cls._pending_migrations = []
+ for rev in script.walk_revisions():
+ if cls._current_revision and rev.revision == cls._current_revision:
+ break
+ cls._pending_migrations.append({
+ 'revision': rev.revision,
+ 'description': rev.doc or '',
+ 'down_revision': rev.down_revision,
+ })
+ cls._pending_migrations.reverse()
+ if cls._pending_migrations:
+ logger.info(
+ f'Database migration needed: {len(cls._pending_migrations)} pending '
+ f'(current={cls._current_revision}, head={head}) — running upgrade'
+ )
+ with app.app_context():
+ command.upgrade(cfg, 'head')
+ cls._current_revision = head
+ cls._needs_migration = False
+ else:
+ cls._needs_migration = False
+ cls._pending_migrations = []
+
+ except Exception as e:
+ logger.error(f'Migration check failed: {e}')
+ # Don't block startup on migration check failure — fall back to no-migration state
+ cls._needs_migration = False
+
+ @classmethod
+ def get_status(cls):
+ """Return current migration status."""
+ return {
+ 'needs_migration': cls._needs_migration,
+ 'current_revision': cls._current_revision,
+ 'head_revision': cls._head_revision,
+ 'pending_count': len(cls._pending_migrations),
+ 'pending_migrations': cls._pending_migrations,
+ }
+
+ @classmethod
+ def create_backup(cls, app):
+ """Create a database backup before migration.
+
+ SQLite: file copy. PostgreSQL: pg_dump.
+ """
+ db_url = app.config['SQLALCHEMY_DATABASE_URI']
+ timestamp = datetime.utcnow().strftime('%Y%m%d_%H%M%S')
+
+ try:
+ if db_url.startswith('sqlite'):
+ # Extract file path from sqlite:///path or sqlite:////path
+ db_path = db_url.replace('sqlite:///', '')
+ if not os.path.isabs(db_path):
+ db_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), db_path)
+
+ if not os.path.exists(db_path):
+ return {'success': False, 'error': 'Database file not found'}
+
+ backup_dir = os.path.join(os.path.dirname(db_path), 'backups')
+ os.makedirs(backup_dir, exist_ok=True)
+ backup_name = f'serverkit_pre_migration_{timestamp}.db'
+ backup_path = os.path.join(backup_dir, backup_name)
+
+ shutil.copy2(db_path, backup_path)
+ logger.info(f'Database backup created: {backup_path}')
+ return {'success': True, 'path': backup_path}
+
+ elif 'postgresql' in db_url:
+ import subprocess
+ backup_dir = '/var/serverkit/backups/db'
+ os.makedirs(backup_dir, exist_ok=True)
+ backup_name = f'serverkit_pre_migration_{timestamp}.sql'
+ backup_path = os.path.join(backup_dir, backup_name)
+
+ result = subprocess.run(
+ ['pg_dump', db_url, '-f', backup_path],
+ capture_output=True, text=True, timeout=300
+ )
+ if result.returncode != 0:
+ return {'success': False, 'error': result.stderr}
+
+ logger.info(f'Database backup created: {backup_path}')
+ return {'success': True, 'path': backup_path}
+
+ else:
+                return {'success': False, 'error': 'Unsupported database type'}
+
+ except Exception as e:
+ logger.error(f'Backup failed: {e}')
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def apply_migrations(cls, app):
+ """Run all pending Alembic migrations."""
+ try:
+ cfg = cls._get_alembic_config(app)
+
+ with app.app_context():
+ command.upgrade(cfg, 'head')
+
+ # Update internal state
+ from app import db
+ with db.engine.connect() as conn:
+ context = MigrationContext.configure(conn)
+ current_heads = context.get_current_heads()
+ cls._current_revision = current_heads[0] if current_heads else None
+
+ cls._needs_migration = False
+ cls._pending_migrations = []
+
+ # Record in SystemSettings
+ try:
+ from app.models import SystemSettings
+ from app import db as _db
+ SystemSettings.set('schema_version', cls._current_revision)
+ SystemSettings.set('last_migration_at', datetime.utcnow().isoformat())
+ _db.session.commit()
+ except Exception as e:
+ logger.warning(f'Failed to record migration in settings: {e}')
+
+ logger.info(f'Migrations applied successfully (now at {cls._current_revision})')
+ return {'success': True, 'revision': cls._current_revision}
+
+ except Exception as e:
+ logger.error(f'Migration failed: {e}')
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def get_migration_history(cls, app):
+ """Return list of all Alembic revisions with descriptions."""
+ try:
+ cfg = cls._get_alembic_config(app)
+ script = ScriptDirectory.from_config(cfg)
+
+ revisions = []
+ for rev in script.walk_revisions():
+ revisions.append({
+ 'revision': rev.revision,
+ 'down_revision': rev.down_revision,
+ 'description': rev.doc or '',
+ 'is_current': rev.revision == cls._current_revision,
+ 'is_head': rev.revision == cls._head_revision,
+ })
+
+ revisions.reverse()
+ return revisions
+
+ except Exception as e:
+ logger.error(f'Failed to get migration history: {e}')
+ return []
diff --git a/backend/app/services/nginx_service.py b/backend/app/services/nginx_service.py
index 88b2bda..3ec1aaa 100644
--- a/backend/app/services/nginx_service.py
+++ b/backend/app/services/nginx_service.py
@@ -4,7 +4,7 @@
from typing import Dict, List, Optional
from pathlib import Path
-from app.utils.system import ServiceControl, run_privileged
+from app.utils.system import ServiceControl, run_privileged, is_command_available
class NginxService:
@@ -293,6 +293,9 @@ class NginxService:
@classmethod
def test_config(cls) -> Dict:
"""Test Nginx configuration syntax."""
+ if not is_command_available('nginx'):
+ return {'success': False, 'error': 'nginx is not installed'}
+
try:
result = run_privileged([cls.NGINX_BIN, '-t'], timeout=30)
return {
diff --git a/backend/app/services/openapi_service.py b/backend/app/services/openapi_service.py
new file mode 100644
index 0000000..a4362a1
--- /dev/null
+++ b/backend/app/services/openapi_service.py
@@ -0,0 +1,230 @@
+"""Service for generating OpenAPI 3.0 specification."""
+import re
+from flask import current_app
+
+
+class OpenAPIService:
+ """Auto-generates OpenAPI 3.0 spec from registered Flask routes."""
+
+ # Blueprint name to tag mapping
+ TAG_MAP = {
+ 'auth': 'Authentication',
+ 'apps': 'Applications',
+ 'domains': 'Domains',
+ 'docker': 'Docker',
+ 'databases': 'Databases',
+ 'system': 'System',
+ 'processes': 'Processes',
+ 'logs': 'Logs',
+ 'nginx': 'Nginx',
+ 'ssl': 'SSL Certificates',
+ 'php': 'PHP',
+ 'wordpress': 'WordPress',
+ 'wordpress_sites': 'WordPress Sites',
+ 'python': 'Python',
+ 'monitoring': 'Monitoring',
+ 'notifications': 'Notifications',
+ 'backups': 'Backups',
+ 'deploy': 'Deployment',
+ 'builds': 'Builds',
+ 'templates': 'Templates',
+ 'files': 'File Manager',
+ 'ftp': 'FTP Server',
+ 'firewall': 'Firewall',
+ 'git': 'Git Server',
+ 'security': 'Security',
+ 'cron': 'Cron Jobs',
+ 'email': 'Email',
+ 'uptime': 'Uptime',
+ 'admin': 'Admin',
+ 'metrics': 'Metrics',
+ 'workflows': 'Workflows',
+ 'servers': 'Servers',
+ 'api_keys': 'API Keys',
+ 'api_analytics': 'API Analytics',
+ 'event_subscriptions': 'Event Subscriptions',
+ 'two_factor': 'Two-Factor Auth',
+ 'sso': 'SSO / OAuth',
+ 'migrations': 'Database Migrations',
+ 'env_vars': 'Environment Variables',
+ 'private_urls': 'Private URLs',
+ }
+
+ @staticmethod
+ def generate_spec():
+ """Generate the full OpenAPI 3.0 spec."""
+ app = current_app
+
+ spec = {
+ 'openapi': '3.0.3',
+ 'info': {
+ 'title': 'ServerKit API',
+ 'description': 'Server control panel API for managing web applications, databases, Docker containers, and security.',
+ 'version': '1.0.0',
+ 'contact': {
+ 'name': 'ServerKit',
+ },
+ },
+ 'servers': [
+ {
+ 'url': '/api/v1',
+ 'description': 'API v1',
+ },
+ ],
+ 'components': {
+ 'securitySchemes': {
+ 'BearerAuth': {
+ 'type': 'http',
+ 'scheme': 'bearer',
+ 'bearerFormat': 'JWT',
+ 'description': 'JWT access token',
+ },
+ 'ApiKeyAuth': {
+ 'type': 'apiKey',
+ 'in': 'header',
+ 'name': 'X-API-Key',
+ 'description': 'API key (sk_...)',
+ },
+ },
+ 'schemas': {
+ 'Error': {
+ 'type': 'object',
+ 'properties': {
+ 'error': {
+ 'type': 'string',
+ 'description': 'Error message',
+ },
+ },
+ },
+ 'Message': {
+ 'type': 'object',
+ 'properties': {
+ 'message': {
+ 'type': 'string',
+ 'description': 'Success message',
+ },
+ },
+ },
+ },
+ 'responses': {
+ '401': {
+ 'description': 'Unauthorized',
+ 'content': {
+ 'application/json': {
+ 'schema': {'$ref': '#/components/schemas/Error'},
+ },
+ },
+ },
+ '403': {
+ 'description': 'Forbidden',
+ 'content': {
+ 'application/json': {
+ 'schema': {'$ref': '#/components/schemas/Error'},
+ },
+ },
+ },
+ '404': {
+ 'description': 'Not found',
+ 'content': {
+ 'application/json': {
+ 'schema': {'$ref': '#/components/schemas/Error'},
+ },
+ },
+ },
+ },
+ },
+ 'security': [
+ {'BearerAuth': []},
+ {'ApiKeyAuth': []},
+ ],
+ 'paths': {},
+ 'tags': [],
+ }
+
+ # Collect tags and paths from registered routes
+ tags_seen = set()
+ paths = {}
+
+ for rule in app.url_map.iter_rules():
+ # Only include API routes
+ if not rule.rule.startswith('/api/v1/'):
+ continue
+
+ # Skip static and internal routes
+ if rule.endpoint == 'static' or rule.rule.endswith('/openapi.json'):
+ continue
+
+ # Get blueprint name
+ parts = rule.endpoint.split('.')
+ bp_name = parts[0] if len(parts) > 1 else None
+
+ # Convert Flask URL params to OpenAPI format
+ path = rule.rule.replace('/api/v1', '')
+ path = re.sub(r'<(?:int:|string:|float:)?(\w+)>', r'{\1}', path)
+
+ # Get tag
+ tag = OpenAPIService.TAG_MAP.get(bp_name, bp_name or 'Other')
+ if tag not in tags_seen:
+ tags_seen.add(tag)
+
+ # Get view function docstring
+ view_func = app.view_functions.get(rule.endpoint)
+ description = ''
+ if view_func and view_func.__doc__:
+ description = view_func.__doc__.strip()
+
+ # Build path item
+ if path not in paths:
+ paths[path] = {}
+
+ methods = [m.lower() for m in rule.methods if m not in ('HEAD', 'OPTIONS')]
+ for method in methods:
+ operation = {
+ 'tags': [tag],
+ 'summary': description or f'{method.upper()} {path}',
+ 'operationId': rule.endpoint.replace('.', '_'),
+ 'responses': {
+ '200': {
+ 'description': 'Success',
+ 'content': {
+ 'application/json': {
+ 'schema': {'type': 'object'},
+ },
+ },
+ },
+ '401': {'$ref': '#/components/responses/401'},
+ '403': {'$ref': '#/components/responses/403'},
+ },
+ }
+
+ # Add path parameters
+ params = re.findall(r'\{(\w+)\}', path)
+ if params:
+ operation['parameters'] = []
+ for param in params:
+ operation['parameters'].append({
+ 'name': param,
+ 'in': 'path',
+ 'required': True,
+ 'schema': {'type': 'integer' if param.endswith('_id') or param == 'id' else 'string'},
+ })
+
+ # Add request body for POST/PUT/PATCH
+ if method in ('post', 'put', 'patch'):
+ operation['requestBody'] = {
+ 'content': {
+ 'application/json': {
+ 'schema': {'type': 'object'},
+ },
+ },
+ }
+
+ if len(methods) > 1:
+ operation['operationId'] = f'{rule.endpoint.replace(".", "_")}_{method}'
+
+ paths[path][method] = operation
+
+ spec['paths'] = dict(sorted(paths.items()))
+ spec['tags'] = [{'name': t} for t in sorted(tags_seen)]
+
+ return spec
diff --git a/backend/app/services/permission_service.py b/backend/app/services/permission_service.py
new file mode 100644
index 0000000..553b66d
--- /dev/null
+++ b/backend/app/services/permission_service.py
@@ -0,0 +1,71 @@
+"""Service for managing per-feature user permissions."""
+from app import db
+from app.models import User
+
+
+class PermissionService:
+ """Stateless service for user permission operations."""
+
+ @staticmethod
+ def get_role_template(role):
+ """Return default permissions for a role."""
+ return User.ROLE_PERMISSION_TEMPLATES.get(role, {})
+
+ @staticmethod
+ def get_user_permissions(user_id):
+ """Get resolved permissions for a user."""
+ user = User.query.get(user_id)
+ if not user:
+ return None
+ return user.get_permissions()
+
+ @staticmethod
+ def update_user_permissions(user_id, permissions):
+ """Validate and store custom permissions for a user."""
+ user = User.query.get(user_id)
+ if not user:
+ return {'success': False, 'error': 'User not found'}
+
+ if user.role == User.ROLE_ADMIN:
+ return {'success': False, 'error': 'Admin permissions cannot be customized'}
+
+ error = PermissionService.validate_permissions(permissions)
+ if error:
+ return {'success': False, 'error': error}
+
+ user.set_permissions(permissions)
+ db.session.commit()
+ return {'success': True, 'permissions': user.get_permissions()}
+
+ @staticmethod
+ def reset_to_role_defaults(user_id):
+ """Clear custom permissions so user falls back to role template."""
+ user = User.query.get(user_id)
+ if not user:
+ return {'success': False, 'error': 'User not found'}
+
+ user.permissions = None
+ db.session.commit()
+ return {'success': True, 'permissions': user.get_permissions()}
+
+ @staticmethod
+ def validate_permissions(permissions):
+ """Validate permission structure. Returns error string or None."""
+ if not isinstance(permissions, dict):
+ return 'Permissions must be an object'
+
+ for feature, access in permissions.items():
+ if feature not in User.PERMISSION_FEATURES:
+ return f'Unknown feature: {feature}'
+ if not isinstance(access, dict):
+ return f'Feature "{feature}" must have read/write object'
+ for key in access:
+ if key not in ('read', 'write'):
+ return f'Invalid permission key "{key}" for feature "{feature}"'
+ if not isinstance(access[key], bool):
+ return f'Permission values must be boolean for "{feature}.{key}"'
+ # Write without read is invalid
+ if access.get('write') and not access.get('read'):
+ return f'Cannot have write without read for "{feature}"'
+
+ return None
diff --git a/backend/app/services/php_service.py b/backend/app/services/php_service.py
index 1832335..2351a30 100644
--- a/backend/app/services/php_service.py
+++ b/backend/app/services/php_service.py
@@ -4,7 +4,7 @@
from typing import Dict, List, Optional
from pathlib import Path
-from app.utils.system import PackageManager, ServiceControl, run_privileged
+from app.utils.system import PackageManager, ServiceControl, run_privileged, is_command_available
class PHPService:
@@ -77,7 +77,7 @@ def get_installed_versions(cls) -> List[Dict]:
timeout=10
)
full_version = result.stdout.split('\n')[0] if result.returncode == 0 else version
- except:
+ except Exception:
full_version = version
# Check if FPM is installed
@@ -88,7 +88,7 @@ def get_installed_versions(cls) -> List[Dict]:
if fpm_installed:
try:
fpm_running = ServiceControl.is_active(f'php{version}-fpm')
- except:
+ except Exception:
pass
versions.append({
@@ -116,7 +116,7 @@ def get_default_version(cls) -> Optional[str]:
match = re.search(r'PHP (\d+\.\d+)', result.stdout)
if match:
return match.group(1)
- except:
+ except Exception:
pass
return None
@@ -150,13 +150,17 @@ def install_version(cls, version: str) -> Dict:
return {'success': False, 'error': f'Unsupported PHP version: {version}'}
try:
- # Add PHP repository if needed
- run_privileged(
- ['add-apt-repository', '-y', 'ppa:ondrej/php'],
- timeout=120,
- )
-
- run_privileged(['apt-get', 'update'], timeout=120)
+ # Add PHP repository if needed (Ubuntu/Debian only)
+ if is_command_available('add-apt-repository'):
+ run_privileged(
+ ['add-apt-repository', '-y', 'ppa:ondrej/php'],
+ timeout=120,
+ )
+
+ # Update package lists (apt-specific, safe to skip on non-apt)
+ manager = PackageManager.detect()
+ if manager == 'apt':
+ run_privileged(['apt-get', 'update'], timeout=120)
# Install PHP and common extensions
packages = [
@@ -177,10 +181,7 @@ def install_version(cls, version: str) -> Dict:
f'php{version}-bcmath',
]
- result = run_privileged(
- ['apt-get', 'install', '-y'] + packages,
- timeout=600,
- )
+ result = PackageManager.install(packages, timeout=600)
if result.returncode == 0:
# Start FPM service
@@ -216,7 +217,7 @@ def get_extensions(cls, version: str) -> List[Dict]:
'name': ext,
'enabled': True
})
- except:
+ except Exception:
pass
return extensions
@@ -227,10 +228,7 @@ def install_extension(cls, version: str, extension: str) -> Dict:
package = f'php{version}-{extension}'
try:
- result = run_privileged(
- ['apt-get', 'install', '-y', package],
- timeout=120,
- )
+ result = PackageManager.install(package, timeout=120)
if result.returncode == 0:
# Restart FPM to load extension
@@ -263,7 +261,7 @@ def get_pools(cls, version: str) -> List[Dict]:
'pm': config.get('pm', 'dynamic'),
'max_children': config.get('pm.max_children', '5')
})
- except Exception as e:
+ except Exception:
pass
return pools
@@ -279,7 +277,7 @@ def _parse_pool_config(cls, filepath: str) -> Dict:
if line and not line.startswith(';') and '=' in line:
key, value = line.split('=', 1)
config[key.strip()] = value.strip()
- except:
+ except Exception:
pass
return config
diff --git a/backend/app/services/postfix_service.py b/backend/app/services/postfix_service.py
new file mode 100644
index 0000000..f690ddd
--- /dev/null
+++ b/backend/app/services/postfix_service.py
@@ -0,0 +1,390 @@
+"""Postfix SMTP server management service."""
+import os
+import re
+import subprocess
+from typing import Dict
+
+from app.utils.system import PackageManager, ServiceControl, run_privileged
+from app import paths
+
+
+class PostfixService:
+ """Service for managing Postfix (SMTP server)."""
+
+ POSTFIX_MAIN_CF = '/etc/postfix/main.cf'
+ POSTFIX_MASTER_CF = '/etc/postfix/master.cf'
+ VIRTUAL_DOMAINS_FILE = '/etc/postfix/virtual_domains'
+ VIRTUAL_MAILBOXES_FILE = '/etc/postfix/virtual_mailboxes'
+ VIRTUAL_ALIASES_FILE = '/etc/postfix/virtual_aliases'
+
+ MAIN_CF_ADDITIONS = """
+# ServerKit mail configuration
+smtpd_banner = $myhostname ESMTP
+biff = no
+append_dot_mydomain = no
+readme_directory = no
+compatibility_level = 3.6
+
+# TLS parameters
+smtpd_tls_cert_file = {tls_cert}
+smtpd_tls_key_file = {tls_key}
+smtpd_tls_security_level = may
+smtpd_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
+smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
+smtp_tls_security_level = may
+
+# Virtual mailbox configuration
+virtual_mailbox_domains = hash:/etc/postfix/virtual_domains
+virtual_mailbox_maps = hash:/etc/postfix/virtual_mailboxes
+virtual_alias_maps = hash:/etc/postfix/virtual_aliases
+virtual_mailbox_base = {vmail_dir}
+virtual_minimum_uid = {vmail_uid}
+virtual_uid_maps = static:{vmail_uid}
+virtual_gid_maps = static:{vmail_gid}
+virtual_transport = lmtp:unix:private/dovecot-lmtp
+
+# SASL authentication
+smtpd_sasl_type = dovecot
+smtpd_sasl_path = private/auth
+smtpd_sasl_auth_enable = yes
+smtpd_sasl_security_options = noanonymous
+smtpd_sasl_local_domain = $myhostname
+broken_sasl_auth_clients = yes
+
+# Restrictions
+smtpd_recipient_restrictions =
+ permit_sasl_authenticated,
+ permit_mynetworks,
+ reject_unauth_destination,
+ reject_invalid_hostname,
+ reject_non_fqdn_hostname,
+ reject_non_fqdn_sender,
+ reject_non_fqdn_recipient,
+ reject_unknown_sender_domain,
+ reject_unknown_recipient_domain,
+ reject_rbl_client zen.spamhaus.org
+
+# Milters: OpenDKIM (inet:localhost:8891) and SpamAssassin
+# (inet:localhost:8893). Postfix honors only the LAST smtpd_milters
+# assignment, so both endpoints must be listed in a single directive.
+smtpd_milters = inet:localhost:8891, inet:localhost:8893
+non_smtpd_milters = $smtpd_milters
+milter_default_action = accept
+milter_protocol = 6
+
+
+# Message size limit (25MB)
+message_size_limit = 26214400
+mailbox_size_limit = 0
+"""
+
+ SUBMISSION_CONF = """submission inet n - y - - smtpd
+ -o syslog_name=postfix/submission
+ -o smtpd_tls_security_level=encrypt
+ -o smtpd_sasl_auth_enable=yes
+ -o smtpd_tls_auth_only=yes
+ -o smtpd_reject_unlisted_recipient=no
+ -o smtpd_recipient_restrictions=permit_sasl_authenticated,reject
+ -o milter_macro_daemon_name=ORIGINATING
+"""
+
+ @classmethod
+ def get_status(cls) -> Dict:
+ """Get Postfix installation and running status."""
+ installed = False
+ running = False
+ enabled = False
+ version = None
+ hostname = None
+
+ try:
+ result = subprocess.run(['which', 'postfix'], capture_output=True, text=True)
+ installed = result.returncode == 0
+ if not installed:
+ installed = PackageManager.is_installed('postfix')
+
+ if installed:
+ running = ServiceControl.is_active('postfix')
+ enabled = ServiceControl.is_enabled('postfix')
+
+ result = subprocess.run(['postconf', 'mail_version'], capture_output=True, text=True)
+ match = re.search(r'mail_version\s*=\s*(\S+)', result.stdout)
+ if match:
+ version = match.group(1)
+
+ result = subprocess.run(['postconf', 'myhostname'], capture_output=True, text=True)
+ match = re.search(r'myhostname\s*=\s*(\S+)', result.stdout)
+ if match:
+ hostname = match.group(1)
+ except (subprocess.SubprocessError, FileNotFoundError):
+ pass
+
+ return {
+ 'installed': installed,
+ 'running': running,
+ 'enabled': enabled,
+ 'version': version,
+ 'hostname': hostname,
+ }
+
+ @classmethod
+ def install(cls, hostname: str = None) -> Dict:
+ """Install Postfix."""
+ try:
+ # Pre-seed debconf to avoid interactive prompts
+ if PackageManager.detect() == 'apt':
+                # Feed selections to debconf-set-selections via stdin rather
+                # than interpolating hostname into a bash -c string (injection risk).
+                run_privileged(['debconf-set-selections'], input=(
+                    f'postfix postfix/mailname string {hostname or "localhost"}\npostfix postfix/main_mailer_type select Internet Site\n'))
+
+ result = PackageManager.install(['postfix'], timeout=300)
+ if result.returncode != 0:
+ return {'success': False, 'error': result.stderr or 'Failed to install Postfix'}
+
+ # Create virtual map files
+ for path in [cls.VIRTUAL_DOMAINS_FILE, cls.VIRTUAL_MAILBOXES_FILE, cls.VIRTUAL_ALIASES_FILE]:
+ run_privileged(['touch', path])
+ run_privileged(['postmap', path])
+
+ # Create vmail user/group
+ run_privileged(['groupadd', '-g', str(paths.VMAIL_GID), 'vmail'], check=False)
+ run_privileged(['useradd', '-u', str(paths.VMAIL_UID), '-g', 'vmail',
+ '-d', paths.VMAIL_DIR, '-s', '/usr/sbin/nologin', 'vmail'], check=False)
+ run_privileged(['mkdir', '-p', paths.VMAIL_DIR])
+ run_privileged(['chown', '-R', f'{paths.VMAIL_UID}:{paths.VMAIL_GID}', paths.VMAIL_DIR])
+
+ ServiceControl.enable('postfix')
+ ServiceControl.start('postfix', timeout=30)
+
+ return {'success': True, 'message': 'Postfix installed successfully'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def configure(cls, hostname: str = None, tls_cert: str = None, tls_key: str = None) -> Dict:
+ """Configure Postfix for virtual mailbox hosting."""
+ try:
+ cert = tls_cert or '/etc/ssl/certs/ssl-cert-snakeoil.pem'
+ key = tls_key or '/etc/ssl/private/ssl-cert-snakeoil.key'
+
+ if hostname:
+ run_privileged(['postconf', '-e', f'myhostname={hostname}'])
+
+ additions = cls.MAIN_CF_ADDITIONS.format(
+ tls_cert=cert,
+ tls_key=key,
+ vmail_dir=paths.VMAIL_DIR,
+ vmail_uid=paths.VMAIL_UID,
+ vmail_gid=paths.VMAIL_GID,
+ )
+
+ # Append to main.cf
+ run_privileged(['tee', '-a', cls.POSTFIX_MAIN_CF], input=additions)
+
+ # Enable submission port in master.cf
+ result = run_privileged(['cat', cls.POSTFIX_MASTER_CF])
+ if 'submission' not in (result.stdout or ''):
+ run_privileged(['tee', '-a', cls.POSTFIX_MASTER_CF], input=cls.SUBMISSION_CONF)
+
+ # Restart
+ ServiceControl.restart('postfix', timeout=30)
+
+ return {'success': True, 'message': 'Postfix configured successfully'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def add_domain(cls, domain: str) -> Dict:
+ """Add a domain to the virtual domains map."""
+ try:
+ result = run_privileged(['cat', cls.VIRTUAL_DOMAINS_FILE])
+ content = result.stdout or ''
+ if domain not in content:
+ run_privileged(['tee', '-a', cls.VIRTUAL_DOMAINS_FILE], input=f'{domain} OK\n')
+ run_privileged(['postmap', cls.VIRTUAL_DOMAINS_FILE])
+ run_privileged(['postfix', 'reload'])
+ return {'success': True}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def remove_domain(cls, domain: str) -> Dict:
+ """Remove a domain from the virtual domains map."""
+ try:
+ result = run_privileged(['cat', cls.VIRTUAL_DOMAINS_FILE])
+ lines = [l for l in (result.stdout or '').splitlines() if not l.startswith(f'{domain} ')]
+ run_privileged(['tee', cls.VIRTUAL_DOMAINS_FILE], input='\n'.join(lines) + '\n')
+ run_privileged(['postmap', cls.VIRTUAL_DOMAINS_FILE])
+
+ # Also remove mailboxes for this domain
+ result = run_privileged(['cat', cls.VIRTUAL_MAILBOXES_FILE])
+ lines = [l for l in (result.stdout or '').splitlines() if not l.endswith(f'@{domain}') and f'@{domain} ' not in l]
+ run_privileged(['tee', cls.VIRTUAL_MAILBOXES_FILE], input='\n'.join(lines) + '\n')
+ run_privileged(['postmap', cls.VIRTUAL_MAILBOXES_FILE])
+
+ # Remove aliases for this domain
+ result = run_privileged(['cat', cls.VIRTUAL_ALIASES_FILE])
+ lines = [l for l in (result.stdout or '').splitlines() if f'@{domain}' not in l]
+ run_privileged(['tee', cls.VIRTUAL_ALIASES_FILE], input='\n'.join(lines) + '\n')
+ run_privileged(['postmap', cls.VIRTUAL_ALIASES_FILE])
+
+ run_privileged(['postfix', 'reload'])
+ return {'success': True}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def add_mailbox(cls, email: str, domain: str, username: str) -> Dict:
+ """Add a mailbox to the virtual mailbox map."""
+ try:
+ mailbox_path = f'{domain}/{username}/Maildir/'
+ run_privileged(['tee', '-a', cls.VIRTUAL_MAILBOXES_FILE], input=f'{email} {mailbox_path}\n')
+ run_privileged(['postmap', cls.VIRTUAL_MAILBOXES_FILE])
+ run_privileged(['postfix', 'reload'])
+ return {'success': True}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def remove_mailbox(cls, email: str) -> Dict:
+ """Remove a mailbox from the virtual mailbox map."""
+ try:
+ result = run_privileged(['cat', cls.VIRTUAL_MAILBOXES_FILE])
+ lines = [l for l in (result.stdout or '').splitlines() if not l.startswith(f'{email} ')]
+ run_privileged(['tee', cls.VIRTUAL_MAILBOXES_FILE], input='\n'.join(lines) + '\n')
+ run_privileged(['postmap', cls.VIRTUAL_MAILBOXES_FILE])
+ run_privileged(['postfix', 'reload'])
+ return {'success': True}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def add_alias(cls, source: str, destination: str) -> Dict:
+ """Add a virtual alias."""
+ try:
+ run_privileged(['tee', '-a', cls.VIRTUAL_ALIASES_FILE], input=f'{source} {destination}\n')
+ run_privileged(['postmap', cls.VIRTUAL_ALIASES_FILE])
+ run_privileged(['postfix', 'reload'])
+ return {'success': True}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def remove_alias(cls, source: str) -> Dict:
+ """Remove a virtual alias."""
+ try:
+ result = run_privileged(['cat', cls.VIRTUAL_ALIASES_FILE])
+ lines = [l for l in (result.stdout or '').splitlines() if not l.startswith(f'{source} ')]
+ run_privileged(['tee', cls.VIRTUAL_ALIASES_FILE], input='\n'.join(lines) + '\n')
+ run_privileged(['postmap', cls.VIRTUAL_ALIASES_FILE])
+ run_privileged(['postfix', 'reload'])
+ return {'success': True}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def get_queue(cls) -> Dict:
+ """Get the Postfix mail queue."""
+ try:
+ result = subprocess.run(['mailq'], capture_output=True, text=True)
+ output = result.stdout or ''
+
+ if 'Mail queue is empty' in output:
+ return {'success': True, 'queue': [], 'total': 0}
+
+ queue = []
+ current = None
+
+ for line in output.splitlines():
+ # Queue ID line: starts with hex ID
+                id_match = re.match(r'^([A-F0-9]+)[*!]?\s+(\d+)\s+(\S+\s+\S+\s+\S+\s+\S+)\s+(.+)', line)
+ if id_match:
+ if current:
+ queue.append(current)
+ current = {
+ 'queue_id': id_match.group(1),
+ 'size': int(id_match.group(2)),
+ 'arrival_time': id_match.group(3),
+ 'sender': id_match.group(4),
+ 'recipients': [],
+ 'error': None,
+ }
+ elif current and line.strip().startswith('('):
+ # Error message
+ current['error'] = line.strip().strip('()')
+ elif current and line.strip() and not line.startswith('-'):
+ # Recipient line
+ current['recipients'].append(line.strip())
+
+ if current:
+ queue.append(current)
+
+ return {'success': True, 'queue': queue, 'total': len(queue)}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def flush_queue(cls) -> Dict:
+ """Flush the Postfix mail queue."""
+ try:
+ result = run_privileged(['postfix', 'flush'])
+ if result.returncode == 0:
+ return {'success': True, 'message': 'Mail queue flushed'}
+ return {'success': False, 'error': result.stderr or 'Flush failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def delete_from_queue(cls, queue_id: str) -> Dict:
+ """Delete a message from the mail queue."""
+ try:
+ result = run_privileged(['postsuper', '-d', queue_id])
+ if result.returncode == 0:
+ return {'success': True, 'message': f'Message {queue_id} deleted from queue'}
+ return {'success': False, 'error': result.stderr or 'Delete failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def get_logs(cls, lines: int = 100) -> Dict:
+ """Get recent mail log entries."""
+ try:
+ log_files = ['/var/log/mail.log', '/var/log/maillog']
+ log_file = None
+ for f in log_files:
+ if os.path.exists(f):
+ log_file = f
+ break
+
+ if not log_file:
+ return {'success': True, 'logs': [], 'message': 'No mail log file found'}
+
+ result = run_privileged(['tail', '-n', str(lines), log_file])
+ log_lines = (result.stdout or '').splitlines()
+
+ return {'success': True, 'logs': log_lines, 'total': len(log_lines)}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def reload(cls) -> Dict:
+ """Reload Postfix configuration."""
+ try:
+ result = run_privileged(['postfix', 'reload'])
+ if result.returncode == 0:
+ return {'success': True, 'message': 'Postfix reloaded'}
+ return {'success': False, 'error': result.stderr or 'Reload failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def restart(cls) -> Dict:
+ """Restart Postfix."""
+ try:
+ result = ServiceControl.restart('postfix', timeout=30)
+ if result.returncode == 0:
+ return {'success': True, 'message': 'Postfix restarted'}
+ return {'success': False, 'error': result.stderr or 'Restart failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
diff --git a/backend/app/services/process_service.py b/backend/app/services/process_service.py
index bb42443..7dfb920 100644
--- a/backend/app/services/process_service.py
+++ b/backend/app/services/process_service.py
@@ -3,6 +3,8 @@
import platform
from typing import List, Dict, Optional
+from app.utils.system import run_privileged
+
class ProcessService:
"""Service for process and service management."""
@@ -129,13 +131,15 @@ def control_service(cls, service_name: str, action: str) -> Dict:
try:
if system == 'Linux':
# Try systemctl first (systemd)
- cmd = ['sudo', 'systemctl', action, service_name]
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
+ result = run_privileged(
+ ['systemctl', action, service_name], timeout=30
+ )
if result.returncode != 0:
# Fall back to service command
- cmd = ['sudo', 'service', service_name, action]
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
+ result = run_privileged(
+ ['service', service_name, action], timeout=30
+ )
if result.returncode == 0:
return {'success': True, 'message': f'Service {service_name} {action} successful'}
@@ -158,6 +162,8 @@ def control_service(cls, service_name: str, action: str) -> Dict:
else:
return {'success': False, 'error': f'Unsupported platform: {system}'}
+ except FileNotFoundError:
+ return {'success': False, 'error': 'systemctl/service command not found'}
except subprocess.TimeoutExpired:
return {'success': False, 'error': 'Command timed out'}
except Exception as e:
@@ -165,22 +171,6 @@ def control_service(cls, service_name: str, action: str) -> Dict:
@classmethod
def get_service_logs(cls, service_name: str, lines: int = 100) -> Dict:
- """Get recent logs for a service."""
- system = platform.system()
-
- try:
- if system == 'Linux':
- # Use journalctl for systemd services
- cmd = ['sudo', 'journalctl', '-u', service_name, '-n', str(lines), '--no-pager']
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
-
- if result.returncode == 0:
- return {'success': True, 'logs': result.stdout}
- else:
- return {'success': False, 'error': result.stderr}
-
- else:
- return {'success': False, 'error': 'Log retrieval not supported on this platform'}
-
- except Exception as e:
- return {'success': False, 'error': str(e)}
+ """Get recent logs for a service via LogService fallback chain."""
+ from app.services.log_service import LogService
+ return LogService.get_journalctl_logs(unit=service_name, lines=lines)
diff --git a/backend/app/services/roundcube_service.py b/backend/app/services/roundcube_service.py
new file mode 100644
index 0000000..32a1fbb
--- /dev/null
+++ b/backend/app/services/roundcube_service.py
@@ -0,0 +1,183 @@
+"""Roundcube webmail management service (Docker-based)."""
+import subprocess
+from typing import Dict
+
+from app.utils.system import run_privileged, is_command_available
+
+
+class RoundcubeService:
+ """Service for managing Roundcube webmail via Docker."""
+
+ CONTAINER_NAME = 'serverkit-roundcube'
+ VOLUME_NAME = 'roundcube_data'
+ IMAGE = 'roundcube/roundcubemail:latest'
+ HOST_PORT = 9000
+
+ @classmethod
+ def get_status(cls) -> Dict:
+ """Get Roundcube container status."""
+ if not is_command_available('docker'):
+ return {'installed': False, 'running': False, 'error': 'Docker not available'}
+
+ try:
+ result = subprocess.run(
+ ['docker', 'inspect', '--format', '{{.State.Status}}', cls.CONTAINER_NAME],
+ capture_output=True, text=True,
+ )
+ if result.returncode != 0:
+ return {'installed': False, 'running': False}
+
+ status = result.stdout.strip()
+ return {
+ 'installed': True,
+ 'running': status == 'running',
+ 'status': status,
+ 'port': cls.HOST_PORT,
+ }
+ except Exception as e:
+ return {'installed': False, 'running': False, 'error': str(e)}
+
+ @classmethod
+ def install(cls, imap_host: str = 'host.docker.internal',
+ smtp_host: str = 'host.docker.internal') -> Dict:
+ """Install Roundcube via Docker container."""
+ if not is_command_available('docker'):
+ return {'success': False, 'error': 'Docker is not installed'}
+
+ try:
+ # Remove existing container if any
+ subprocess.run(
+ ['docker', 'rm', '-f', cls.CONTAINER_NAME],
+ capture_output=True, text=True,
+ )
+
+ # Create volume
+ subprocess.run(
+ ['docker', 'volume', 'create', cls.VOLUME_NAME],
+ capture_output=True, text=True,
+ )
+
+ # Run container
+ result = subprocess.run([
+ 'docker', 'run', '-d',
+ '--name', cls.CONTAINER_NAME,
+ '--restart', 'unless-stopped',
+ '--add-host', 'host.docker.internal:host-gateway',
+ '-p', f'{cls.HOST_PORT}:80',
+ '-e', f'ROUNDCUBEMAIL_DEFAULT_HOST=ssl://{imap_host}',
+ '-e', f'ROUNDCUBEMAIL_SMTP_SERVER=tls://{smtp_host}',
+ '-e', 'ROUNDCUBEMAIL_DEFAULT_PORT=993',
+ '-e', 'ROUNDCUBEMAIL_SMTP_PORT=587',
+ '-e', 'ROUNDCUBEMAIL_UPLOAD_MAX_FILESIZE=25M',
+ '-e', 'ROUNDCUBEMAIL_SKIN=elastic',
+ '-v', f'{cls.VOLUME_NAME}:/var/roundcube/db',
+ cls.IMAGE,
+ ], capture_output=True, text=True)
+
+ if result.returncode != 0:
+ return {'success': False, 'error': result.stderr or 'Failed to start container'}
+
+ return {
+ 'success': True,
+ 'message': 'Roundcube installed successfully',
+ 'port': cls.HOST_PORT,
+ 'url': f'http://localhost:{cls.HOST_PORT}',
+ }
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def uninstall(cls) -> Dict:
+ """Stop and remove Roundcube container."""
+ try:
+ subprocess.run(['docker', 'rm', '-f', cls.CONTAINER_NAME], capture_output=True, text=True)
+ return {'success': True, 'message': 'Roundcube uninstalled'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def start(cls) -> Dict:
+ """Start Roundcube container."""
+ try:
+ result = subprocess.run(
+ ['docker', 'start', cls.CONTAINER_NAME],
+ capture_output=True, text=True,
+ )
+ if result.returncode == 0:
+ return {'success': True, 'message': 'Roundcube started'}
+ return {'success': False, 'error': result.stderr or 'Start failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def stop(cls) -> Dict:
+ """Stop Roundcube container."""
+ try:
+ result = subprocess.run(
+ ['docker', 'stop', cls.CONTAINER_NAME],
+ capture_output=True, text=True,
+ )
+ if result.returncode == 0:
+ return {'success': True, 'message': 'Roundcube stopped'}
+ return {'success': False, 'error': result.stderr or 'Stop failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def restart(cls) -> Dict:
+ """Restart Roundcube container."""
+ try:
+ result = subprocess.run(
+ ['docker', 'restart', cls.CONTAINER_NAME],
+ capture_output=True, text=True,
+ )
+ if result.returncode == 0:
+ return {'success': True, 'message': 'Roundcube restarted'}
+ return {'success': False, 'error': result.stderr or 'Restart failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def configure_nginx_proxy(cls, domain: str) -> Dict:
+ """Create Nginx reverse proxy config for Roundcube."""
+ try:
+ from app.services.nginx_service import NginxService
+
+ config = f"""# Roundcube Webmail - Managed by ServerKit
+server {{
+ listen 80;
+ server_name {domain};
+
+ location / {{
+ proxy_pass http://127.0.0.1:{cls.HOST_PORT};
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ client_max_body_size 25m;
+ }}
+}}
+"""
+ site_name = f'roundcube-{domain.replace(".", "-")}'
+ config_path = f'/etc/nginx/sites-available/{site_name}'
+ enabled_path = f'/etc/nginx/sites-enabled/{site_name}'
+
+ run_privileged(['tee', config_path], input=config)
+ run_privileged(['ln', '-sf', config_path, enabled_path])
+
+ # Test and reload
+ test = run_privileged(['nginx', '-t'])
+ if test.returncode != 0:
+ # Rollback
+ run_privileged(['rm', '-f', enabled_path])
+ return {'success': False, 'error': f'Nginx config test failed: {test.stderr}'}
+
+ run_privileged(['systemctl', 'reload', 'nginx'])
+
+ return {
+ 'success': True,
+ 'message': f'Nginx proxy configured for {domain}',
+ 'url': f'http://{domain}',
+ }
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
diff --git a/backend/app/services/settings_service.py b/backend/app/services/settings_service.py
index 7810d78..10e36bd 100644
--- a/backend/app/services/settings_service.py
+++ b/backend/app/services/settings_service.py
@@ -37,7 +37,33 @@ class SettingsService:
'value': False,
'type': 'boolean',
'description': 'Enable developer mode for debugging tools and icon reference'
- }
+ },
+ # SSO / OAuth settings
+ 'sso_google_enabled': {'value': False, 'type': 'boolean', 'description': 'Enable Google OAuth login'},
+ 'sso_google_client_id': {'value': '', 'type': 'string', 'description': 'Google OAuth client ID'},
+ 'sso_google_client_secret': {'value': '', 'type': 'string', 'description': 'Google OAuth client secret'},
+ 'sso_github_enabled': {'value': False, 'type': 'boolean', 'description': 'Enable GitHub OAuth login'},
+ 'sso_github_client_id': {'value': '', 'type': 'string', 'description': 'GitHub OAuth client ID'},
+ 'sso_github_client_secret': {'value': '', 'type': 'string', 'description': 'GitHub OAuth client secret'},
+ 'sso_oidc_enabled': {'value': False, 'type': 'boolean', 'description': 'Enable generic OIDC login'},
+ 'sso_oidc_provider_name': {'value': '', 'type': 'string', 'description': 'OIDC provider display name'},
+ 'sso_oidc_client_id': {'value': '', 'type': 'string', 'description': 'OIDC client ID'},
+ 'sso_oidc_client_secret': {'value': '', 'type': 'string', 'description': 'OIDC client secret'},
+ 'sso_oidc_discovery_url': {'value': '', 'type': 'string', 'description': 'OIDC discovery URL'},
+ 'sso_saml_enabled': {'value': False, 'type': 'boolean', 'description': 'Enable SAML 2.0 login'},
+ 'sso_saml_entity_id': {'value': '', 'type': 'string', 'description': 'SAML SP entity ID'},
+ 'sso_saml_idp_metadata_url': {'value': '', 'type': 'string', 'description': 'SAML IdP metadata URL'},
+ 'sso_saml_idp_sso_url': {'value': '', 'type': 'string', 'description': 'SAML IdP SSO URL'},
+ 'sso_saml_idp_cert': {'value': '', 'type': 'string', 'description': 'SAML IdP certificate (PEM)'},
+ 'sso_auto_provision': {'value': True, 'type': 'boolean', 'description': 'Auto-create users on first SSO login'},
+ 'sso_default_role': {'value': 'developer', 'type': 'string', 'description': 'Default role for SSO-provisioned users'},
+ 'sso_force_sso': {'value': False, 'type': 'boolean', 'description': 'Disable password login (SSO only)'},
+ 'sso_allowed_domains': {'value': [], 'type': 'json', 'description': 'Restrict SSO to these email domains'},
+ # Rate limiting settings
+ 'rate_limit_standard': {'value': '100 per minute', 'type': 'string', 'description': 'Rate limit for standard API keys'},
+ 'rate_limit_elevated': {'value': '500 per minute', 'type': 'string', 'description': 'Rate limit for elevated API keys'},
+ 'rate_limit_unlimited': {'value': '5000 per minute', 'type': 'string', 'description': 'Rate limit for unlimited API keys'},
+ 'rate_limit_unauthenticated': {'value': '30 per minute', 'type': 'string', 'description': 'Rate limit for unauthenticated requests'},
}
@staticmethod
@@ -126,14 +152,18 @@ def set_registration_enabled(enabled, user_id=None):
@staticmethod
def migrate_legacy_roles():
"""Migrate users with 'user' role to 'developer' role."""
- users_to_migrate = User.query.filter_by(role='user').all()
- count = 0
- for user in users_to_migrate:
- user.role = User.ROLE_DEVELOPER
- count += 1
- if count > 0:
- db.session.commit()
- return count
+ try:
+ users_to_migrate = User.query.filter_by(role='user').all()
+ count = 0
+ for user in users_to_migrate:
+ user.role = User.ROLE_DEVELOPER
+ count += 1
+ if count > 0:
+ db.session.commit()
+ return count
+ except Exception:
+ db.session.rollback()
+ return 0
@staticmethod
def ensure_admin_exists():
diff --git a/backend/app/services/spamassassin_service.py b/backend/app/services/spamassassin_service.py
new file mode 100644
index 0000000..1418069
--- /dev/null
+++ b/backend/app/services/spamassassin_service.py
@@ -0,0 +1,253 @@
+"""SpamAssassin management service."""
+import os
+import re
+import subprocess
+from typing import Dict
+
+from app.utils.system import PackageManager, ServiceControl, run_privileged
+
+
+class SpamAssassinService:
+ """Service for managing SpamAssassin spam filtering."""
+
+ SPAMASSASSIN_CONF = '/etc/spamassassin/local.cf'
+ SPAMASSASSIN_DEFAULT = '/etc/default/spamassassin'
+ SPAMASS_MILTER_DEFAULT = '/etc/default/spamass-milter'
+
+ LOCAL_CF_TEMPLATE = """# SpamAssassin local configuration - Managed by ServerKit
+required_score {required_score}
+report_safe {report_safe}
+rewrite_header Subject {rewrite_subject}
+use_bayes {use_bayes}
+bayes_auto_learn {bayes_auto_learn}
+bayes_auto_learn_threshold_nonspam 0.1
+bayes_auto_learn_threshold_spam 12.0
+skip_rbl_checks {skip_rbl_checks}
+use_razor2 0
+use_pyzor 0
+
+# Network checks
+dns_available yes
+
+# Trusted networks
+trusted_networks 127.0.0.0/8
+internal_networks 127.0.0.0/8
+"""
+
+ DEFAULT_CONFIG = {
+ 'required_score': 5.0,
+ 'report_safe': 0,
+ 'rewrite_subject': '[SPAM]',
+ 'use_bayes': 1,
+ 'bayes_auto_learn': 1,
+ 'skip_rbl_checks': 0,
+ }
+
+ @classmethod
+ def get_status(cls) -> Dict:
+ """Get SpamAssassin installation and running status."""
+ installed = False
+ running = False
+ enabled = False
+ version = None
+ milter_installed = False
+ milter_running = False
+
+ try:
+ installed = PackageManager.is_installed('spamassassin')
+ if installed:
+ running = ServiceControl.is_active('spamassassin')
+ enabled = ServiceControl.is_enabled('spamassassin')
+ result = subprocess.run(['spamassassin', '--version'], capture_output=True, text=True)
+ match = re.search(r'version\s+(\S+)', result.stdout)
+ if match:
+ version = match.group(1)
+
+ # Check milter
+ milter_installed = PackageManager.is_installed('spamass-milter')
+ if milter_installed:
+ milter_running = ServiceControl.is_active('spamass-milter')
+ except (subprocess.SubprocessError, FileNotFoundError):
+ pass
+
+ return {
+ 'installed': installed,
+ 'running': running,
+ 'enabled': enabled,
+ 'version': version,
+ 'milter_installed': milter_installed,
+ 'milter_running': milter_running,
+ }
+
+ @classmethod
+ def install(cls) -> Dict:
+ """Install SpamAssassin and spamass-milter."""
+ try:
+ manager = PackageManager.detect()
+ if manager == 'apt':
+ packages = ['spamassassin', 'spamass-milter', 'spamc']
+ else:
+ packages = ['spamassassin', 'spamass-milter-postfix']
+
+ result = PackageManager.install(packages, timeout=300)
+ if result.returncode != 0:
+ return {'success': False, 'error': result.stderr or 'Failed to install SpamAssassin'}
+
+ # Enable spamd on Debian/Ubuntu
+ if manager == 'apt' and os.path.exists(cls.SPAMASSASSIN_DEFAULT):
+ result = run_privileged(['cat', cls.SPAMASSASSIN_DEFAULT])
+ content = result.stdout or ''
+ content = re.sub(r'ENABLED=0', 'ENABLED=1', content)
+ content = re.sub(r'CRON=0', 'CRON=1', content)
+ run_privileged(['tee', cls.SPAMASSASSIN_DEFAULT], input=content)
+
+ # Configure milter to listen on port 8893
+ milter_config = 'OPTIONS="-u spamass-milter -i 127.0.0.1 -p inet:8893@localhost -- --socket=/var/run/spamassassin/spamd.sock"\n'
+ if os.path.exists(cls.SPAMASS_MILTER_DEFAULT):
+ run_privileged(['tee', cls.SPAMASS_MILTER_DEFAULT], input=milter_config)
+
+ # Write default config
+ cls.configure(cls.DEFAULT_CONFIG)
+
+ # Update rules
+ run_privileged(['sa-update'], timeout=120)
+
+ ServiceControl.enable('spamassassin')
+ ServiceControl.start('spamassassin', timeout=30)
+
+ if PackageManager.is_installed('spamass-milter'):
+ ServiceControl.enable('spamass-milter')
+ ServiceControl.start('spamass-milter', timeout=30)
+
+ return {'success': True, 'message': 'SpamAssassin installed successfully'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def configure(cls, settings: Dict = None) -> Dict:
+ """Write SpamAssassin configuration."""
+ try:
+ config = dict(cls.DEFAULT_CONFIG)
+ if settings:
+ config.update(settings)
+ content = cls.LOCAL_CF_TEMPLATE.format(**config)
+ run_privileged(['tee', cls.SPAMASSASSIN_CONF], input=content)
+ return {'success': True, 'message': 'SpamAssassin configured'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def get_config(cls) -> Dict:
+ """Get current SpamAssassin configuration."""
+ try:
+ if not os.path.exists(cls.SPAMASSASSIN_CONF):
+ return {'success': True, 'config': dict(cls.DEFAULT_CONFIG)}
+
+ result = run_privileged(['cat', cls.SPAMASSASSIN_CONF])
+ content = result.stdout or ''
+
+ config = dict(cls.DEFAULT_CONFIG)
+
+ # Parse values
+ score_match = re.search(r'required_score\s+(\S+)', content)
+ if score_match:
+ config['required_score'] = float(score_match.group(1))
+
+ report_match = re.search(r'report_safe\s+(\d+)', content)
+ if report_match:
+ config['report_safe'] = int(report_match.group(1))
+
+ subject_match = re.search(r'rewrite_header Subject\s+(.+)', content)
+ if subject_match:
+ config['rewrite_subject'] = subject_match.group(1).strip()
+
+ bayes_match = re.search(r'use_bayes\s+(\d+)', content)
+ if bayes_match:
+ config['use_bayes'] = int(bayes_match.group(1))
+
+ auto_learn_match = re.search(r'bayes_auto_learn\s+(\d+)', content)
+ if auto_learn_match:
+ config['bayes_auto_learn'] = int(auto_learn_match.group(1))
+
+ rbl_match = re.search(r'skip_rbl_checks\s+(\d+)', content)
+ if rbl_match:
+ config['skip_rbl_checks'] = int(rbl_match.group(1))
+
+ return {'success': True, 'config': config}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def update_rules(cls) -> Dict:
+ """Update SpamAssassin rules."""
+ try:
+ result = run_privileged(['sa-update'], timeout=120)
+ # sa-update returns 0 for updates, 1 for no updates, 2+ for errors
+ if result.returncode <= 1:
+ ServiceControl.restart('spamassassin', timeout=30)
+ return {
+ 'success': True,
+ 'message': 'Rules updated' if result.returncode == 0 else 'Rules already up to date',
+ 'updated': result.returncode == 0,
+ }
+ return {'success': False, 'error': result.stderr or 'Update failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def train_spam(cls, message_path: str) -> Dict:
+ """Train SpamAssassin with a spam message."""
+ try:
+ result = run_privileged(['sa-learn', '--spam', message_path])
+ if result.returncode == 0:
+ return {'success': True, 'message': 'Message learned as spam'}
+ return {'success': False, 'error': result.stderr or 'Training failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def train_ham(cls, message_path: str) -> Dict:
+ """Train SpamAssassin with a ham (non-spam) message."""
+ try:
+ result = run_privileged(['sa-learn', '--ham', message_path])
+ if result.returncode == 0:
+ return {'success': True, 'message': 'Message learned as ham'}
+ return {'success': False, 'error': result.stderr or 'Training failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def get_stats(cls) -> Dict:
+ """Get SpamAssassin Bayes statistics."""
+ try:
+ result = run_privileged(['sa-learn', '--dump', 'magic'])
+ output = result.stdout or ''
+ stats = {
+ 'nspam': 0,
+ 'nham': 0,
+ 'ntokens': 0,
+ }
+ for line in output.splitlines():
+ parts = line.split()
+ if len(parts) >= 4:
+ if parts[2] == 'nspam':
+ stats['nspam'] = int(parts[1])
+ elif parts[2] == 'nham':
+ stats['nham'] = int(parts[1])
+ elif parts[2] == 'ntokens':
+ stats['ntokens'] = int(parts[1])
+
+ return {'success': True, 'stats': stats}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
+
+ @classmethod
+ def reload(cls) -> Dict:
+ """Reload SpamAssassin."""
+ try:
+ result = ServiceControl.restart('spamassassin', timeout=30)
+ if result.returncode == 0:
+ return {'success': True, 'message': 'SpamAssassin restarted'}
+ return {'success': False, 'error': result.stderr or 'Restart failed'}
+ except Exception as e:
+ return {'success': False, 'error': str(e)}
diff --git a/backend/app/services/ssl_service.py b/backend/app/services/ssl_service.py
index 536b7b2..9ea73f6 100644
--- a/backend/app/services/ssl_service.py
+++ b/backend/app/services/ssl_service.py
@@ -5,7 +5,7 @@
from typing import Dict, List, Optional
from pathlib import Path
-from app.utils.system import ServiceControl, run_privileged
+from app.utils.system import ServiceControl, run_privileged, PackageManager, is_command_available
class SSLService:
@@ -18,26 +18,17 @@ class SSLService:
@classmethod
def is_certbot_installed(cls) -> bool:
"""Check if certbot is installed."""
- try:
- result = subprocess.run(
- ['which', 'certbot'],
- capture_output=True,
- text=True
- )
- return result.returncode == 0
- except Exception:
- return False
+ return is_command_available('certbot')
@classmethod
def install_certbot(cls) -> Dict:
"""Install certbot if not present."""
- try:
- result = run_privileged(['apt-get', 'update'], timeout=300)
- if result.returncode != 0:
- return {'success': False, 'error': result.stderr}
+ if not PackageManager.is_available():
+ return {'success': False, 'error': 'No supported package manager found'}
- result = run_privileged(
- ['apt-get', 'install', '-y', 'certbot', 'python3-certbot-nginx'],
+ try:
+ result = PackageManager.install(
+ ['certbot', 'python3-certbot-nginx'],
timeout=300,
)
if result.returncode != 0:
@@ -180,7 +171,7 @@ def list_certificates(cls) -> List[Dict]:
expiry_part = expiry_str.split(' (')[0]
current_cert['expiry'] = expiry_part
current_cert['expiry_valid'] = 'VALID' in expiry_str
- except:
+ except Exception:
current_cert['expiry'] = expiry_str
elif line.startswith('Certificate Path:'):
current_cert['cert_path'] = line.split(':', 1)[1].strip()
@@ -190,7 +181,7 @@ def list_certificates(cls) -> List[Dict]:
if current_cert:
certificates.append(current_cert)
- except Exception as e:
+ except Exception:
pass
return certificates
@@ -226,7 +217,7 @@ def get_certificate_info(cls, domain: str) -> Optional[Dict]:
return info
- except Exception as e:
+ except Exception:
return None
@classmethod
diff --git a/backend/app/services/sso_service.py b/backend/app/services/sso_service.py
new file mode 100644
index 0000000..2022bc7
--- /dev/null
+++ b/backend/app/services/sso_service.py
@@ -0,0 +1,503 @@
+"""SSO / OAuth 2.0 / SAML service — handles external identity authentication."""
+import hashlib
+import logging
+import secrets
+from datetime import datetime
+
+from authlib.integrations.requests_client import OAuth2Session
+from cryptography.fernet import Fernet
+from flask import current_app, session
+import base64
+import requests as http_requests
+
+from app import db
+from app.models import User, AuditLog
+from app.models.oauth_identity import OAuthIdentity
+from app.services.settings_service import SettingsService
+
+logger = logging.getLogger(__name__)
+
# Built-in provider endpoint configs.
# Static OAuth 2.0 endpoint/scope definitions for the providers that ship
# with ServerKit. Generic OIDC providers are configured at runtime from
# their discovery document instead (see _fetch_oidc_discovery).
PROVIDER_ENDPOINTS = {
    'google': {
        'authorize_url': 'https://accounts.google.com/o/oauth2/v2/auth',
        'token_url': 'https://oauth2.googleapis.com/token',
        'userinfo_url': 'https://openidconnect.googleapis.com/v1/userinfo',
        # Standard OIDC scopes: subject id, email, basic profile.
        'scopes': ['openid', 'email', 'profile'],
    },
    'github': {
        'authorize_url': 'https://github.com/login/oauth/authorize',
        'token_url': 'https://github.com/login/oauth/access_token',
        'userinfo_url': 'https://api.github.com/user',
        # GitHub may omit email from /user; emails_url is the fallback
        # queried by _normalize_profile.
        'emails_url': 'https://api.github.com/user/emails',
        'scopes': ['read:user', 'user:email'],
    },
}
+
+
def _get_fernet():
    """Build a Fernet cipher keyed by the app SECRET_KEY.

    Fernet requires a urlsafe-base64 32-byte key, so the SECRET_KEY is
    first reduced with SHA-256.
    """
    secret = current_app.config['SECRET_KEY'].encode('utf-8')
    fernet_key = base64.urlsafe_b64encode(hashlib.sha256(secret).digest())
    return Fernet(fernet_key)
+
+
def encrypt_token(token):
    """Encrypt an OAuth token for at-rest storage; None/empty passes through."""
    if not token:
        return None
    ciphertext = _get_fernet().encrypt(token.encode('utf-8'))
    return ciphertext.decode('utf-8')
+
+
def decrypt_token(encrypted):
    """Decrypt a stored token; returns None for empty or undecryptable input."""
    if not encrypted:
        return None
    try:
        plaintext = _get_fernet().decrypt(encrypted.encode('utf-8'))
    except Exception:
        # Wrong key or corrupted ciphertext — treat as absent.
        return None
    return plaintext.decode('utf-8')
+
+
def get_enabled_providers():
    """Return the enabled SSO providers, shaped for the login page."""
    oidc_name = SettingsService.get('sso_oidc_provider_name', '') or 'OIDC'
    candidates = [
        ('sso_google_enabled', 'google', 'Google'),
        ('sso_github_enabled', 'github', 'GitHub'),
        ('sso_oidc_enabled', 'oidc', oidc_name),
        ('sso_saml_enabled', 'saml', 'SAML'),
    ]
    return [
        {'id': provider_id, 'name': display_name}
        for setting_key, provider_id, display_name in candidates
        if SettingsService.get(setting_key, False)
    ]
+
+
def is_password_login_allowed():
    """Password login is allowed unless the force-SSO setting is on."""
    force_sso = SettingsService.get('sso_force_sso', False)
    return not force_sso
+
+
def get_provider_config(provider):
    """Full config for a provider (internal use — includes secrets).

    Collects every default setting whose key starts with ``sso_<provider>_``
    and returns it under the short (prefix-stripped) name.
    """
    prefix = f'sso_{provider}_'
    return {
        key[len(prefix):]: SettingsService.get(
            key, SettingsService.DEFAULT_SETTINGS[key]['value']
        )
        for key in SettingsService.DEFAULT_SETTINGS
        if key.startswith(prefix)
    }
+
+
+# ------------------------------------------------------------------
+# OAuth flow helpers
+# ------------------------------------------------------------------
+
def generate_auth_url(provider, redirect_uri):
    """Build the OAuth authorize URL with PKCE, return (auth_url, state).

    Note: query parameters are percent-encoded with ``urllib.parse.urlencode``
    — the previous naive ``'&'.join(f'{k}={v}')`` emitted the redirect URI
    and the space-separated scope list unescaped, producing an invalid URL.

    Args:
        provider: 'google', 'github' or 'oidc'.
        redirect_uri: Callback URL registered with the provider.

    Returns:
        Tuple of (authorize URL, CSRF state token).

    Raises:
        ValueError: for providers with no OAuth authorize flow (e.g. saml).
    """
    from urllib.parse import urlencode  # stdlib; local to keep module imports unchanged

    cfg = get_provider_config(provider)

    if provider in ('google', 'github'):
        endpoints = PROVIDER_ENDPOINTS[provider]
        authorize_url = endpoints['authorize_url']
        scopes = endpoints['scopes']
        client_id = cfg.get('client_id', '')
    elif provider == 'oidc':
        discovery = _fetch_oidc_discovery(cfg.get('discovery_url', ''))
        authorize_url = discovery.get('authorization_endpoint', '')
        scopes = ['openid', 'email', 'profile']
        client_id = cfg.get('client_id', '')
    else:
        raise ValueError(f'OAuth authorize not supported for {provider}')

    # PKCE: S256 challenge derived from a high-entropy verifier (RFC 7636).
    state = secrets.token_urlsafe(32)
    code_verifier = secrets.token_urlsafe(64)
    code_challenge = base64.urlsafe_b64encode(
        hashlib.sha256(code_verifier.encode('ascii')).digest()
    ).rstrip(b'=').decode('ascii')

    # One-shot values consumed (and validated) by handle_oauth_callback.
    session['sso_state'] = state
    session['sso_code_verifier'] = code_verifier
    session['sso_provider'] = provider

    params = {
        'client_id': client_id,
        'redirect_uri': redirect_uri,
        'response_type': 'code',
        'scope': ' '.join(scopes),
        'state': state,
        'code_challenge': code_challenge,
        'code_challenge_method': 'S256',
    }

    if provider == 'google':
        # Ask for a refresh token and always show the account chooser.
        params['access_type'] = 'offline'
        params['prompt'] = 'select_account'

    return f'{authorize_url}?{urlencode(params)}', state
+
+
def handle_oauth_callback(provider, code, state, redirect_uri):
    """Exchange an authorization code for tokens and fetch the user profile.

    Validates the CSRF ``state`` AND that the callback arrived for the same
    provider that initiated the flow — ``generate_auth_url`` stores
    ``sso_provider`` in the session but it was previously never checked (or
    popped), so a callback for provider A could complete a flow started for
    provider B.

    Args:
        provider: 'google', 'github' or 'oidc'.
        code: Authorization code returned by the provider.
        state: State value echoed back by the provider.
        redirect_uri: The redirect URI used when starting the flow.

    Returns:
        Normalized profile dict (see _normalize_profile) with a ``_tokens``
        entry holding access/refresh tokens and expiry.

    Raises:
        ValueError: on state mismatch, provider mismatch, or unsupported
            provider.
    """
    # Pop all one-shot session values so they cannot be replayed.
    expected_state = session.pop('sso_state', None)
    code_verifier = session.pop('sso_code_verifier', None)
    started_provider = session.pop('sso_provider', None)
    if not expected_state or state != expected_state:
        raise ValueError('Invalid OAuth state — possible CSRF')
    if started_provider is not None and started_provider != provider:
        raise ValueError('OAuth provider mismatch — callback does not match the started flow')

    cfg = get_provider_config(provider)

    if provider in ('google', 'github'):
        endpoints = PROVIDER_ENDPOINTS[provider]
        token_url = endpoints['token_url']
        userinfo_url = endpoints['userinfo_url']
    elif provider == 'oidc':
        discovery = _fetch_oidc_discovery(cfg.get('discovery_url', ''))
        token_url = discovery.get('token_endpoint', '')
        userinfo_url = discovery.get('userinfo_endpoint', '')
    else:
        raise ValueError(f'OAuth callback not supported for {provider}')
    client_id = cfg.get('client_id', '')
    client_secret = cfg.get('client_secret', '')

    # Exchange code for tokens; the PKCE verifier proves we started the flow.
    oauth = OAuth2Session(
        client_id=client_id,
        client_secret=client_secret,
        code_challenge_method='S256',
    )
    token_resp = oauth.fetch_token(
        token_url,
        code=code,
        redirect_uri=redirect_uri,
        code_verifier=code_verifier,
    )

    access_token = token_resp.get('access_token', '')
    refresh_tok = token_resp.get('refresh_token')

    # Fetch user info with the fresh access token.
    headers = {'Authorization': f'Bearer {access_token}'}
    if provider == 'github':
        headers['Accept'] = 'application/vnd.github+json'

    resp = http_requests.get(userinfo_url, headers=headers, timeout=10)
    resp.raise_for_status()
    info = resp.json()

    profile = _normalize_profile(provider, info, headers)
    profile['_tokens'] = {
        'access_token': access_token,
        'refresh_token': refresh_tok,
        'expires_at': token_resp.get('expires_at'),
    }
    return profile
+
+
+def _normalize_profile(provider, info, headers=None):
+ """Convert provider-specific userinfo into a standard dict."""
+ if provider == 'google':
+ return {
+ 'provider_user_id': info.get('sub', ''),
+ 'email': info.get('email', ''),
+ 'display_name': info.get('name', ''),
+ }
+ elif provider == 'github':
+ email = info.get('email') or ''
+ if not email and headers:
+ # GitHub may not include email in profile; fetch from /user/emails
+ try:
+ emails_url = PROVIDER_ENDPOINTS['github']['emails_url']
+ r = http_requests.get(emails_url, headers=headers, timeout=10)
+ r.raise_for_status()
+ for e in r.json():
+ if e.get('primary') and e.get('verified'):
+ email = e['email']
+ break
+ except Exception:
+ pass
+ return {
+ 'provider_user_id': str(info.get('id', '')),
+ 'email': email,
+ 'display_name': info.get('name') or info.get('login', ''),
+ }
+ else:
+ # Generic OIDC
+ return {
+ 'provider_user_id': info.get('sub', ''),
+ 'email': info.get('email', ''),
+ 'display_name': info.get('name', ''),
+ }
+
+
+# ------------------------------------------------------------------
+# SAML helpers
+# ------------------------------------------------------------------
+
def get_saml_settings(provider_config, request_data):
    """Assemble the python3-saml settings structure.

    Args:
        provider_config: Stored SAML provider settings (entity_id,
            idp_sso_url, idp_cert, ...).
        request_data: Per-request values (acs_url, optional sp_entity_id
            fallback).

    Returns:
        Settings dict in the shape expected by OneLogin_Saml2_Auth.
    """
    sp_entity = provider_config.get('entity_id', '') or request_data.get('sp_entity_id', '')

    sp_section = {
        'entityId': sp_entity,
        'assertionConsumerService': {
            'url': request_data.get('acs_url', ''),
            'binding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST',
        },
        'NameIDFormat': 'urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress',
    }
    idp_section = {
        'entityId': provider_config.get('entity_id', ''),
        'singleSignOnService': {
            'url': provider_config.get('idp_sso_url', ''),
            'binding': 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect',
        },
        'x509cert': provider_config.get('idp_cert', ''),
    }

    return {
        'strict': True,
        'debug': False,
        'sp': sp_section,
        'idp': idp_section,
    }
+
+
def handle_saml_callback(saml_response_data, request_data):
    """Validate a SAML response and extract a normalized user profile.

    Args:
        saml_response_data: POST form data carrying the SAMLResponse.
        request_data: Request metadata (https flag, http_host, script_name)
            that python3-saml needs to reconstruct the original request URL
            for signature/destination validation.

    Returns:
        Profile dict with ``provider_user_id``, ``email``, ``display_name``
        and an empty ``_tokens`` mapping (SAML issues no OAuth tokens).

    Raises:
        RuntimeError: if python3-saml is not installed.
        ValueError: if the response fails validation or is unauthenticated.
    """
    # python3-saml is an optional dependency; fail with a clear message.
    try:
        from onelogin.saml2.auth import OneLogin_Saml2_Auth
    except ImportError:
        raise RuntimeError('python3-saml is not installed')

    cfg = get_provider_config('saml')
    saml_settings = get_saml_settings(cfg, request_data)

    # Minimal fake-request dict in the shape OneLogin_Saml2_Auth expects.
    saml_req = {
        'https': 'on' if request_data.get('https') else 'off',
        'http_host': request_data.get('http_host', ''),
        'script_name': request_data.get('script_name', ''),
        'post_data': saml_response_data,
    }

    auth = OneLogin_Saml2_Auth(saml_req, saml_settings)
    auth.process_response()

    # process_response() records validation failures rather than raising.
    errors = auth.get_errors()
    if errors:
        raise ValueError(f'SAML validation failed: {", ".join(errors)}')

    if not auth.is_authenticated():
        raise ValueError('SAML authentication failed')

    attrs = auth.get_attributes()
    name_id = auth.get_nameid()

    # Fall back to the NameID when the IdP sends no email attribute;
    # attribute values arrive as lists, so take the first element.
    return {
        'provider_user_id': name_id,
        'email': attrs.get('email', [name_id])[0] if attrs.get('email') else name_id,
        'display_name': attrs.get('displayName', [''])[0] if attrs.get('displayName') else '',
        '_tokens': {},
    }
+
+
+# ------------------------------------------------------------------
+# User linking / provisioning
+# ------------------------------------------------------------------
+
def find_or_create_user(provider, profile):
    """
    Resolve an SSO profile to a local User:

    1. Check OAuthIdentity by (provider, provider_user_id) → return linked user
    2. Check User by email → auto-link identity
    3. Auto-provision if enabled

    Args:
        provider: Provider id ('google', 'github', 'oidc', 'saml').
        profile: Normalized profile dict (see _normalize_profile); may carry
            a ``_tokens`` entry with access/refresh tokens.

    Returns:
        Tuple of (user, is_new_user).

    Raises:
        ValueError: domain not allowed, account deactivated, provisioning
            disabled, or no email returned by the provider.
    """
    email = profile.get('email', '').lower().strip()

    # Enforce allowed domains before touching the database.
    allowed_domains = SettingsService.get('sso_allowed_domains', [])
    if allowed_domains and email:
        domain = email.split('@')[-1] if '@' in email else ''
        if domain not in allowed_domains:
            raise ValueError(f'Email domain @{domain} is not allowed for SSO login')

    # 1. Check existing identity link
    identity = OAuthIdentity.query.filter_by(
        provider=provider,
        provider_user_id=profile['provider_user_id'],
    ).first()

    if identity:
        user = identity.user
        if not user.is_active:
            raise ValueError('Account is deactivated')
        # Refresh login timestamp and stored tokens on every SSO login.
        identity.last_login_at = datetime.utcnow()
        _update_identity_tokens(identity, profile.get('_tokens', {}))
        db.session.commit()
        return user, False

    # 2. Check existing user by email
    user = User.query.filter_by(email=email).first() if email else None
    if user:
        if not user.is_active:
            raise ValueError('Account is deactivated')
        # link_identity commits the new identity row itself.
        link_identity(user.id, provider, profile, profile.get('_tokens', {}))
        return user, False

    # 3. Auto-provision
    if not SettingsService.get('sso_auto_provision', True):
        raise ValueError('No matching account found and auto-provisioning is disabled')

    if not email:
        raise ValueError('SSO provider did not return an email address')

    default_role = SettingsService.get('sso_default_role', 'developer')
    username = _generate_username(email, profile.get('display_name', ''))

    user = User(
        email=email,
        username=username,
        role=default_role,
        auth_provider=provider,
    )
    db.session.add(user)
    # flush() assigns user.id before the identity row references it.
    db.session.flush()

    link_identity(user.id, provider, profile, profile.get('_tokens', {}))

    AuditLog.log(
        action=AuditLog.ACTION_SSO_PROVISION,
        user_id=user.id,
        target_type='user',
        target_id=user.id,
        details={'provider': provider, 'email': email, 'role': default_role},
    )
    db.session.commit()
    return user, True
+
+
def link_identity(user_id, provider, profile, tokens=None):
    """Persist an OAuthIdentity row linking *user_id* to an external account.

    Writes an audit-log entry and commits. Returns the new identity.
    """
    identity = OAuthIdentity(
        user_id=user_id,
        provider=provider,
        provider_user_id=profile['provider_user_id'],
        provider_email=profile.get('email'),
        provider_display_name=profile.get('display_name'),
        last_login_at=datetime.utcnow(),
    )
    _update_identity_tokens(identity, tokens or {})
    db.session.add(identity)

    AuditLog.log(
        action=AuditLog.ACTION_SSO_LINK,
        user_id=user_id,
        target_type='user',
        target_id=user_id,
        details={'provider': provider},
    )
    db.session.commit()
    return identity
+
+
def unlink_identity(user_id, provider):
    """Remove an OAuth identity link.

    Refuses to remove the last remaining authentication method (no password
    and only one linked identity) so the user cannot be locked out.

    Raises:
        ValueError: unknown user, no such link, or last auth method.
    """
    user = User.query.get(user_id)
    if user is None:
        raise ValueError('User not found')

    identity = OAuthIdentity.query.filter_by(user_id=user_id, provider=provider).first()
    if identity is None:
        raise ValueError(f'No {provider} identity linked')

    # Guard: keep at least one way to sign in.
    linked_total = OAuthIdentity.query.filter_by(user_id=user_id).count()
    if not user.has_password and linked_total <= 1:
        raise ValueError('Cannot unlink the only authentication method. Set a password first.')

    db.session.delete(identity)
    AuditLog.log(
        action=AuditLog.ACTION_SSO_UNLINK,
        user_id=user_id,
        target_type='user',
        target_id=user_id,
        details={'provider': provider},
    )
    db.session.commit()
+
+
+# ------------------------------------------------------------------
+# Internal helpers
+# ------------------------------------------------------------------
+
+def _update_identity_tokens(identity, tokens):
+ if tokens.get('access_token'):
+ identity.access_token_encrypted = encrypt_token(tokens['access_token'])
+ if tokens.get('refresh_token'):
+ identity.refresh_token_encrypted = encrypt_token(tokens['refresh_token'])
+ if tokens.get('expires_at'):
+ try:
+ identity.token_expires_at = datetime.utcfromtimestamp(float(tokens['expires_at']))
+ except (ValueError, TypeError):
+ pass
+
+
def _generate_username(email, display_name):
    """Derive a unique username, preferring display name over email local part.

    Lowercases, replaces spaces with underscores, strips everything but
    alphanumerics/underscores, caps the base at 60 chars, then appends
    ``_1``, ``_2``, ... until the name is free.
    """
    if display_name:
        candidate = display_name.strip().lower().replace(' ', '_')
    else:
        candidate = email.split('@')[0]
    candidate = ''.join(ch for ch in candidate if ch.isalnum() or ch == '_')[:60]
    candidate = candidate or 'user'

    unique = candidate
    counter = 1
    while User.query.filter_by(username=unique).first() is not None:
        unique = f'{candidate}_{counter}'
        counter += 1
    return unique
+
+
def _fetch_oidc_discovery(discovery_url):
    """Fetch the OIDC discovery document from the IdP.

    Raises:
        ValueError: when no discovery URL is configured.
        requests.HTTPError: when the IdP responds with an error status.
    """
    if not discovery_url:
        raise ValueError('OIDC discovery URL not configured')
    response = http_requests.get(discovery_url, timeout=10)
    response.raise_for_status()
    return response.json()
+
+
def test_provider_connectivity(provider):
    """Best-effort configuration/reachability check for one SSO provider.

    Returns a dict with ``ok`` plus ``message`` or ``error``; never raises.
    """
    cfg = get_provider_config(provider)

    if provider in ('google', 'github'):
        # Hosted providers: only the client id can be validated locally.
        if not cfg.get('client_id', ''):
            return {'ok': False, 'error': 'Client ID not configured'}
        label = 'Google' if provider == 'google' else 'GitHub'
        return {'ok': True, 'message': f'{label} OAuth endpoints reachable'}

    if provider == 'oidc':
        try:
            doc = _fetch_oidc_discovery(cfg.get('discovery_url', ''))
        except Exception as exc:
            return {'ok': False, 'error': str(exc)}
        if 'authorization_endpoint' not in doc:
            return {'ok': False, 'error': 'Discovery document missing authorization_endpoint'}
        return {'ok': True, 'message': 'OIDC discovery successful'}

    if provider == 'saml':
        if not cfg.get('idp_sso_url', ''):
            return {'ok': False, 'error': 'IdP SSO URL not configured'}
        if not cfg.get('idp_cert', ''):
            return {'ok': False, 'error': 'IdP certificate not configured'}
        return {'ok': True, 'message': 'SAML configuration looks valid'}

    return {'ok': False, 'error': f'Unknown provider: {provider}'}
diff --git a/backend/app/services/system_service.py b/backend/app/services/system_service.py
index e52f39e..194ba08 100644
--- a/backend/app/services/system_service.py
+++ b/backend/app/services/system_service.py
@@ -4,6 +4,8 @@
import os
from datetime import datetime
+from app.utils.system import run_privileged
+
class SystemService:
"""Service for collecting system metrics and information."""
@@ -375,23 +377,23 @@ def set_timezone(cls, timezone_id):
try:
# Try timedatectl first (systemd)
- result = subprocess.run(
- ['sudo', 'timedatectl', 'set-timezone', timezone_id],
- capture_output=True, text=True, timeout=10
+ result = run_privileged(
+ ['timedatectl', 'set-timezone', timezone_id],
+ timeout=10
)
if result.returncode == 0:
return {'success': True, 'message': f'Timezone set to {timezone_id}'}
# Fallback: symlink method
- result = subprocess.run(
- ['sudo', 'ln', '-sf', f'/usr/share/zoneinfo/{timezone_id}', '/etc/localtime'],
- capture_output=True, text=True, timeout=10
+ result = run_privileged(
+ ['ln', '-sf', f'/usr/share/zoneinfo/{timezone_id}', '/etc/localtime'],
+ timeout=10
)
if result.returncode == 0:
# Also update /etc/timezone
- subprocess.run(
- ['sudo', 'bash', '-c', f'echo "{timezone_id}" > /etc/timezone'],
- capture_output=True, text=True, timeout=10
+ run_privileged(
+ ['bash', '-c', f'echo "{timezone_id}" > /etc/timezone'],
+ timeout=10
)
return {'success': True, 'message': f'Timezone set to {timezone_id}'}
diff --git a/backend/app/services/wordpress_service.py b/backend/app/services/wordpress_service.py
index 06ef72b..6aa14bb 100644
--- a/backend/app/services/wordpress_service.py
+++ b/backend/app/services/wordpress_service.py
@@ -9,6 +9,7 @@
from pathlib import Path
from app import paths
+from app.utils.system import run_privileged, privileged_cmd
class WordPressService:
@@ -41,7 +42,6 @@ def install_wp_cli(cls) -> Dict:
commands = [
['curl', '-O', 'https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar'],
['chmod', '+x', 'wp-cli.phar'],
- ['sudo', 'mv', 'wp-cli.phar', cls.WP_CLI_PATH]
]
for cmd in commands:
@@ -49,6 +49,10 @@ def install_wp_cli(cls) -> Dict:
if result.returncode != 0:
return {'success': False, 'error': result.stderr}
+ result = run_privileged(['mv', 'wp-cli.phar', cls.WP_CLI_PATH], timeout=120)
+ if result.returncode != 0:
+ return {'success': False, 'error': result.stderr}
+
return {'success': True, 'message': 'WP-CLI installed successfully'}
except Exception as e:
return {'success': False, 'error': str(e)}
@@ -67,7 +71,7 @@ def wp_cli(cls, path: str, command: List[str], user: str = 'www-data') -> Dict:
return install_result
try:
- cmd = ['sudo', '-u', user, cls.WP_CLI_PATH, '--path=' + path] + command
+ cmd = privileged_cmd([cls.WP_CLI_PATH, '--path=' + path] + command, user=user)
result = subprocess.run(
cmd,
capture_output=True,
@@ -157,8 +161,8 @@ def install_wordpress(cls, path: str, config: Dict) -> Dict:
try:
# Create directory
- subprocess.run(['sudo', 'mkdir', '-p', path], capture_output=True)
- subprocess.run(['sudo', 'chown', 'www-data:www-data', path], capture_output=True)
+ run_privileged(['mkdir', '-p', path])
+ run_privileged(['chown', 'www-data:www-data', path])
# Download WordPress
download_result = cls.wp_cli(path, ['core', 'download', '--locale=en_US'])
@@ -228,7 +232,7 @@ def get_wordpress_info(cls, path: str) -> Optional[Dict]:
updates = json.loads(update_result['output'])
info['update_available'] = len(updates) > 0
info['latest_version'] = updates[0]['version'] if updates else info.get('version')
- except:
+ except Exception:
info['update_available'] = False
# Get site URL
@@ -265,7 +269,7 @@ def get_plugins(cls, path: str) -> List[Dict]:
if result['success']:
try:
return json.loads(result['output'])
- except:
+ except Exception:
return []
return []
@@ -325,7 +329,7 @@ def get_themes(cls, path: str) -> List[Dict]:
if result['success']:
try:
return json.loads(result['output'])
- except:
+ except Exception:
return []
return []
@@ -357,13 +361,12 @@ def backup_wordpress(cls, path: str, include_db: bool = True) -> Dict:
try:
# Create backup directory
- subprocess.run(['sudo', 'mkdir', '-p', backup_path], capture_output=True)
+ run_privileged(['mkdir', '-p', backup_path])
# Backup files
files_backup = os.path.join(backup_path, 'files.tar.gz')
- subprocess.run(
- ['sudo', 'tar', '-czf', files_backup, '-C', os.path.dirname(path), os.path.basename(path)],
- capture_output=True,
+ run_privileged(
+ ['tar', '-czf', files_backup, '-C', os.path.dirname(path), os.path.basename(path)],
timeout=600
)
@@ -379,7 +382,7 @@ def backup_wordpress(cls, path: str, include_db: bool = True) -> Dict:
size = sum(os.path.getsize(os.path.join(backup_path, f))
for f in os.listdir(backup_path)
if os.path.isfile(os.path.join(backup_path, f)))
- except:
+ except Exception:
size = 0
return {
@@ -433,7 +436,7 @@ def list_backups(cls, site_name: str = None) -> List[Dict]:
'size': size,
'timestamp': timestamp
})
- except:
+ except Exception:
pass
return sorted(backups, key=lambda x: x['timestamp'], reverse=True)
@@ -454,12 +457,11 @@ def restore_backup(cls, backup_name: str, target_path: str) -> Dict:
if os.path.exists(files_backup):
# Remove existing files
if os.path.exists(target_path):
- subprocess.run(['sudo', 'rm', '-rf', target_path], capture_output=True)
+ run_privileged(['rm', '-rf', target_path])
# Extract backup
- subprocess.run(
- ['sudo', 'tar', '-xzf', files_backup, '-C', os.path.dirname(target_path)],
- capture_output=True,
+ run_privileged(
+ ['tar', '-xzf', files_backup, '-C', os.path.dirname(target_path)],
timeout=600
)
@@ -486,7 +488,7 @@ def delete_backup(cls, backup_name: str) -> Dict:
return {'success': False, 'error': 'Backup not found'}
try:
- subprocess.run(['sudo', 'rm', '-rf', backup_path], capture_output=True)
+ run_privileged(['rm', '-rf', backup_path])
return {'success': True, 'message': 'Backup deleted'}
except Exception as e:
return {'success': False, 'error': str(e)}
@@ -531,26 +533,24 @@ def _set_permissions(cls, path: str):
"""Set secure file permissions for WordPress."""
try:
# Set ownership
- subprocess.run(['sudo', 'chown', '-R', 'www-data:www-data', path], capture_output=True)
+ run_privileged(['chown', '-R', 'www-data:www-data', path])
# Set directory permissions
- subprocess.run(
- ['sudo', 'find', path, '-type', 'd', '-exec', 'chmod', '755', '{}', ';'],
- capture_output=True
+ run_privileged(
+ ['find', path, '-type', 'd', '-exec', 'chmod', '755', '{}', ';']
)
# Set file permissions
- subprocess.run(
- ['sudo', 'find', path, '-type', 'f', '-exec', 'chmod', '644', '{}', ';'],
- capture_output=True
+ run_privileged(
+ ['find', path, '-type', 'f', '-exec', 'chmod', '644', '{}', ';']
)
# Protect wp-config.php
wp_config = os.path.join(path, 'wp-config.php')
if os.path.exists(wp_config):
- subprocess.run(['sudo', 'chmod', '600', wp_config], capture_output=True)
+ run_privileged(['chmod', '600', wp_config])
- except:
+ except Exception:
pass
@classmethod
@@ -598,13 +598,11 @@ def _create_htaccess_security(cls, path: str):
# Only add if not already present
if '# ServerKit Security Rules' not in existing:
new_content = security_rules + '\n' + existing
- subprocess.run(
- ['sudo', 'tee', htaccess_path],
- input=new_content,
- capture_output=True,
- text=True
+ run_privileged(
+ ['tee', htaccess_path],
+ input=new_content
)
- except:
+ except Exception:
pass
@classmethod
diff --git a/backend/app/utils/system.py b/backend/app/utils/system.py
index 84231ab..88047f3 100644
--- a/backend/app/utils/system.py
+++ b/backend/app/utils/system.py
@@ -10,27 +10,57 @@
from typing import List, Optional, Union
-def run_privileged(cmd: Union[List[str], str], **kwargs) -> subprocess.CompletedProcess:
+def _needs_sudo() -> bool:
+ """Return True if the current process should prepend sudo to commands.
+
+ Returns False when:
+ - Running on Windows (no sudo concept; dev environment)
+ - Already running as root (e.g. inside Docker)
+ - ``sudo`` is not installed (minimal containers)
+ """
+ if os.name == 'nt':
+ return False
+ if os.geteuid() == 0:
+ return False
+ if not shutil.which('sudo'):
+ return False
+ return True
+
+
def privileged_cmd(cmd: Union[List[str], str], *, user: Optional[str] = None) -> Union[List[str], str]:
    """Return *cmd* with ``sudo`` prepended when necessary.

    Use this when you need the command for ``Popen`` or other non-``run``
    callers; simple ``subprocess.run`` calls should prefer
    :func:`run_privileged`. Pass *user* to run as a specific account
    (``sudo -u <user>``). Commands already starting with sudo, and any
    command when sudo is not needed, are returned unchanged.
    """
    if isinstance(cmd, str):
        # Shell-mode string: caller is responsible for quoting.
        if not _needs_sudo() or cmd.lstrip().startswith('sudo '):
            return cmd
        prefix = f'sudo -u {user} ' if user else 'sudo '
        return prefix + cmd

    items = list(cmd)
    if not _needs_sudo() or items[0] == 'sudo':
        return items
    wrapper = ['sudo', '-u', user] if user else ['sudo']
    return wrapper + items
+
+
+def run_privileged(cmd: Union[List[str], str], *, user: Optional[str] = None, **kwargs) -> subprocess.CompletedProcess:
"""Run a command with sudo if the current process is not root.
- Prepends ``sudo`` when ``os.geteuid() != 0`` and the command does not
- already start with ``sudo``. Defaults to ``capture_output=True, text=True``
- but callers can override any kwarg.
+ Prepends ``sudo`` only when needed (not root, not Windows, sudo exists).
+ Pass *user* to run the command as a specific user (``sudo -u ``).
+ Defaults to ``capture_output=True, text=True`` but callers can override.
Returns the raw ``CompletedProcess`` so services keep their existing
error-handling patterns.
"""
- if isinstance(cmd, str):
- # Shell-mode — caller is responsible for quoting
- needs_sudo = os.geteuid() != 0 and not cmd.lstrip().startswith('sudo ')
- if needs_sudo:
- cmd = f'sudo {cmd}'
- else:
- cmd = list(cmd)
- needs_sudo = os.geteuid() != 0 and cmd[0] != 'sudo'
- if needs_sudo:
- cmd = ['sudo'] + cmd
-
+ cmd = privileged_cmd(cmd, user=user)
kwargs.setdefault('capture_output', True)
kwargs.setdefault('text', True)
return subprocess.run(cmd, **kwargs)
@@ -52,6 +82,21 @@ def is_command_available(cmd: str) -> bool:
return False
def sourced_result(lines: list, source: str, source_label: str) -> dict:
    """Standard response shape for multi-source data endpoints.

    Every fallback-chain endpoint returns this shape so the frontend can
    render a consistent source-aware banner.

    Args:
        lines: The data lines retrieved from whichever source succeeded.
        source: Machine-readable source id.
        source_label: Human-readable source description.
    """
    payload = {'success': True}
    payload['lines'] = lines
    payload['count'] = len(lines)
    payload['source'] = source
    payload['source_label'] = source_label
    return payload
+
+
class PackageManager:
"""Cross-distro package management helpers.
diff --git a/backend/cli.py b/backend/cli.py
index caa4a39..13deb2e 100644
--- a/backend/cli.py
+++ b/backend/cli.py
@@ -206,80 +206,102 @@ def generate_keys():
@cli.command()
def init_db():
- """Initialize the database."""
+ """Initialize the database using Alembic migrations."""
app = create_app()
with app.app_context():
- db.create_all()
- click.echo(click.style('Database initialized successfully!', fg='green'))
+ from app.services.migration_service import MigrationService
+ result = MigrationService.apply_migrations(app)
+ if result['success']:
+ click.echo(click.style(f'Database initialized successfully (revision: {result["revision"]})!', fg='green'))
+ else:
+ click.echo(click.style(f'Database initialization failed: {result["error"]}', fg='red'))
+ sys.exit(1)
@cli.command()
-def migrate_db():
- """Apply database migrations for missing columns."""
+def db_status():
+ """Show current database migration status."""
app = create_app()
with app.app_context():
- from sqlalchemy import text, inspect
-
- inspector = inspect(db.engine)
- existing_tables = inspector.get_table_names()
-
- # Define all expected columns per table
- expected_columns = [
- # applications table
- ('applications', 'private_slug', 'VARCHAR(50)'),
- ('applications', 'private_url_enabled', 'BOOLEAN DEFAULT 0'),
- ('applications', 'environment_type', "VARCHAR(20) DEFAULT 'standalone'"),
- ('applications', 'linked_app_id', 'INTEGER'),
- ('applications', 'shared_config', 'TEXT'),
- # wordpress_sites table
- ('wordpress_sites', 'environment_type', "VARCHAR(20) DEFAULT 'standalone'"),
- ('wordpress_sites', 'multidev_branch', 'VARCHAR(200)'),
- ('wordpress_sites', 'is_locked', 'BOOLEAN DEFAULT 0'),
- ('wordpress_sites', 'locked_by', 'VARCHAR(100)'),
- ('wordpress_sites', 'locked_reason', 'VARCHAR(200)'),
- ('wordpress_sites', 'lock_expires_at', 'DATETIME'),
- ('wordpress_sites', 'compose_project_name', 'VARCHAR(100)'),
- ('wordpress_sites', 'container_prefix', 'VARCHAR(100)'),
- ('wordpress_sites', 'resource_limits', 'TEXT'),
- ('wordpress_sites', 'basic_auth_enabled', 'BOOLEAN DEFAULT 0'),
- ('wordpress_sites', 'basic_auth_user', 'VARCHAR(100)'),
- ('wordpress_sites', 'basic_auth_password_hash', 'VARCHAR(200)'),
- ('wordpress_sites', 'health_status', "VARCHAR(20) DEFAULT 'unknown'"),
- ('wordpress_sites', 'last_health_check', 'DATETIME'),
- ('wordpress_sites', 'disk_usage_bytes', 'BIGINT DEFAULT 0'),
- ('wordpress_sites', 'disk_usage_updated_at', 'DATETIME'),
- ('wordpress_sites', 'auto_sync_schedule', 'VARCHAR(100)'),
- ('wordpress_sites', 'auto_sync_enabled', 'BOOLEAN DEFAULT 0'),
- ]
+ from app.services.migration_service import MigrationService
+ status = MigrationService.get_status()
+
+ click.echo(f"\nCurrent revision: {status['current_revision'] or 'none'}")
+ click.echo(f"Head revision: {status['head_revision'] or 'none'}")
+ click.echo(f"Pending: {status['pending_count']}")
+
+ if status['pending_migrations']:
+ click.echo(f"\nPending migrations:")
+ for m in status['pending_migrations']:
+ click.echo(f" - {m['revision']}: {m['description']}")
+ else:
+ click.echo(click.style('\nDatabase is up to date.', fg='green'))
+ click.echo()
- # Check which columns are missing
- table_columns_cache = {}
- migrations = []
- for table, column, col_type in expected_columns:
- if table not in existing_tables:
- continue
- if table not in table_columns_cache:
- table_columns_cache[table] = [col['name'] for col in inspector.get_columns(table)]
- if column not in table_columns_cache[table]:
- migrations.append((table, column, col_type))
+@cli.command()
+@click.option('--no-backup', is_flag=True, help='Skip creating a backup before migrating')
+def db_migrate(no_backup):
+ """Apply pending database migrations."""
+ app = create_app()
+ with app.app_context():
+ from app.services.migration_service import MigrationService
+ status = MigrationService.get_status()
- if not migrations:
+ if not status['needs_migration']:
click.echo(click.style('Database is up to date. No migrations needed.', fg='green'))
return
- click.echo(f'Found {len(migrations)} migration(s) to apply...')
+ click.echo(f'Found {status["pending_count"]} pending migration(s):')
+ for m in status['pending_migrations']:
+ click.echo(f' - {m["revision"]}: {m["description"]}')
+
+ if not no_backup:
+ click.echo('\nCreating backup...')
+ backup = MigrationService.create_backup(app)
+ if backup['success']:
+ click.echo(click.style(f' Backup saved to: {backup["path"]}', fg='green'))
+ else:
+ click.echo(click.style(f' Backup failed: {backup["error"]}', fg='red'))
+ if not click.confirm('Continue without backup?'):
+ return
+
+ click.echo('\nApplying migrations...')
+ result = MigrationService.apply_migrations(app)
+ if result['success']:
+ click.echo(click.style(f'\nMigrations applied! Now at revision: {result["revision"]}', fg='green'))
+ else:
+ click.echo(click.style(f'\nMigration failed: {result["error"]}', fg='red'))
+ sys.exit(1)
- for table, column, col_type in migrations:
- try:
- sql = f'ALTER TABLE {table} ADD COLUMN {column} {col_type}'
- db.session.execute(text(sql))
- click.echo(click.style(f' ✓ Added column {table}.{column}', fg='green'))
- except Exception as e:
- click.echo(click.style(f' ✗ Failed to add {table}.{column}: {e}', fg='red'))
- db.session.commit()
- click.echo(click.style('\nMigrations completed!', fg='green'))
+@cli.command()
+def db_history():
+ """Show all database migration revisions."""
+ app = create_app()
+ with app.app_context():
+ from app.services.migration_service import MigrationService
+ history = MigrationService.get_migration_history(app)
+
+ if not history:
+ click.echo('No migration history found.')
+ return
+
+ click.echo(f"\n{'Revision':<20} {'Description':<50} {'Status'}")
+ click.echo('-' * 80)
+
+ for rev in history:
+ status_parts = []
+ if rev['is_current']:
+ status_parts.append('CURRENT')
+ if rev['is_head']:
+ status_parts.append('HEAD')
+ status = ', '.join(status_parts) if status_parts else ''
+
+ desc = rev['description'][:48] if rev['description'] else ''
+ click.echo(f"{rev['revision']:<20} {desc:<50} {status}")
+
+ click.echo()
@cli.command()
@@ -542,11 +564,15 @@ def factory_reset():
except Exception as e:
click.echo(click.style(f'✗ Failed to clear template cache: {e}', fg='red'))
- # 7. Drop and recreate database
+ # 7. Drop and recreate database via Alembic
try:
db.drop_all()
- db.create_all()
- click.echo(click.style('✓ Reset database', fg='green'))
+ from app.services.migration_service import MigrationService
+ result = MigrationService.apply_migrations(app)
+ if result['success']:
+ click.echo(click.style('✓ Reset database', fg='green'))
+ else:
+ click.echo(click.style(f'✗ Migration after reset failed: {result["error"]}', fg='red'))
except Exception as e:
click.echo(click.style(f'✗ Failed to reset database: {e}', fg='red'))
diff --git a/backend/config.py b/backend/config.py
index 5e04c69..4df2898 100644
--- a/backend/config.py
+++ b/backend/config.py
@@ -31,6 +31,15 @@ class DevelopmentConfig(Config):
DEBUG = True
+class TestingConfig(Config):
+ """Config for pytest and other automated tests."""
+ TESTING = True
+ DEBUG = True
+ SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL', 'sqlite:///:memory:')
+ # Reduce noise during tests
+ JWT_ACCESS_TOKEN_EXPIRES = timedelta(minutes=5)
+
+
class ProductionConfig(Config):
DEBUG = False
@@ -49,6 +58,7 @@ def __init__(self):
config = {
'development': DevelopmentConfig,
+ 'testing': TestingConfig,
'production': ProductionConfig,
- 'default': DevelopmentConfig
+ 'default': DevelopmentConfig,
}
diff --git a/backend/migrations/alembic.ini b/backend/migrations/alembic.ini
new file mode 100644
index 0000000..5f8ee17
--- /dev/null
+++ b/backend/migrations/alembic.ini
@@ -0,0 +1,9 @@
+# A generic, single database configuration.
+
+[alembic]
+# template used to generate migration files
+# file_template = %%(rev)s_%%(slug)s
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
diff --git a/backend/migrations/env.py b/backend/migrations/env.py
new file mode 100644
index 0000000..8390ac2
--- /dev/null
+++ b/backend/migrations/env.py
@@ -0,0 +1,100 @@
+import logging
+from logging.config import fileConfig
+
+from flask import current_app
+from alembic import context
+
+# Alembic Config object
+config = context.config
+
+# Set up loggers
+if config.config_file_name is not None:
+ fileConfig(config.config_file_name)
+logger = logging.getLogger('alembic.env')
+
+
+def get_engine():
+ try:
+ # Flask-Migrate provides the engine via current_app
+ return current_app.extensions['migrate'].db.get_engine()
+ except (TypeError, AttributeError):
+ # Fallback for Flask-SQLAlchemy >= 3, where get_engine() was removed
+ return current_app.extensions['migrate'].db.engine
+
+
+def get_engine_url():
+ try:
+ return get_engine().url.render_as_string(hide_password=False).replace('%', '%%')
+ except AttributeError:
+ return str(get_engine().url).replace('%', '%%')
+
+
+# Import all models so Alembic can detect them
+def import_models():
+ # noinspection PyUnresolvedReferences
+ from app.models import ( # noqa: F401
+ User, Application, Domain, EnvironmentVariable, EnvironmentVariableHistory,
+ NotificationPreferences, Deployment, DeploymentDiff, SystemSettings, AuditLog,
+ MetricsHistory, Workflow, GitWebhook, WebhookLog, GitDeployment,
+ Server, ServerGroup, ServerMetrics, ServerCommand, AgentSession, SecurityAlert,
+ WordPressSite, DatabaseSnapshot, SyncJob,
+ EnvironmentActivity, PromotionJob, SanitizationProfile, EmailAccount,
+ OAuthIdentity
+ )
+
+
+config.set_main_option('sqlalchemy.url', get_engine_url())
+target_db = current_app.extensions['migrate'].db
+
+
+def get_metadata():
+ if hasattr(target_db, 'metadatas'):
+ return target_db.metadatas[None]
+ return target_db.metadata
+
+
+def run_migrations_offline():
+ """Run migrations in 'offline' mode."""
+ url = config.get_main_option("sqlalchemy.url")
+ context.configure(
+ url=url,
+ target_metadata=get_metadata(),
+ literal_binds=True,
+ )
+
+ import_models()
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+def run_migrations_online():
+ """Run migrations in 'online' mode."""
+
+ def process_revision_directives(context, revision, directives):
+ if getattr(config.cmd_opts, 'autogenerate', False):
+ script = directives[0]
+ if script.upgrade_ops.is_empty():
+ directives[:] = []
+ logger.info('No changes in schema detected.')
+
+ connectable = get_engine()
+
+ import_models()
+
+ with connectable.connect() as connection:
+ context.configure(
+ connection=connection,
+ target_metadata=get_metadata(),
+ process_revision_directives=process_revision_directives,
+ render_as_batch=True, # Required for SQLite ALTER TABLE support
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+if context.is_offline_mode():
+ run_migrations_offline()
+else:
+ run_migrations_online()
diff --git a/backend/migrations/script.py.mako b/backend/migrations/script.py.mako
new file mode 100644
index 0000000..2c01563
--- /dev/null
+++ b/backend/migrations/script.py.mako
@@ -0,0 +1,24 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
+
+
+def upgrade():
+ ${upgrades if upgrades else "pass"}
+
+
+def downgrade():
+ ${downgrades if downgrades else "pass"}
diff --git a/backend/migrations/versions/001_baseline.py b/backend/migrations/versions/001_baseline.py
new file mode 100644
index 0000000..b751633
--- /dev/null
+++ b/backend/migrations/versions/001_baseline.py
@@ -0,0 +1,601 @@
+"""Baseline migration capturing full schema.
+
+Revision ID: 001_baseline
+Revises:
+Create Date: 2026-03-04
+
+For fresh installs: creates all tables from scratch.
+For existing DBs: acts as a stamp point (tables already exist).
+"""
+from alembic import op
+import sqlalchemy as sa
+
+revision = '001_baseline'
+down_revision = None
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # Check which tables already exist so this migration supports both
+ # fresh installs (create everything) and existing databases (skip/stamp).
+ conn = op.get_bind()
+ inspector = sa.inspect(conn)
+ existing_tables = inspector.get_table_names()
+
+ if 'users' not in existing_tables:
+ op.create_table('users',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('email', sa.String(120), unique=True, nullable=False, index=True),
+ sa.Column('username', sa.String(80), unique=True, nullable=False, index=True),
+ sa.Column('password_hash', sa.String(256), nullable=True),
+ sa.Column('auth_provider', sa.String(50), server_default='local'),
+ sa.Column('role', sa.String(20), server_default='developer'),
+ sa.Column('is_active', sa.Boolean(), server_default=sa.text('1')),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('last_login_at', sa.DateTime(), nullable=True),
+ sa.Column('created_by', sa.Integer(), sa.ForeignKey('users.id'), nullable=True),
+ sa.Column('failed_login_count', sa.Integer(), server_default='0'),
+ sa.Column('locked_until', sa.DateTime(), nullable=True),
+ sa.Column('totp_secret', sa.String(32), nullable=True),
+ sa.Column('totp_enabled', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('backup_codes', sa.Text(), nullable=True),
+ sa.Column('totp_confirmed_at', sa.DateTime(), nullable=True),
+ )
+ else:
+ # Add columns that may be missing in existing installs
+ existing_cols = {c['name'] for c in inspector.get_columns('users')}
+ if 'auth_provider' not in existing_cols:
+ with op.batch_alter_table('users') as batch_op:
+ batch_op.add_column(sa.Column('auth_provider', sa.String(50), server_default='local'))
+
+ if 'applications' not in existing_tables:
+ op.create_table('applications',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('name', sa.String(100), nullable=False),
+ sa.Column('app_type', sa.String(50), nullable=False),
+ sa.Column('status', sa.String(20), server_default='stopped'),
+ sa.Column('php_version', sa.String(10), nullable=True),
+ sa.Column('python_version', sa.String(10), nullable=True),
+ sa.Column('port', sa.Integer(), nullable=True),
+ sa.Column('root_path', sa.String(500), nullable=True),
+ sa.Column('docker_image', sa.String(200), nullable=True),
+ sa.Column('container_id', sa.String(100), nullable=True),
+ sa.Column('private_slug', sa.String(50), unique=True, nullable=True, index=True),
+ sa.Column('private_url_enabled', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('environment_type', sa.String(20), server_default='standalone'),
+ sa.Column('linked_app_id', sa.Integer(), sa.ForeignKey('applications.id'), nullable=True),
+ sa.Column('shared_config', sa.Text(), nullable=True),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('last_deployed_at', sa.DateTime(), nullable=True),
+ sa.Column('user_id', sa.Integer(), sa.ForeignKey('users.id'), nullable=False),
+ )
+
+ if 'domains' not in existing_tables:
+ op.create_table('domains',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('name', sa.String(255), unique=True, nullable=False, index=True),
+ sa.Column('is_primary', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('ssl_enabled', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('ssl_certificate_path', sa.String(500), nullable=True),
+ sa.Column('ssl_key_path', sa.String(500), nullable=True),
+ sa.Column('ssl_expires_at', sa.DateTime(), nullable=True),
+ sa.Column('ssl_auto_renew', sa.Boolean(), server_default=sa.text('1')),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('application_id', sa.Integer(), sa.ForeignKey('applications.id'), nullable=False),
+ )
+
+ if 'environment_variables' not in existing_tables:
+ op.create_table('environment_variables',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('application_id', sa.Integer(), sa.ForeignKey('applications.id'), nullable=False),
+ sa.Column('key', sa.String(255), nullable=False),
+ sa.Column('encrypted_value', sa.Text(), nullable=False),
+ sa.Column('is_secret', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('description', sa.String(500), nullable=True),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('created_by', sa.Integer(), sa.ForeignKey('users.id'), nullable=True),
+ sa.UniqueConstraint('application_id', 'key', name='unique_app_env_key'),
+ )
+
+ if 'environment_variable_history' not in existing_tables:
+ op.create_table('environment_variable_history',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('env_variable_id', sa.Integer(), nullable=False),
+ sa.Column('application_id', sa.Integer(), sa.ForeignKey('applications.id'), nullable=False),
+ sa.Column('key', sa.String(255), nullable=False),
+ sa.Column('action', sa.String(20), nullable=False),
+ sa.Column('old_value_hash', sa.String(64), nullable=True),
+ sa.Column('new_value_hash', sa.String(64), nullable=True),
+ sa.Column('changed_by', sa.Integer(), sa.ForeignKey('users.id'), nullable=True),
+ sa.Column('changed_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'notification_preferences' not in existing_tables:
+ op.create_table('notification_preferences',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('user_id', sa.Integer(), sa.ForeignKey('users.id'), unique=True, nullable=False),
+ sa.Column('enabled', sa.Boolean(), server_default=sa.text('1')),
+ sa.Column('channels', sa.Text(), server_default='["email"]'),
+ sa.Column('severities', sa.Text(), server_default='["critical", "warning"]'),
+ sa.Column('email', sa.String(255), nullable=True),
+ sa.Column('discord_webhook', sa.String(512), nullable=True),
+ sa.Column('telegram_chat_id', sa.String(64), nullable=True),
+ sa.Column('categories', sa.Text(), server_default='{"system": true, "security": true, "backups": true, "apps": true}'),
+ sa.Column('quiet_hours_enabled', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('quiet_hours_start', sa.String(5), server_default='22:00'),
+ sa.Column('quiet_hours_end', sa.String(5), server_default='08:00'),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'deployments' not in existing_tables:
+ op.create_table('deployments',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('app_id', sa.Integer(), sa.ForeignKey('applications.id'), nullable=False),
+ sa.Column('version', sa.Integer(), nullable=False),
+ sa.Column('version_tag', sa.String(100), nullable=True),
+ sa.Column('status', sa.String(20), server_default='pending'),
+ sa.Column('build_method', sa.String(20), nullable=True),
+ sa.Column('image_tag', sa.String(255), nullable=True),
+ sa.Column('commit_hash', sa.String(40), nullable=True),
+ sa.Column('commit_message', sa.Text(), nullable=True),
+ sa.Column('container_id', sa.String(100), nullable=True),
+ sa.Column('deployed_by', sa.Integer(), sa.ForeignKey('users.id'), nullable=True),
+ sa.Column('deploy_trigger', sa.String(20), server_default='manual'),
+ sa.Column('build_log_path', sa.String(500), nullable=True),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('build_started_at', sa.DateTime(), nullable=True),
+ sa.Column('build_completed_at', sa.DateTime(), nullable=True),
+ sa.Column('deploy_started_at', sa.DateTime(), nullable=True),
+ sa.Column('deploy_completed_at', sa.DateTime(), nullable=True),
+ sa.Column('error_message', sa.Text(), nullable=True),
+ sa.Column('extra_data', sa.Text(), server_default='{}'),
+ )
+
+ if 'deployment_diffs' not in existing_tables:
+ op.create_table('deployment_diffs',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('deployment_id', sa.Integer(), sa.ForeignKey('deployments.id'), nullable=False),
+ sa.Column('previous_deployment_id', sa.Integer(), sa.ForeignKey('deployments.id'), nullable=True),
+ sa.Column('files_added', sa.Text(), server_default='[]'),
+ sa.Column('files_removed', sa.Text(), server_default='[]'),
+ sa.Column('files_modified', sa.Text(), server_default='[]'),
+ sa.Column('additions', sa.Integer(), server_default='0'),
+ sa.Column('deletions', sa.Integer(), server_default='0'),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'system_settings' not in existing_tables:
+ op.create_table('system_settings',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('key', sa.String(100), unique=True, nullable=False, index=True),
+ sa.Column('value', sa.Text(), nullable=True),
+ sa.Column('value_type', sa.String(20), server_default='string'),
+ sa.Column('description', sa.String(500), nullable=True),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_by', sa.Integer(), sa.ForeignKey('users.id'), nullable=True),
+ )
+
+ if 'audit_logs' not in existing_tables:
+ op.create_table('audit_logs',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('action', sa.String(100), nullable=False, index=True),
+ sa.Column('user_id', sa.Integer(), sa.ForeignKey('users.id'), nullable=True),
+ sa.Column('target_type', sa.String(50), nullable=True),
+ sa.Column('target_id', sa.Integer(), nullable=True),
+ sa.Column('details', sa.Text(), nullable=True),
+ sa.Column('ip_address', sa.String(45), nullable=True),
+ sa.Column('user_agent', sa.String(500), nullable=True),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now(), index=True),
+ )
+
+ if 'metrics_history' not in existing_tables:
+ op.create_table('metrics_history',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('timestamp', sa.DateTime(), nullable=False, index=True),
+ sa.Column('level', sa.String(10), nullable=False, server_default='minute', index=True),
+ sa.Column('cpu_percent', sa.Float(), nullable=False),
+ sa.Column('cpu_percent_min', sa.Float(), nullable=True),
+ sa.Column('cpu_percent_max', sa.Float(), nullable=True),
+ sa.Column('memory_percent', sa.Float(), nullable=False),
+ sa.Column('memory_used_bytes', sa.BigInteger(), nullable=False),
+ sa.Column('memory_total_bytes', sa.BigInteger(), nullable=False),
+ sa.Column('disk_percent', sa.Float(), nullable=False),
+ sa.Column('disk_used_bytes', sa.BigInteger(), nullable=False),
+ sa.Column('disk_total_bytes', sa.BigInteger(), nullable=False),
+ sa.Column('load_1m', sa.Float(), nullable=True),
+ sa.Column('load_5m', sa.Float(), nullable=True),
+ sa.Column('load_15m', sa.Float(), nullable=True),
+ sa.Column('sample_count', sa.Integer(), server_default='1'),
+ )
+ op.create_index('idx_metrics_level_timestamp', 'metrics_history', ['level', 'timestamp'])
+
+ if 'workflows' not in existing_tables:
+ op.create_table('workflows',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('name', sa.String(100), nullable=False),
+ sa.Column('description', sa.Text(), nullable=True),
+ sa.Column('nodes', sa.Text(), nullable=True),
+ sa.Column('edges', sa.Text(), nullable=True),
+ sa.Column('viewport', sa.Text(), nullable=True),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('user_id', sa.Integer(), sa.ForeignKey('users.id'), nullable=False),
+ )
+
+ if 'git_webhooks' not in existing_tables:
+ op.create_table('git_webhooks',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('name', sa.String(100), nullable=False),
+ sa.Column('source', sa.String(50), nullable=False),
+ sa.Column('source_repo_url', sa.String(500), nullable=False),
+ sa.Column('source_branch', sa.String(100), server_default='main'),
+ sa.Column('local_repo_name', sa.String(200), nullable=True),
+ sa.Column('secret', sa.String(100), nullable=False),
+ sa.Column('webhook_token', sa.String(50), nullable=False, unique=True),
+ sa.Column('sync_direction', sa.String(20), server_default='pull'),
+ sa.Column('auto_sync', sa.Boolean(), server_default=sa.text('1')),
+ sa.Column('app_id', sa.Integer(), sa.ForeignKey('applications.id'), nullable=True),
+ sa.Column('deploy_on_push', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('pre_deploy_script', sa.Text(), nullable=True),
+ sa.Column('post_deploy_script', sa.Text(), nullable=True),
+ sa.Column('zero_downtime', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('is_active', sa.Boolean(), server_default=sa.text('1')),
+ sa.Column('last_sync_at', sa.DateTime(), nullable=True),
+ sa.Column('last_sync_status', sa.String(20), nullable=True),
+ sa.Column('last_sync_message', sa.Text(), nullable=True),
+ sa.Column('sync_count', sa.Integer(), server_default='0'),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'webhook_logs' not in existing_tables:
+ op.create_table('webhook_logs',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('webhook_id', sa.Integer(), sa.ForeignKey('git_webhooks.id'), nullable=True),
+ sa.Column('source', sa.String(50), nullable=False),
+ sa.Column('event_type', sa.String(50), nullable=False),
+ sa.Column('delivery_id', sa.String(100), nullable=True),
+ sa.Column('ref', sa.String(200), nullable=True),
+ sa.Column('commit_sha', sa.String(64), nullable=True),
+ sa.Column('commit_message', sa.Text(), nullable=True),
+ sa.Column('pusher', sa.String(100), nullable=True),
+ sa.Column('status', sa.String(20), server_default='received'),
+ sa.Column('status_message', sa.Text(), nullable=True),
+ sa.Column('headers_json', sa.Text(), nullable=True),
+ sa.Column('payload_preview', sa.Text(), nullable=True),
+ sa.Column('received_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('processed_at', sa.DateTime(), nullable=True),
+ )
+
+ if 'git_deployments' not in existing_tables:
+ op.create_table('git_deployments',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('app_id', sa.Integer(), sa.ForeignKey('applications.id'), nullable=False),
+ sa.Column('webhook_id', sa.Integer(), sa.ForeignKey('git_webhooks.id'), nullable=True),
+ sa.Column('version', sa.Integer(), nullable=False),
+ sa.Column('commit_sha', sa.String(64), nullable=True),
+ sa.Column('commit_message', sa.Text(), nullable=True),
+ sa.Column('branch', sa.String(100), nullable=True),
+ sa.Column('triggered_by', sa.String(100), nullable=True),
+ sa.Column('status', sa.String(20), server_default='pending'),
+ sa.Column('started_at', sa.DateTime(), nullable=True),
+ sa.Column('completed_at', sa.DateTime(), nullable=True),
+ sa.Column('duration_seconds', sa.Integer(), nullable=True),
+ sa.Column('pre_script_output', sa.Text(), nullable=True),
+ sa.Column('deploy_output', sa.Text(), nullable=True),
+ sa.Column('post_script_output', sa.Text(), nullable=True),
+ sa.Column('error_message', sa.Text(), nullable=True),
+ sa.Column('is_rollback', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('rollback_from_version', sa.Integer(), nullable=True),
+ sa.Column('rolled_back_at', sa.DateTime(), nullable=True),
+ sa.Column('rolled_back_to_id', sa.Integer(), nullable=True),
+ sa.Column('snapshot_data', sa.Text(), nullable=True),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'server_groups' not in existing_tables:
+ op.create_table('server_groups',
+ sa.Column('id', sa.String(36), primary_key=True),
+ sa.Column('name', sa.String(100), nullable=False),
+ sa.Column('description', sa.Text()),
+ sa.Column('color', sa.String(7), server_default='#6366f1'),
+ sa.Column('icon', sa.String(50), server_default='server'),
+ sa.Column('parent_id', sa.String(36), sa.ForeignKey('server_groups.id'), nullable=True),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'servers' not in existing_tables:
+ op.create_table('servers',
+ sa.Column('id', sa.String(36), primary_key=True),
+ sa.Column('name', sa.String(100), nullable=False),
+ sa.Column('description', sa.Text()),
+ sa.Column('hostname', sa.String(255)),
+ sa.Column('ip_address', sa.String(45)),
+ sa.Column('group_id', sa.String(36), sa.ForeignKey('server_groups.id'), nullable=True),
+ sa.Column('tags', sa.JSON()),
+ sa.Column('status', sa.String(20), server_default='pending'),
+ sa.Column('last_seen', sa.DateTime()),
+ sa.Column('last_error', sa.Text()),
+ sa.Column('agent_version', sa.String(20)),
+ sa.Column('agent_id', sa.String(36), unique=True, index=True),
+ sa.Column('os_type', sa.String(20)),
+ sa.Column('os_version', sa.String(100)),
+ sa.Column('platform', sa.String(100)),
+ sa.Column('architecture', sa.String(20)),
+ sa.Column('cpu_cores', sa.Integer()),
+ sa.Column('cpu_model', sa.String(200)),
+ sa.Column('total_memory', sa.BigInteger()),
+ sa.Column('total_disk', sa.BigInteger()),
+ sa.Column('docker_version', sa.String(50)),
+ sa.Column('api_key_hash', sa.String(256)),
+ sa.Column('api_key_prefix', sa.String(12)),
+ sa.Column('api_secret_encrypted', sa.Text()),
+ sa.Column('permissions', sa.JSON()),
+ sa.Column('allowed_ips', sa.JSON()),
+ sa.Column('api_key_pending_hash', sa.String(256)),
+ sa.Column('api_key_pending_prefix', sa.String(12)),
+ sa.Column('api_secret_pending_encrypted', sa.Text()),
+ sa.Column('api_key_rotation_expires', sa.DateTime()),
+ sa.Column('api_key_rotation_id', sa.String(36)),
+ sa.Column('api_key_last_rotated', sa.DateTime()),
+ sa.Column('registration_token_hash', sa.String(256)),
+ sa.Column('registration_token_expires', sa.DateTime()),
+ sa.Column('registered_at', sa.DateTime()),
+ sa.Column('registered_by', sa.Integer(), sa.ForeignKey('users.id')),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'server_metrics' not in existing_tables:
+ op.create_table('server_metrics',
+ sa.Column('id', sa.BigInteger(), primary_key=True, autoincrement=True),
+ sa.Column('server_id', sa.String(36), sa.ForeignKey('servers.id'), nullable=False, index=True),
+ sa.Column('timestamp', sa.DateTime(), server_default=sa.func.now(), index=True),
+ sa.Column('cpu_percent', sa.Float()),
+ sa.Column('memory_percent', sa.Float()),
+ sa.Column('memory_used', sa.BigInteger()),
+ sa.Column('disk_percent', sa.Float()),
+ sa.Column('disk_used', sa.BigInteger()),
+ sa.Column('network_rx', sa.BigInteger()),
+ sa.Column('network_tx', sa.BigInteger()),
+ sa.Column('network_rx_rate', sa.Float()),
+ sa.Column('network_tx_rate', sa.Float()),
+ sa.Column('container_count', sa.Integer()),
+ sa.Column('container_running', sa.Integer()),
+ sa.Column('extra', sa.JSON()),
+ )
+ op.create_index('ix_server_metrics_server_time', 'server_metrics', ['server_id', 'timestamp'])
+
+ if 'server_commands' not in existing_tables:
+ op.create_table('server_commands',
+ sa.Column('id', sa.String(36), primary_key=True),
+ sa.Column('server_id', sa.String(36), sa.ForeignKey('servers.id'), nullable=False, index=True),
+ sa.Column('user_id', sa.Integer(), sa.ForeignKey('users.id')),
+ sa.Column('command_type', sa.String(50)),
+ sa.Column('command_data', sa.JSON()),
+ sa.Column('status', sa.String(20), server_default='pending'),
+ sa.Column('started_at', sa.DateTime()),
+ sa.Column('completed_at', sa.DateTime()),
+ sa.Column('result', sa.JSON()),
+ sa.Column('error', sa.Text()),
+ sa.Column('exit_code', sa.Integer()),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'agent_sessions' not in existing_tables:
+ op.create_table('agent_sessions',
+ sa.Column('id', sa.String(36), primary_key=True),
+ sa.Column('server_id', sa.String(36), sa.ForeignKey('servers.id'), nullable=False, index=True),
+ sa.Column('session_token', sa.String(256)),
+ sa.Column('socket_id', sa.String(100)),
+ sa.Column('connected_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('last_heartbeat', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('ip_address', sa.String(45)),
+ sa.Column('user_agent', sa.String(255)),
+ sa.Column('is_active', sa.Boolean(), server_default=sa.text('1'), index=True),
+ sa.Column('disconnected_at', sa.DateTime()),
+ sa.Column('disconnect_reason', sa.String(100)),
+ )
+
+ if 'security_alerts' not in existing_tables:
+ op.create_table('security_alerts',
+ sa.Column('id', sa.String(36), primary_key=True),
+ sa.Column('server_id', sa.String(36), sa.ForeignKey('servers.id'), nullable=True, index=True),
+ sa.Column('alert_type', sa.String(50), nullable=False, index=True),
+ sa.Column('severity', sa.String(20), nullable=False, server_default='info', index=True),
+ sa.Column('source_ip', sa.String(45)),
+ sa.Column('details', sa.JSON()),
+ sa.Column('status', sa.String(20), server_default='open', index=True),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now(), index=True),
+ sa.Column('acknowledged_at', sa.DateTime()),
+ sa.Column('acknowledged_by', sa.Integer(), sa.ForeignKey('users.id')),
+ sa.Column('resolved_at', sa.DateTime()),
+ sa.Column('resolved_by', sa.Integer(), sa.ForeignKey('users.id')),
+ )
+
+ if 'wordpress_sites' not in existing_tables:
+ op.create_table('wordpress_sites',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('application_id', sa.Integer(), sa.ForeignKey('applications.id'), unique=True, nullable=False),
+ sa.Column('wp_version', sa.String(20)),
+ sa.Column('multisite', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('admin_user', sa.String(100)),
+ sa.Column('admin_email', sa.String(200)),
+ sa.Column('db_name', sa.String(100)),
+ sa.Column('db_user', sa.String(100)),
+ sa.Column('db_host', sa.String(200), server_default='localhost'),
+ sa.Column('db_prefix', sa.String(20), server_default='wp_'),
+ sa.Column('git_repo_url', sa.String(500)),
+ sa.Column('git_branch', sa.String(100), server_default='main'),
+ sa.Column('git_paths', sa.Text()),
+ sa.Column('auto_deploy', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('last_deploy_commit', sa.String(40)),
+ sa.Column('last_deploy_at', sa.DateTime()),
+ sa.Column('is_production', sa.Boolean(), server_default=sa.text('1')),
+ sa.Column('production_site_id', sa.Integer(), sa.ForeignKey('wordpress_sites.id'), nullable=True),
+ sa.Column('sync_config', sa.Text()),
+ sa.Column('environment_type', sa.String(20), server_default='standalone'),
+ sa.Column('multidev_branch', sa.String(200)),
+ sa.Column('is_locked', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('locked_by', sa.String(100)),
+ sa.Column('locked_reason', sa.String(200)),
+ sa.Column('lock_expires_at', sa.DateTime()),
+ sa.Column('compose_project_name', sa.String(100)),
+ sa.Column('container_prefix', sa.String(100)),
+ sa.Column('resource_limits', sa.Text()),
+ sa.Column('basic_auth_enabled', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('basic_auth_user', sa.String(100)),
+ sa.Column('basic_auth_password_hash', sa.String(200)),
+ sa.Column('health_status', sa.String(20), server_default='unknown'),
+ sa.Column('last_health_check', sa.DateTime()),
+ sa.Column('disk_usage_bytes', sa.BigInteger(), server_default='0'),
+ sa.Column('disk_usage_updated_at', sa.DateTime()),
+ sa.Column('auto_sync_schedule', sa.String(100)),
+ sa.Column('auto_sync_enabled', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'database_snapshots' not in existing_tables:
+ op.create_table('database_snapshots',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('site_id', sa.Integer(), sa.ForeignKey('wordpress_sites.id'), nullable=False),
+ sa.Column('name', sa.String(200), nullable=False),
+ sa.Column('description', sa.Text()),
+ sa.Column('tag', sa.String(100)),
+ sa.Column('file_path', sa.String(500), nullable=False),
+ sa.Column('size_bytes', sa.BigInteger(), server_default='0'),
+ sa.Column('compressed', sa.Boolean(), server_default=sa.text('1')),
+ sa.Column('commit_sha', sa.String(40)),
+ sa.Column('commit_message', sa.Text()),
+ sa.Column('tables_included', sa.Text()),
+ sa.Column('row_count', sa.Integer()),
+ sa.Column('status', sa.String(20), server_default='completed'),
+ sa.Column('error_message', sa.Text()),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('expires_at', sa.DateTime()),
+ )
+
+ if 'sync_jobs' not in existing_tables:
+ op.create_table('sync_jobs',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('source_site_id', sa.Integer(), sa.ForeignKey('wordpress_sites.id'), nullable=False),
+ sa.Column('target_site_id', sa.Integer(), sa.ForeignKey('wordpress_sites.id'), nullable=False),
+ sa.Column('name', sa.String(200)),
+ sa.Column('schedule', sa.String(100)),
+ sa.Column('enabled', sa.Boolean(), server_default=sa.text('1')),
+ sa.Column('config', sa.Text()),
+ sa.Column('last_run', sa.DateTime()),
+ sa.Column('last_run_status', sa.String(20)),
+ sa.Column('last_run_duration', sa.Integer()),
+ sa.Column('last_run_error', sa.Text()),
+ sa.Column('next_run', sa.DateTime()),
+ sa.Column('run_count', sa.Integer(), server_default='0'),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'environment_activities' not in existing_tables:
+ op.create_table('environment_activities',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('site_id', sa.Integer(), sa.ForeignKey('wordpress_sites.id'), nullable=False),
+ sa.Column('user_id', sa.Integer(), sa.ForeignKey('users.id'), nullable=True),
+ sa.Column('action', sa.String(50), nullable=False),
+ sa.Column('description', sa.Text()),
+ sa.Column('metadata', sa.Text()),
+ sa.Column('status', sa.String(20), server_default='completed'),
+ sa.Column('error_message', sa.Text()),
+ sa.Column('duration_seconds', sa.Float()),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'promotion_jobs' not in existing_tables:
+ op.create_table('promotion_jobs',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('source_site_id', sa.Integer(), sa.ForeignKey('wordpress_sites.id'), nullable=False),
+ sa.Column('target_site_id', sa.Integer(), sa.ForeignKey('wordpress_sites.id'), nullable=False),
+ sa.Column('user_id', sa.Integer(), sa.ForeignKey('users.id'), nullable=True),
+ sa.Column('promotion_type', sa.String(20), nullable=False),
+ sa.Column('config', sa.Text()),
+ sa.Column('status', sa.String(20), server_default='pending'),
+ sa.Column('pre_promotion_snapshot_id', sa.Integer(), sa.ForeignKey('database_snapshots.id'), nullable=True),
+ sa.Column('error_message', sa.Text()),
+ sa.Column('started_at', sa.DateTime()),
+ sa.Column('completed_at', sa.DateTime()),
+ sa.Column('duration_seconds', sa.Float()),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+ if 'sanitization_profiles' not in existing_tables:
+ op.create_table('sanitization_profiles',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('user_id', sa.Integer(), sa.ForeignKey('users.id'), nullable=False),
+ sa.Column('name', sa.String(100), nullable=False),
+ sa.Column('description', sa.Text()),
+ sa.Column('config', sa.Text(), nullable=False),
+ sa.Column('is_default', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('is_builtin', sa.Boolean(), server_default=sa.text('0')),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime()),
+ )
+
+ if 'email_accounts' not in existing_tables:
+ op.create_table('email_accounts',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('email', sa.String(255), unique=True, nullable=False),
+ sa.Column('domain', sa.String(255), nullable=False),
+ sa.Column('username', sa.String(100), nullable=False),
+ sa.Column('quota_mb', sa.Integer(), server_default='1024'),
+ sa.Column('enabled', sa.Boolean(), server_default=sa.text('1')),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('forward_to', sa.Text(), nullable=True),
+ sa.Column('forward_keep_copy', sa.Boolean(), server_default=sa.text('1')),
+ )
+
+ if 'oauth_identities' not in existing_tables:
+ op.create_table('oauth_identities',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('user_id', sa.Integer(), sa.ForeignKey('users.id', ondelete='CASCADE'), nullable=False, index=True),
+ sa.Column('provider', sa.String(50), nullable=False),
+ sa.Column('provider_user_id', sa.String(256), nullable=False),
+ sa.Column('provider_email', sa.String(256), nullable=True),
+ sa.Column('provider_display_name', sa.String(256), nullable=True),
+ sa.Column('access_token_encrypted', sa.Text(), nullable=True),
+ sa.Column('refresh_token_encrypted', sa.Text(), nullable=True),
+ sa.Column('token_expires_at', sa.DateTime(), nullable=True),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('updated_at', sa.DateTime(), server_default=sa.func.now()),
+ sa.Column('last_login_at', sa.DateTime(), nullable=True),
+ sa.UniqueConstraint('provider', 'provider_user_id', name='uq_provider_identity'),
+ )
+
+
+def downgrade():
+ # Drop tables in reverse dependency order
+ tables = [
+ 'oauth_identities', 'email_accounts', 'sanitization_profiles',
+ 'promotion_jobs', 'environment_activities', 'sync_jobs',
+ 'database_snapshots', 'wordpress_sites', 'security_alerts',
+ 'agent_sessions', 'server_commands', 'server_metrics', 'servers',
+ 'server_groups', 'git_deployments', 'webhook_logs', 'git_webhooks',
+ 'workflows', 'metrics_history', 'audit_logs', 'system_settings',
+ 'deployment_diffs', 'deployments', 'notification_preferences',
+ 'environment_variable_history', 'environment_variables', 'domains',
+ 'applications', 'users',
+ ]
+
+ conn = op.get_bind()
+ inspector = sa.inspect(conn)
+ existing_tables = inspector.get_table_names()
+
+ for table in tables:
+ if table in existing_tables:
+ op.drop_table(table)
diff --git a/backend/migrations/versions/002_permissions_invitations.py b/backend/migrations/versions/002_permissions_invitations.py
new file mode 100644
index 0000000..09fba0d
--- /dev/null
+++ b/backend/migrations/versions/002_permissions_invitations.py
@@ -0,0 +1,57 @@
+"""Add permissions column to users and create invitations table.
+
+Revision ID: 002_permissions_invitations
+Revises: 001_baseline
+Create Date: 2026-03-04
+"""
+from alembic import op
+import sqlalchemy as sa
+
+revision = '002_permissions_invitations'
+down_revision = '001_baseline'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ conn = op.get_bind()
+ inspector = sa.inspect(conn)
+ existing_tables = inspector.get_table_names()
+
+ # Add permissions column to users if missing
+ if 'users' in existing_tables:
+ existing_cols = {c['name'] for c in inspector.get_columns('users')}
+ if 'permissions' not in existing_cols:
+ with op.batch_alter_table('users') as batch_op:
+ batch_op.add_column(sa.Column('permissions', sa.Text(), nullable=True))
+
+ # Create invitations table
+ if 'invitations' not in existing_tables:
+ op.create_table('invitations',
+ sa.Column('id', sa.Integer(), primary_key=True),
+ sa.Column('email', sa.String(255), nullable=True),
+ sa.Column('token', sa.String(64), unique=True, nullable=False, index=True),
+ sa.Column('role', sa.String(20), nullable=False, server_default='developer'),
+ sa.Column('permissions', sa.Text(), nullable=True),
+ sa.Column('invited_by', sa.Integer(), sa.ForeignKey('users.id'), nullable=False),
+ sa.Column('expires_at', sa.DateTime(), nullable=True),
+ sa.Column('accepted_at', sa.DateTime(), nullable=True),
+ sa.Column('accepted_by', sa.Integer(), sa.ForeignKey('users.id'), nullable=True),
+ sa.Column('status', sa.String(20), nullable=False, server_default='pending', index=True),
+ sa.Column('created_at', sa.DateTime(), server_default=sa.func.now()),
+ )
+
+
+def downgrade():
+ conn = op.get_bind()
+ inspector = sa.inspect(conn)
+ existing_tables = inspector.get_table_names()
+
+ if 'invitations' in existing_tables:
+ op.drop_table('invitations')
+
+ if 'users' in existing_tables:
+ existing_cols = {c['name'] for c in inspector.get_columns('users')}
+ if 'permissions' in existing_cols:
+ with op.batch_alter_table('users') as batch_op:
+ batch_op.drop_column('permissions')
diff --git a/backend/requirements.txt b/backend/requirements.txt
index 6eba1fa..e15ea80 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -5,6 +5,7 @@ Werkzeug==3.1.6
# Database
Flask-SQLAlchemy==3.1.1
SQLAlchemy==2.0.23
+Flask-Migrate==4.0.7
# Authentication
Flask-JWT-Extended==4.6.0
@@ -53,4 +54,12 @@ qrcode[pil]==7.4.2
requests==2.32.5
# S3-compatible storage (AWS S3, Backblaze B2, MinIO, Wasabi)
-boto3==1.35.0
\ No newline at end of file
+boto3==1.35.0
+
+# SSO / OAuth
+Authlib==1.6.9
+python3-saml==1.16.0
+
+# OpenAPI documentation
+apispec==6.4.0
+apispec-webframeworks==1.2.0
\ No newline at end of file
diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py
new file mode 100644
index 0000000..e727d34
--- /dev/null
+++ b/backend/tests/conftest.py
@@ -0,0 +1,62 @@
+"""Pytest fixtures for backend tests (Flask app, DB, client)."""
+import os
+import sys
+
+import pytest
+
+# Ensure backend root is on path
+_backend = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+if _backend not in sys.path:
+ sys.path.insert(0, _backend)
+
+os.environ.setdefault('FLASK_ENV', 'testing')
+
+
+@pytest.fixture(scope='function')
+def app():
+ """Create Flask app with testing config and in-memory DB."""
+ from app import create_app
+ from app import db as _db
+
+ app = create_app('testing')
+ with app.app_context():
+ _db.create_all()
+ yield app
+ _db.session.remove()
+ _db.drop_all()
+
+
+@pytest.fixture
+def client(app):
+ """Flask test client."""
+ return app.test_client()
+
+
+@pytest.fixture
+def db_session(app):
+ """Database session for the current test (same as app's db)."""
+ from app import db
+ return db
+
+
+@pytest.fixture
+def auth_headers(app):
+ """Create an admin user and return headers with valid JWT for API tests."""
+ from app import db
+ from app.models import User
+ from flask_jwt_extended import create_access_token
+ from werkzeug.security import generate_password_hash
+
+ with app.app_context():
+ user = User(
+ email='testadmin@test.local',
+ username='testadmin',
+ password_hash=generate_password_hash('testpass'),
+ role=User.ROLE_ADMIN,
+ is_active=True,
+ )
+ db.session.add(user)
+ db.session.commit()
+ token = create_access_token(identity=str(user.id))
+
+ return {'Authorization': f'Bearer {token}'}
diff --git a/backend/tests/test_utils_system.py b/backend/tests/test_utils_system.py
index ab0fb8e..a5e053b 100644
--- a/backend/tests/test_utils_system.py
+++ b/backend/tests/test_utils_system.py
@@ -48,9 +48,11 @@
class TestRunPrivileged:
"""Tests for :func:`run_privileged`."""
+ @patch('app.utils.system.os.name', 'posix')
+ @patch('app.utils.system.shutil.which', return_value='/usr/bin/sudo')
@patch('app.utils.system.subprocess.run')
@patch('app.utils.system.os.geteuid', return_value=1000, create=True)
- def test_prepends_sudo_when_not_root(self, _euid, mock_run):
+ def test_prepends_sudo_when_not_root(self, _euid, mock_run, _which):
mock_run.return_value = subprocess.CompletedProcess([], 0)
run_privileged(['systemctl', 'restart', 'nginx'])
mock_run.assert_called_once_with(
@@ -104,9 +106,11 @@ def test_caller_can_override_defaults(self, _euid, mock_run):
_, kwargs = mock_run.call_args
assert kwargs['capture_output'] is False
+ @patch('app.utils.system.os.name', 'posix')
+ @patch('app.utils.system.shutil.which', return_value='/usr/bin/sudo')
@patch('app.utils.system.subprocess.run')
@patch('app.utils.system.os.geteuid', return_value=1000, create=True)
- def test_string_command_gets_sudo(self, _euid, mock_run):
+ def test_string_command_gets_sudo(self, _euid, mock_run, _which):
mock_run.return_value = subprocess.CompletedProcess([], 0)
run_privileged('systemctl restart nginx')
args, _ = mock_run.call_args
@@ -227,9 +231,10 @@ def test_is_installed_no_manager(self, _which):
# -- install --
+ @patch('app.utils.system.os.name', 'posix')
@patch('app.utils.system.subprocess.run')
@patch('app.utils.system.os.geteuid', return_value=1000, create=True)
- @patch('app.utils.system.shutil.which', side_effect=lambda c: '/usr/bin/apt' if c == 'apt' else None)
+ @patch('app.utils.system.shutil.which', side_effect=lambda c: '/usr/bin/apt' if c == 'apt' else ('/usr/bin/sudo' if c == 'sudo' else None))
def test_install_apt(self, _which, _euid, mock_run):
mock_run.return_value = subprocess.CompletedProcess([], 0)
result = PackageManager.install(['nginx', 'curl'])
@@ -238,9 +243,10 @@ def test_install_apt(self, _which, _euid, mock_run):
capture_output=True, text=True, timeout=300,
)
+ @patch('app.utils.system.os.name', 'posix')
@patch('app.utils.system.subprocess.run')
@patch('app.utils.system.os.geteuid', return_value=1000, create=True)
- @patch('app.utils.system.shutil.which', side_effect=lambda c: '/usr/bin/dnf' if c == 'dnf' else None)
+ @patch('app.utils.system.shutil.which', side_effect=lambda c: '/usr/bin/dnf' if c == 'dnf' else ('/usr/bin/sudo' if c == 'sudo' else None))
def test_install_dnf(self, _which, _euid, mock_run):
mock_run.return_value = subprocess.CompletedProcess([], 0)
PackageManager.install('nginx')
@@ -259,9 +265,11 @@ def test_install_no_manager_raises(self, _which):
class TestServiceControl:
"""Tests for :class:`ServiceControl`."""
+ @patch('app.utils.system.os.name', 'posix')
+ @patch('app.utils.system.shutil.which', return_value='/usr/bin/sudo')
@patch('app.utils.system.subprocess.run')
@patch('app.utils.system.os.geteuid', return_value=1000, create=True)
- def test_start(self, _euid, mock_run):
+ def test_start(self, _euid, mock_run, _which):
mock_run.return_value = subprocess.CompletedProcess([], 0)
ServiceControl.start('nginx')
mock_run.assert_called_once_with(
@@ -269,9 +277,11 @@ def test_start(self, _euid, mock_run):
capture_output=True, text=True,
)
+ @patch('app.utils.system.os.name', 'posix')
+ @patch('app.utils.system.shutil.which', return_value='/usr/bin/sudo')
@patch('app.utils.system.subprocess.run')
@patch('app.utils.system.os.geteuid', return_value=1000, create=True)
- def test_stop(self, _euid, mock_run):
+ def test_stop(self, _euid, mock_run, _which):
mock_run.return_value = subprocess.CompletedProcess([], 0)
ServiceControl.stop('nginx')
mock_run.assert_called_once_with(
@@ -279,9 +289,11 @@ def test_stop(self, _euid, mock_run):
capture_output=True, text=True,
)
+ @patch('app.utils.system.os.name', 'posix')
+ @patch('app.utils.system.shutil.which', return_value='/usr/bin/sudo')
@patch('app.utils.system.subprocess.run')
@patch('app.utils.system.os.geteuid', return_value=1000, create=True)
- def test_restart(self, _euid, mock_run):
+ def test_restart(self, _euid, mock_run, _which):
mock_run.return_value = subprocess.CompletedProcess([], 0)
ServiceControl.restart('nginx')
mock_run.assert_called_once_with(
@@ -289,9 +301,11 @@ def test_restart(self, _euid, mock_run):
capture_output=True, text=True,
)
+ @patch('app.utils.system.os.name', 'posix')
+ @patch('app.utils.system.shutil.which', return_value='/usr/bin/sudo')
@patch('app.utils.system.subprocess.run')
@patch('app.utils.system.os.geteuid', return_value=1000, create=True)
- def test_reload(self, _euid, mock_run):
+ def test_reload(self, _euid, mock_run, _which):
mock_run.return_value = subprocess.CompletedProcess([], 0)
ServiceControl.reload('nginx')
mock_run.assert_called_once_with(
@@ -299,9 +313,11 @@ def test_reload(self, _euid, mock_run):
capture_output=True, text=True,
)
+ @patch('app.utils.system.os.name', 'posix')
+ @patch('app.utils.system.shutil.which', return_value='/usr/bin/sudo')
@patch('app.utils.system.subprocess.run')
@patch('app.utils.system.os.geteuid', return_value=1000, create=True)
- def test_enable(self, _euid, mock_run):
+ def test_enable(self, _euid, mock_run, _which):
mock_run.return_value = subprocess.CompletedProcess([], 0)
ServiceControl.enable('nginx')
mock_run.assert_called_once_with(
@@ -309,9 +325,11 @@ def test_enable(self, _euid, mock_run):
capture_output=True, text=True,
)
+ @patch('app.utils.system.os.name', 'posix')
+ @patch('app.utils.system.shutil.which', return_value='/usr/bin/sudo')
@patch('app.utils.system.subprocess.run')
@patch('app.utils.system.os.geteuid', return_value=1000, create=True)
- def test_disable(self, _euid, mock_run):
+ def test_disable(self, _euid, mock_run, _which):
mock_run.return_value = subprocess.CompletedProcess([], 0)
ServiceControl.disable('nginx')
mock_run.assert_called_once_with(
@@ -319,9 +337,11 @@ def test_disable(self, _euid, mock_run):
capture_output=True, text=True,
)
+ @patch('app.utils.system.os.name', 'posix')
+ @patch('app.utils.system.shutil.which', return_value='/usr/bin/sudo')
@patch('app.utils.system.subprocess.run')
@patch('app.utils.system.os.geteuid', return_value=1000, create=True)
- def test_daemon_reload(self, _euid, mock_run):
+ def test_daemon_reload(self, _euid, mock_run, _which):
mock_run.return_value = subprocess.CompletedProcess([], 0)
ServiceControl.daemon_reload()
mock_run.assert_called_once_with(
diff --git a/frontend/index.html b/frontend/index.html
index 4db11b3..c1fcdfa 100644
--- a/frontend/index.html
+++ b/frontend/index.html
@@ -10,10 +10,22 @@
ServerKit
diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx
index 85ed8ef..51b7040 100644
--- a/frontend/src/App.jsx
+++ b/frontend/src/App.jsx
@@ -35,6 +35,9 @@ import WordPressDetail from './pages/WordPressDetail';
import WordPressProjects from './pages/WordPressProjects';
import WordPressProject from './pages/WordPressProject';
import SSLCertificates from './pages/SSLCertificates';
+import Email from './pages/Email';
+import SSOCallback from './pages/SSOCallback';
+import DatabaseMigration from './pages/DatabaseMigration';
// Page title mapping
const PAGE_TITLES = {
@@ -60,8 +63,10 @@ const PAGE_TITLES = {
'/backups': 'Backups',
'/cron': 'Cron Jobs',
'/security': 'Security',
+ '/email': 'Email Server',
'/terminal': 'Terminal',
'/settings': 'Settings',
+ '/migrate': 'Database Migration',
};
function PageTitleUpdater() {
@@ -71,9 +76,13 @@ function PageTitleUpdater() {
const path = location.pathname;
let title = PAGE_TITLES[path];
- // Handle dynamic routes
+ // Handle dynamic routes and tab sub-routes
if (!title) {
- if (path.startsWith('/apps/')) title = 'Application Details';
+ // Check if it's a base page with a tab suffix (e.g., /security/firewall)
+ const basePath = '/' + path.split('/')[1];
+ if (PAGE_TITLES[basePath]) {
+ title = PAGE_TITLES[basePath];
+ } else if (path.startsWith('/apps/')) title = 'Application Details';
else if (path.startsWith('/servers/')) title = 'Server Details';
else if (path.startsWith('/wordpress/projects/')) title = 'WordPress Pipeline';
else if (path.startsWith('/wordpress/')) title = 'WordPress Site';
@@ -87,13 +96,17 @@ function PageTitleUpdater() {
}
function PrivateRoute({ children }) {
- const { isAuthenticated, loading, needsSetup } = useAuth();
+ const { isAuthenticated, loading, needsSetup, needsMigration } = useAuth();
if (loading) {
return Loading...
;
}
- // If setup is needed, redirect to setup
+ // Priority: migrations > setup > auth
+ if (needsMigration) {
+ return ;
+ }
+
if (needsSetup) {
return ;
}
@@ -102,13 +115,17 @@ function PrivateRoute({ children }) {
}
function PublicRoute({ children }) {
- const { isAuthenticated, loading, needsSetup } = useAuth();
+ const { isAuthenticated, loading, needsSetup, needsMigration } = useAuth();
if (loading) {
return Loading...
;
}
- // If setup is needed, redirect to setup
+ // Priority: migrations > setup > auth
+ if (needsMigration) {
+ return ;
+ }
+
if (needsSetup) {
return ;
}
@@ -132,10 +149,9 @@ function SetupRoute({ children }) {
}
function AppRoutes() {
- const { registrationEnabled } = useAuth();
-
return (
+ } />
@@ -146,13 +162,16 @@ function AppRoutes() {
} />
- {registrationEnabled && (
-
-
-
- } />
- )}
+
+
+
+ } />
+
+
+
+ } />
@@ -161,30 +180,44 @@ function AppRoutes() {
} />
} />
} />
+ } />
} />
} />
} />
+ } />
} />
+ } />
} />
} />
} />
} />
+ } />
} />
} />
+ } />
} />
} />
- } />
+ } />
} />
- } />
+ } />
} />
+ } />
} />
} />
+ } />
} />
+ } />
} />
+ } />
} />
} />
+ } />
+ } />
+ } />
} />
+ } />
} />
+ } />
);
diff --git a/frontend/src/components/MetricsGraph.jsx b/frontend/src/components/MetricsGraph.jsx
index f2beceb..a23ba3c 100644
--- a/frontend/src/components/MetricsGraph.jsx
+++ b/frontend/src/components/MetricsGraph.jsx
@@ -13,7 +13,7 @@ const CHART_COLORS = {
disk: '#f59e0b' // Amber/Orange (Disk)
};
-const MetricsGraph = ({ compact = false, timezone }) => {
+const MetricsGraph = ({ compact = false, timezone, serverId }) => {
const [data, setData] = useState(null);
const [period, setPeriod] = useState('1h');
const [loading, setLoading] = useState(true);
@@ -35,12 +35,14 @@ const MetricsGraph = ({ compact = false, timezone }) => {
useEffect(() => {
loadHistory();
- }, [period]);
+ }, [period, serverId]);
async function loadHistory() {
try {
setLoading(true);
- const response = await api.getMetricsHistory(period);
+ const response = serverId
+ ? await api.getServerMetricsHistory(serverId, period)
+ : await api.getMetricsHistory(period);
setData(response);
setError(null);
} catch (err) {
@@ -64,9 +66,9 @@ const MetricsGraph = ({ compact = false, timezone }) => {
const chartData = data?.data?.map(point => ({
time: formatTimestamp(point.timestamp),
- cpu: point.cpu.percent,
- memory: point.memory.percent,
- disk: point.disk.percent
+ cpu: point.cpu?.percent ?? point.cpu_percent ?? 0,
+ memory: point.memory?.percent ?? point.memory_percent ?? 0,
+ disk: point.disk?.percent ?? point.disk_percent ?? 0
})) || [];
// Auto-zoom: compute Y-axis ceiling from visible metrics
diff --git a/frontend/src/components/SSOProviderIcon.jsx b/frontend/src/components/SSOProviderIcon.jsx
new file mode 100644
index 0000000..7054c95
--- /dev/null
+++ b/frontend/src/components/SSOProviderIcon.jsx
@@ -0,0 +1,27 @@
+import React from 'react';
+import { Key } from 'lucide-react';
+
+const GoogleIcon = () => (
+
+);
+
+const GitHubIcon = () => (
+
+);
+
+const SSOProviderIcon = ({ provider }) => {
+ switch (provider) {
+ case 'google': return ;
+ case 'github': return ;
+ default: return ;
+ }
+};
+
+export default SSOProviderIcon;
diff --git a/frontend/src/components/ServerKitLogo.jsx b/frontend/src/components/ServerKitLogo.jsx
new file mode 100644
index 0000000..f91dc0e
--- /dev/null
+++ b/frontend/src/components/ServerKitLogo.jsx
@@ -0,0 +1,27 @@
+const ServerKitLogo = ({ width = 64, height = 64, className = '' }) => (
+
+);
+
+export default ServerKitLogo;
diff --git a/frontend/src/components/Sidebar.jsx b/frontend/src/components/Sidebar.jsx
index dd93a74..f56dd00 100644
--- a/frontend/src/components/Sidebar.jsx
+++ b/frontend/src/components/Sidebar.jsx
@@ -2,13 +2,13 @@ import React, { useState, useEffect, useRef } from 'react';
import { NavLink, useNavigate } from 'react-router-dom';
import { useAuth } from '../contexts/AuthContext';
import { useTheme } from '../contexts/ThemeContext';
-import { Star, Settings, LogOut, Sun, Moon, Monitor, ChevronRight, ChevronUp } from 'lucide-react';
+import { Star, Settings, LogOut, Sun, Moon, Monitor, ChevronRight, ChevronUp, Layers } from 'lucide-react';
import { api } from '../services/api';
-import ServerKitLogo from '../assets/ServerKitLogo.svg';
+import ServerKitLogo from './ServerKitLogo';
const Sidebar = () => {
const { user, logout } = useAuth();
- const { theme, resolvedTheme, setTheme } = useTheme();
+ const { theme, resolvedTheme, setTheme, whiteLabel } = useTheme();
const navigate = useNavigate();
const [starAnimating, setStarAnimating] = useState(false);
const [menuOpen, setMenuOpen] = useState(false);
@@ -35,6 +35,8 @@ const Sidebar = () => {
}, []);
useEffect(() => {
+ if (whiteLabel.enabled) return;
+
let playCount = 0;
let timeoutId;
@@ -68,32 +70,63 @@ const Sidebar = () => {
clearTimeout(initialDelay);
clearTimeout(timeoutId);
};
- }, []);
+ }, [whiteLabel.enabled]);
return (
diff --git a/frontend/src/components/settings/UsersTab.jsx b/frontend/src/components/settings/UsersTab.jsx
index 0ddf7bb..5a69983 100644
--- a/frontend/src/components/settings/UsersTab.jsx
+++ b/frontend/src/components/settings/UsersTab.jsx
@@ -2,6 +2,7 @@ import React, { useState, useEffect } from 'react';
import api from '../../services/api';
import { useAuth } from '../../contexts/AuthContext';
import UserModal from './UserModal';
+import InvitationsTab from './InvitationsTab';
const UsersTab = () => {
const [users, setUsers] = useState([]);
@@ -252,6 +253,8 @@ const UsersTab = () => {
)}
+
+