diff --git a/.gitignore b/.gitignore index 88124d9..02edd6d 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,8 @@ .env.*.local *.key *.pem +*.crt +*.csr # OS files .DS_Store diff --git a/CLAUDE.md b/CLAUDE.md index fd074f1..436bb49 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -18,7 +18,7 @@ python run.py cd frontend && npm install && npm run dev # Both at once (Linux/WSL) -./scripts/dev/start.sh +./dev.sh # Frontend lint cd frontend && npm run lint @@ -75,6 +75,14 @@ Browser → Nginx (`:80`/`:443`) → proxy_pass to Docker containers (`:8001-899 The Dockerfile is multi-stage: Node 20 builds frontend, Python 3.11 serves everything via Gunicorn with GeventWebSocket workers. Built frontend is served from Flask's static folder. +## Platform & Distro Awareness + +ServerKit deploys on Linux (bare metal, VPS, or Docker). Development may happen on Windows/macOS. + +- **Service layer is Linux-only** — nginx, systemctl, apt/dnf, PHP-FPM, etc. are inherently Linux. No need to abstract these for Windows. +- **Platform-agnostic code** (config management, storage, API layer) should guard Unix-only calls like `os.chmod` with `if os.name != 'nt'` so the dev server can run locally on any OS. +- **Distro differences matter** — use `backend/app/utils/system.py` helpers (`get_package_manager`, `is_package_installed`, `install_package`) instead of calling `apt`/`dpkg`/`dnf` directly. Not all targets are Debian-based. + ## Code Style ### Python diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b19a7f4..cf83ea5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,7 +17,7 @@ chmod +x ./scripts/dev/*.sh ./scripts/dev/setup-wsl.sh # 3. 
Start dev servers -./scripts/dev/start.sh +./dev.sh ``` Open http://localhost:5173 — login: `admin` / `admin` @@ -31,7 +31,7 @@ Open http://localhost:5173 — login: `admin` / `admin` ```bash ./scripts/dev/setup-linux.sh -./scripts/dev/start.sh +./dev.sh ``` ### Docker @@ -47,7 +47,7 @@ docker compose -f docker-compose.dev.yml up --build # Linux/Mac | Task | Command | |------|---------| -| Start both | `./scripts/dev/start.sh` | +| Start both | `./dev.sh` | | Backend only | `cd backend && source venv/bin/activate && python run.py` | | Frontend only | `cd frontend && npm run dev` | | Build frontend | `cd frontend && npm run build` | @@ -72,6 +72,7 @@ docker compose -f docker-compose.dev.yml up --build # Linux/Mac git clone https://github.com/YOUR_USERNAME/ServerKit.git cd ServerKit git remote add upstream https://github.com/jhd3197/ServerKit.git +git checkout dev ``` ### Backend Setup @@ -258,6 +259,22 @@ npm test npm run test:coverage # With coverage ``` +### Validate Before Submitting + +Run the dev validation suite to check for common issues: + +```powershell +# Windows +.\dev.ps1 validate +``` + +```bash +# Linux/macOS +./dev.sh validate +``` + +This runs eslint, bandit (security scanner), pytest, and a frontend production build. + ### Manual Testing Before submitting, test your changes: @@ -276,7 +293,7 @@ Before submitting, test your changes: 1. **Update your fork:** ```bash git fetch upstream - git rebase upstream/main + git rebase upstream/dev ``` 2. **Push your branch:** @@ -285,10 +302,12 @@ Before submitting, test your changes: ``` 3. **Create Pull Request:** - - Go to GitHub and create a PR + - Go to GitHub and create a PR **targeting the `dev` branch** (not `main`) - Fill out the PR template - Link any related issues +> **Important:** All PRs should target the `dev` branch, not `main`. The `main` branch is reserved for stable releases. + 4. 
**PR Description:** - Describe what changed and why - Include screenshots for UI changes diff --git a/ROADMAP.md b/ROADMAP.md index 3f28a02..f91f8e4 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -151,18 +151,18 @@ This document outlines the development roadmap for ServerKit. Features are organ --- -## Phase 12: Backup & Restore (Planned) +## Phase 12: Backup & Restore (Completed) **Priority: High** -- [ ] Automated database backups -- [ ] File/directory backups -- [ ] S3-compatible storage support -- [ ] Backblaze B2 integration -- [ ] Backup scheduling -- [ ] One-click restore -- [ ] Backup retention policies -- [ ] Offsite backup verification +- [x] Automated database backups +- [x] File/directory backups +- [x] S3-compatible storage support +- [x] Backblaze B2 integration +- [x] Backup scheduling +- [x] One-click restore +- [x] Backup retention policies +- [x] Offsite backup verification --- diff --git a/backend/.bandit b/backend/.bandit index 7edcb96..104abf0 100644 --- a/backend/.bandit +++ b/backend/.bandit @@ -3,7 +3,17 @@ exclude_dirs = tests,venv,.venv # Skip checks that are expected noise for a server management tool: +# B108: hardcoded_tmp_directory — intentional temp dirs for sync operations +# B110: try_except_pass — cleanup/daemon patterns throughout codebase +# B310: urllib_urlopen — internal diagnostic and update-check calls +# B321: ftplib — FTP service exists by design +# B402: import_ftplib — same as above # B404: import_subprocess — we intentionally use subprocess throughout +# B602: subprocess_popen_with_shell_equals_true — build/cron/deploy hooks require shell # B603: subprocess_without_shell_check — we use subprocess.run deliberately # B607: start_process_with_partial_path — expected for system commands -skips = B404,B603,B607 +# B608: hardcoded_sql_expressions — admin-only DB service with internal calls, not user input +# B105: hardcoded_password_string — false positives on empty-string defaults and mask values +# B112: try_except_continue — 
same pattern as B110, used in iteration/cleanup loops +# B311: random — used for non-security port selection, not crypto +skips = B105,B108,B110,B112,B310,B311,B321,B402,B404,B602,B603,B607,B608 diff --git a/backend/.gitignore b/backend/.gitignore index 8de61ab..d8bc48e 100644 --- a/backend/.gitignore +++ b/backend/.gitignore @@ -5,8 +5,15 @@ __pycache__/ *.so .Python venv/ +.venv/ env/ ENV/ +.env.local +.env.*.local +*.key +*.pem +*.crt +*.csr # Database *.db diff --git a/backend/app/api/backups.py b/backend/app/api/backups.py index b6ea26b..441b8a1 100644 --- a/backend/app/api/backups.py +++ b/backend/app/api/backups.py @@ -3,6 +3,7 @@ from flask_jwt_extended import jwt_required, get_jwt_identity from app.models import User, Application from app.services.backup_service import BackupService +from app.services.storage_provider_service import StorageProviderService from app import paths backups_bp = Blueprint('backups', __name__) @@ -24,6 +25,7 @@ def wrapper(*args, **kwargs): @backups_bp.route('', methods=['GET']) @jwt_required() +@admin_required def list_backups(): """List all backups.""" backup_type = request.args.get('type') @@ -33,6 +35,7 @@ def list_backups(): @backups_bp.route('/stats', methods=['GET']) @jwt_required() +@admin_required def get_stats(): """Get backup statistics.""" stats = BackupService.get_backup_stats() @@ -117,6 +120,28 @@ def backup_database(): return jsonify(result), 201 if result['success'] else 400 +@backups_bp.route('/files', methods=['POST']) +@jwt_required() +@admin_required +def backup_files(): + """Backup specific files and directories.""" + data = request.get_json() + + if not data: + return jsonify({'error': 'No data provided'}), 400 + + file_paths = data.get('paths', []) + if not file_paths: + return jsonify({'error': 'paths is required (list of file/directory paths)'}), 400 + + result = BackupService.backup_files( + file_paths=file_paths, + backup_name=data.get('name') + ) + + return jsonify(result), 201 if result['success'] 
else 400 + + @backups_bp.route('/restore/application', methods=['POST']) @jwt_required() @admin_required @@ -168,7 +193,9 @@ def restore_database(): def delete_backup(backup_path): """Delete a backup.""" # Ensure path is within backup directory - full_path = os.path.join(paths.SERVERKIT_BACKUP_DIR, backup_path) + full_path = os.path.realpath(os.path.join(paths.SERVERKIT_BACKUP_DIR, backup_path)) + if not full_path.startswith(os.path.realpath(paths.SERVERKIT_BACKUP_DIR)): + return jsonify({'error': 'Invalid backup path'}), 400 result = BackupService.delete_backup(full_path) return jsonify(result), 200 if result['success'] else 400 @@ -185,7 +212,8 @@ def cleanup_backups(): return jsonify(result), 200 if result['success'] else 400 -# Schedules +# --- Schedules --- + @backups_bp.route('/schedules', methods=['GET']) @jwt_required() @admin_required @@ -215,12 +243,26 @@ def add_schedule(): backup_type=data['backup_type'], target=data['target'], schedule_time=data['schedule_time'], - days=data.get('days') + days=data.get('days'), + upload_remote=data.get('upload_remote', False) ) return jsonify(result), 201 if result['success'] else 400 +@backups_bp.route('/schedules/<schedule_id>', methods=['PUT']) +@jwt_required() +@admin_required +def update_schedule(schedule_id): + """Update a backup schedule.""" + data = request.get_json() + if not data: + return jsonify({'error': 'No data provided'}), 400 + + result = BackupService.update_schedule(schedule_id, data) + return jsonify(result), 200 if result.get('success') else 400 + + @backups_bp.route('/schedules/<schedule_id>', methods=['DELETE']) @jwt_required() @admin_required @@ -228,3 +270,123 @@ def remove_schedule(schedule_id): """Remove a backup schedule.""" result = BackupService.remove_schedule(schedule_id) return jsonify(result), 200 if result['success'] else 400 + + +# --- Remote Storage --- + +@backups_bp.route('/storage', methods=['GET']) +@jwt_required() +@admin_required +def get_storage_config(): + """Get storage provider configuration 
(secrets masked).""" + config = StorageProviderService.get_config_masked() + return jsonify(config), 200 + + +@backups_bp.route('/storage', methods=['PUT']) +@jwt_required() +@admin_required +def update_storage_config(): + """Update storage provider configuration.""" + data = request.get_json() + if not data: + return jsonify({'error': 'No data provided'}), 400 + + result = StorageProviderService.save_config(data) + return jsonify(result), 200 if result['success'] else 400 + + +@backups_bp.route('/storage/test', methods=['POST']) +@jwt_required() +@admin_required +def test_storage_connection(): + """Test connection to storage provider.""" + data = request.get_json() + # Use provided config for testing, or current saved config + config = data if data else None + result = StorageProviderService.test_connection(config) + return jsonify(result), 200 if result['success'] else 400 + + +@backups_bp.route('/upload', methods=['POST']) +@jwt_required() +@admin_required +def upload_to_remote(): + """Upload a local backup to remote storage.""" + data = request.get_json() + if not data or 'backup_path' not in data: + return jsonify({'error': 'backup_path is required'}), 400 + + backup_path = os.path.realpath(data['backup_path']) + + if not backup_path.startswith(os.path.realpath(paths.SERVERKIT_BACKUP_DIR)): + return jsonify({'error': 'Invalid backup path'}), 400 + + if os.path.isdir(backup_path): + result = StorageProviderService.upload_directory(backup_path) + elif os.path.isfile(backup_path): + result = StorageProviderService.upload_file(backup_path) + else: + return jsonify({'error': 'Backup not found'}), 404 + + return jsonify(result), 200 if result['success'] else 400 + + +@backups_bp.route('/verify', methods=['POST']) +@jwt_required() +@admin_required +def verify_remote_backup(): + """Verify a backup exists and matches on remote storage.""" + data = request.get_json() + if not data: + return jsonify({'error': 'No data provided'}), 400 + + remote_key = 
data.get('remote_key') + local_path = data.get('local_path') + + if not remote_key or not local_path: + return jsonify({'error': 'remote_key and local_path are required'}), 400 + + local_path = os.path.realpath(local_path) + if not local_path.startswith(os.path.realpath(paths.SERVERKIT_BACKUP_DIR)): + return jsonify({'error': 'local_path must be within backup directory'}), 400 + + result = StorageProviderService.verify_file(remote_key, local_path) + return jsonify(result), 200 + + +@backups_bp.route('/remote', methods=['GET']) +@jwt_required() +@admin_required +def list_remote_backups(): + """List backups on remote storage.""" + prefix = request.args.get('prefix') + result = StorageProviderService.list_files(prefix) + return jsonify(result), 200 if result['success'] else 400 + + +@backups_bp.route('/remote/download', methods=['POST']) +@jwt_required() +@admin_required +def download_from_remote(): + """Download a backup from remote storage to local.""" + data = request.get_json() + if not data or 'remote_key' not in data: + return jsonify({'error': 'remote_key is required'}), 400 + + remote_key = data['remote_key'] + # Determine local path from remote key + local_path = data.get('local_path') + if not local_path: + # Strip prefix and save to backup dir + key_parts = remote_key.split('/') + # Remove the prefix path component + filename = '/'.join(key_parts[1:]) if len(key_parts) > 1 else key_parts[0] + local_path = os.path.join(paths.SERVERKIT_BACKUP_DIR, filename) + + local_path = os.path.realpath(local_path) + if not local_path.startswith(os.path.realpath(paths.SERVERKIT_BACKUP_DIR)): + return jsonify({'error': 'Download path must be within backup directory'}), 400 + + result = StorageProviderService.download_file(remote_key, local_path) + return jsonify(result), 200 if result['success'] else 400 diff --git a/backend/app/services/backup_service.py b/backend/app/services/backup_service.py index a96542b..5104c9d 100644 --- a/backend/app/services/backup_service.py +++ 
b/backend/app/services/backup_service.py @@ -9,6 +9,7 @@ from pathlib import Path import threading import time +import uuid import schedule from app import paths @@ -25,6 +26,7 @@ class BackupService: TYPE_APP = 'application' TYPE_DATABASE = 'database' TYPE_FULL = 'full' + TYPE_FILES = 'files' _scheduler_thread = None _stop_scheduler = False @@ -39,7 +41,7 @@ def get_backup_dir(cls, backup_type: str = None) -> str: @classmethod def ensure_backup_dirs(cls) -> None: """Ensure backup directories exist.""" - for subdir in ['applications', 'databases', 'full', 'scheduled']: + for subdir in ['applications', 'databases', 'full', 'scheduled', 'files']: path = os.path.join(cls.BACKUP_BASE_DIR, subdir) os.makedirs(path, exist_ok=True) @@ -99,7 +101,8 @@ def backup_application(cls, app_name: str, app_path: str, 'timestamp': datetime.now().isoformat(), 'type': cls.TYPE_APP, 'files_backup': files_backup, - 'size': os.path.getsize(files_backup) + 'size': os.path.getsize(files_backup), + 'remote_status': 'local' } # Backup database if requested @@ -119,6 +122,9 @@ def backup_application(cls, app_name: str, app_path: str, with open(meta_path, 'w') as f: json.dump(backup_info, f, indent=2) + # Auto-upload to remote if configured + cls._auto_upload(backup_dir, backup_info) + return { 'success': True, 'backup': backup_info, @@ -202,21 +208,84 @@ def backup_database(cls, db_type: str, db_name: str, if result.get('success'): # Rename to final path os.rename(result['path'], backup_path) + + backup_info = { + 'name': backup_name, + 'path': backup_path, + 'timestamp': datetime.now().isoformat(), + 'type': cls.TYPE_DATABASE, + 'database_type': db_type, + 'database_name': db_name, + 'size': os.path.getsize(backup_path), + 'remote_status': 'local' + } + + # Auto-upload to remote if configured + cls._auto_upload(backup_path, backup_info) + return { 'success': True, - 'backup': { - 'name': backup_name, - 'path': backup_path, - 'timestamp': datetime.now().isoformat(), - 'type': 
cls.TYPE_DATABASE, - 'database_type': db_type, - 'database_name': db_name, - 'size': os.path.getsize(backup_path) - } + 'backup': backup_info } return result + @classmethod + def backup_files(cls, file_paths: List[str], backup_name: str = None) -> Dict: + """Backup specific files and directories.""" + cls.ensure_backup_dirs() + + # Validate paths + valid_paths = [] + for p in file_paths: + if os.path.exists(p): + valid_paths.append(p) + + if not valid_paths: + return {'success': False, 'error': 'No valid file paths provided'} + + timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') + if not backup_name: + backup_name = f"files_{timestamp}" + else: + backup_name = f"{backup_name}_{timestamp}" + + backup_file = os.path.join(cls.BACKUP_BASE_DIR, 'files', f'{backup_name}.tar.gz') + + try: + with tarfile.open(backup_file, 'w:gz') as tar: + for p in valid_paths: + tar.add(p, arcname=os.path.basename(p)) + + backup_info = { + 'name': f'{backup_name}.tar.gz', + 'path': backup_file, + 'timestamp': datetime.now().isoformat(), + 'type': cls.TYPE_FILES, + 'source_paths': valid_paths, + 'size': os.path.getsize(backup_file), + 'remote_status': 'local' + } + + # Save metadata alongside the archive + meta_path = os.path.join(cls.BACKUP_BASE_DIR, 'files', f'{backup_name}.json') + with open(meta_path, 'w') as f: + json.dump(backup_info, f, indent=2) + + # Auto-upload to remote if configured + cls._auto_upload(backup_file, backup_info) + + return { + 'success': True, + 'backup': backup_info, + 'path': backup_file + } + + except Exception as e: + if os.path.exists(backup_file): + os.remove(backup_file) + return {'success': False, 'error': str(e)} + @classmethod def restore_application(cls, backup_path: str, restore_path: str = None) -> Dict: """Restore an application from backup.""" @@ -243,9 +312,9 @@ def restore_application(cls, backup_path: str, restore_path: str = None) -> Dict backup_existing = f"{restore_path}.backup_{timestamp}" shutil.move(restore_path, backup_existing) - # 
Extract backup + # Extract backup (filter='data' blocks path traversal in archives) with tarfile.open(files_backup, 'r:gz') as tar: - tar.extractall(os.path.dirname(restore_path)) + tar.extractall(os.path.dirname(restore_path), filter='data') return { 'success': True, @@ -315,10 +384,13 @@ def list_backups(cls, backup_type: str = None) -> List[Dict]: search_dirs = [os.path.join(cls.BACKUP_BASE_DIR, 'applications')] elif backup_type == 'database': search_dirs = [os.path.join(cls.BACKUP_BASE_DIR, 'databases')] + elif backup_type == 'files': + search_dirs = [os.path.join(cls.BACKUP_BASE_DIR, 'files')] else: search_dirs = [ os.path.join(cls.BACKUP_BASE_DIR, 'applications'), os.path.join(cls.BACKUP_BASE_DIR, 'databases'), + os.path.join(cls.BACKUP_BASE_DIR, 'files'), os.path.join(cls.BACKUP_BASE_DIR, 'scheduled') ] @@ -348,8 +420,31 @@ def list_backups(cls, backup_type: str = None) -> List[Dict]: 'path': item_path, 'type': cls.TYPE_DATABASE, 'size': stat.st_size, - 'timestamp': datetime.fromtimestamp(stat.st_mtime).isoformat() + 'timestamp': datetime.fromtimestamp(stat.st_mtime).isoformat(), + 'remote_status': 'local' }) + elif item.endswith('.tar.gz') and search_dir.endswith('files'): + # File backup - check for metadata + meta_name = item.replace('.tar.gz', '.json') + meta_path = os.path.join(search_dir, meta_name) + if os.path.exists(meta_path): + try: + with open(meta_path, 'r') as f: + backup_info = json.load(f) + backup_info['path'] = item_path + backups.append(backup_info) + except Exception: + pass + else: + stat = os.stat(item_path) + backups.append({ + 'name': item, + 'path': item_path, + 'type': cls.TYPE_FILES, + 'size': stat.st_size, + 'timestamp': datetime.fromtimestamp(stat.st_mtime).isoformat(), + 'remote_status': 'local' + }) # Sort by timestamp (newest first) backups.sort(key=lambda x: x.get('timestamp', ''), reverse=True) @@ -359,7 +454,8 @@ def list_backups(cls, backup_type: str = None) -> List[Dict]: @classmethod def delete_backup(cls, backup_path: 
str) -> Dict: """Delete a backup.""" - if not backup_path.startswith(cls.BACKUP_BASE_DIR): + backup_path = os.path.realpath(backup_path) + if not backup_path.startswith(os.path.realpath(cls.BACKUP_BASE_DIR)): return {'success': False, 'error': 'Invalid backup path'} try: @@ -367,6 +463,11 @@ def delete_backup(cls, backup_path: str) -> Dict: shutil.rmtree(backup_path) elif os.path.exists(backup_path): os.remove(backup_path) + # Also remove metadata file for file backups + if backup_path.endswith('.tar.gz'): + meta_path = backup_path.replace('.tar.gz', '.json') + if os.path.exists(meta_path): + os.remove(meta_path) else: return {'success': False, 'error': 'Backup not found'} @@ -407,19 +508,22 @@ def cleanup_old_backups(cls, retention_days: int = None) -> Dict: @classmethod def add_schedule(cls, name: str, backup_type: str, target: str, - schedule_time: str, days: List[str] = None) -> Dict: + schedule_time: str, days: List[str] = None, + upload_remote: bool = False) -> Dict: """Add a backup schedule.""" config = cls.get_config() schedule_entry = { - 'id': datetime.now().strftime('%Y%m%d%H%M%S'), + 'id': uuid.uuid4().hex[:12], 'name': name, 'backup_type': backup_type, 'target': target, 'schedule_time': schedule_time, 'days': days or ['daily'], 'enabled': True, - 'last_run': None + 'upload_remote': upload_remote, + 'last_run': None, + 'last_status': None } config.setdefault('schedules', []).append(schedule_entry) @@ -429,6 +533,24 @@ def add_schedule(cls, name: str, backup_type: str, target: str, return {'success': True, 'schedule': schedule_entry} return result + @classmethod + def update_schedule(cls, schedule_id: str, updates: Dict) -> Dict: + """Update a backup schedule.""" + config = cls.get_config() + schedules = config.get('schedules', []) + + for i, s in enumerate(schedules): + if s.get('id') == schedule_id: + allowed_fields = ['name', 'backup_type', 'target', 'schedule_time', + 'days', 'enabled', 'upload_remote'] + for field in allowed_fields: + if field in 
updates: + schedules[i][field] = updates[field] + config['schedules'] = schedules + return cls.save_config(config) + + return {'success': False, 'error': 'Schedule not found'} + @classmethod def remove_schedule(cls, schedule_id: str) -> Dict: """Remove a backup schedule.""" @@ -457,15 +579,225 @@ def get_backup_stats(cls) -> Dict: total_size = sum(b.get('size', 0) for b in backups) app_backups = [b for b in backups if b.get('type') == cls.TYPE_APP] db_backups = [b for b in backups if b.get('type') == cls.TYPE_DATABASE] + file_backups = [b for b in backups if b.get('type') == cls.TYPE_FILES] + + # Get remote stats + remote_stats = {'remote_count': 0, 'remote_size': 0, 'remote_size_human': '0 B'} + try: + from app.services.storage_provider_service import StorageProviderService + storage_config = StorageProviderService.get_config() + if storage_config.get('provider', 'local') != 'local': + remote_stats = StorageProviderService.get_remote_stats() + except Exception: + pass return { 'total_backups': len(backups), 'application_backups': len(app_backups), 'database_backups': len(db_backups), + 'file_backups': len(file_backups), 'total_size': total_size, - 'total_size_human': cls._format_size(total_size) + 'total_size_human': cls._format_size(total_size), + 'remote_count': remote_stats.get('remote_count', 0), + 'remote_size': remote_stats.get('remote_size', 0), + 'remote_size_human': remote_stats.get('remote_size_human', '0 B') } + @classmethod + def _auto_upload(cls, backup_path: str, backup_info: Dict) -> None: + """Auto-upload backup to remote storage if configured.""" + try: + from app.services.storage_provider_service import StorageProviderService + storage_config = StorageProviderService.get_config() + + if storage_config.get('provider', 'local') == 'local': + return + if not storage_config.get('auto_upload', False): + return + + if os.path.isdir(backup_path): + result = StorageProviderService.upload_directory(backup_path) + else: + result = 
StorageProviderService.upload_file(backup_path) + + if result.get('success'): + backup_info['remote_status'] = 'synced' + backup_info['remote_key'] = result.get('remote_key', '') + # Update metadata if it's a directory backup + meta_path = os.path.join(backup_path, 'backup.json') if os.path.isdir(backup_path) else None + if meta_path and os.path.exists(meta_path): + with open(meta_path, 'w') as f: + json.dump(backup_info, f, indent=2) + except Exception: + pass + + # --- Scheduler --- + + @classmethod + def start_scheduler(cls) -> None: + """Start the backup scheduler background thread.""" + if cls._scheduler_thread and cls._scheduler_thread.is_alive(): + return + + cls._stop_scheduler = False + cls._scheduler_thread = threading.Thread( + target=cls._scheduler_loop, + daemon=True, + name='backup-scheduler' + ) + cls._scheduler_thread.start() + + @classmethod + def stop_scheduler(cls) -> None: + """Stop the backup scheduler.""" + cls._stop_scheduler = True + if cls._scheduler_thread: + cls._scheduler_thread.join(timeout=5) + cls._scheduler_thread = None + + @classmethod + def _scheduler_loop(cls) -> None: + """Background loop that checks and runs scheduled backups.""" + while not cls._stop_scheduler: + try: + config = cls.get_config() + if config.get('enabled', False): + now = datetime.now() + current_time = now.strftime('%H:%M') + current_day = now.strftime('%A').lower() + + for sched in config.get('schedules', []): + if not sched.get('enabled', False): + continue + + # Check if it's time to run + if sched.get('schedule_time') != current_time: + continue + + # Check day + days = sched.get('days', ['daily']) + if 'daily' not in days and current_day not in days: + continue + + # Check if already ran this minute + last_run = sched.get('last_run') + if last_run: + try: + last_run_time = datetime.fromisoformat(last_run) + if (now - last_run_time).total_seconds() < 120: + continue + except Exception: + pass + + # Run the backup + cls._run_scheduled_backup(sched) + + # 
Run retention cleanup once daily at midnight + if current_time == '00:00': + cls.cleanup_old_backups() + + except Exception: + pass + + # Check every 30 seconds + for _ in range(30): + if cls._stop_scheduler: + return + time.sleep(1) + + @classmethod + def _run_scheduled_backup(cls, sched: Dict) -> None: + """Execute a single scheduled backup.""" + backup_type = sched.get('backup_type', 'database') + target = sched.get('target', '') + result = None + + try: + if backup_type == 'database': + # Parse target as db_type:db_name or just db_name + parts = target.split(':') + if len(parts) == 2: + db_type, db_name = parts + else: + db_type, db_name = 'mysql', target + result = cls.backup_database(db_type, db_name) + + elif backup_type == 'application': + from app.models import Application + app = Application.query.filter_by(name=target).first() + if app: + result = cls.backup_application(app.name, app.root_path) + else: + result = {'success': False, 'error': f'Application "{target}" not found'} + + elif backup_type == 'files': + paths_list = [p.strip() for p in target.split(',') if p.strip()] + result = cls.backup_files(paths_list, backup_name=f"scheduled_{sched.get('name', 'backup')}") + + # Upload to remote if configured on this schedule + if result and result.get('success') and sched.get('upload_remote', False): + try: + from app.services.storage_provider_service import StorageProviderService + backup_path = result.get('path') or result.get('backup', {}).get('path') + if backup_path: + if os.path.isdir(backup_path): + StorageProviderService.upload_directory(backup_path) + else: + StorageProviderService.upload_file(backup_path) + except Exception: + pass + + # Update schedule status + config = cls.get_config() + for s in config.get('schedules', []): + if s.get('id') == sched.get('id'): + s['last_run'] = datetime.now().isoformat() + s['last_status'] = 'success' if result and result.get('success') else 'failed' + break + cls.save_config(config) + + # Send notification on 
failure + if result and not result.get('success'): + cls._send_backup_notification( + sched.get('name', 'Backup'), + False, + result.get('error', 'Unknown error') + ) + + except Exception as e: + # Update schedule status on exception + config = cls.get_config() + for s in config.get('schedules', []): + if s.get('id') == sched.get('id'): + s['last_run'] = datetime.now().isoformat() + s['last_status'] = 'failed' + break + cls.save_config(config) + cls._send_backup_notification(sched.get('name', 'Backup'), False, str(e)) + + @classmethod + def _send_backup_notification(cls, backup_name: str, success: bool, message: str) -> None: + """Send a notification about backup status.""" + try: + from app.services.notification_service import NotificationService + config = cls.get_config() + notifications = config.get('notifications', {}) + + if success and not notifications.get('on_success', False): + return + if not success and not notifications.get('on_failure', True): + return + + severity = 'success' if success else 'critical' + status = 'completed successfully' if success else 'failed' + NotificationService.send_all( + title=f'Backup {status}: {backup_name}', + message=message, + severity=severity + ) + except Exception: + pass + @staticmethod def _format_size(size: int) -> str: """Format size in human readable format.""" diff --git a/backend/app/services/storage_provider_service.py b/backend/app/services/storage_provider_service.py new file mode 100644 index 0000000..2942c00 --- /dev/null +++ b/backend/app/services/storage_provider_service.py @@ -0,0 +1,410 @@ +import os +import json +import hashlib +import ipaddress +from datetime import datetime +from typing import Dict, List, Optional +from urllib.parse import urlparse + +from app import paths + + +class StorageProviderService: + """Service for managing remote backup storage (S3-compatible, Backblaze B2).""" + + CONFIG_FILE = os.path.join(paths.SERVERKIT_CONFIG_DIR, 'storage.json') + + @classmethod + def get_config(cls) 
-> Dict: + """Get storage provider configuration.""" + if os.path.exists(cls.CONFIG_FILE): + try: + with open(cls.CONFIG_FILE, 'r') as f: + return json.load(f) + except Exception: + pass + + return { + 'provider': 'local', + 's3': { + 'bucket': '', + 'region': 'us-east-1', + 'access_key': '', + 'secret_key': '', + 'endpoint_url': '', + 'path_prefix': 'serverkit-backups' + }, + 'b2': { + 'bucket': '', + 'key_id': '', + 'application_key': '', + 'endpoint_url': '', + 'path_prefix': 'serverkit-backups' + }, + 'auto_upload': False, + 'keep_local_copy': True + } + + @classmethod + def get_config_masked(cls) -> Dict: + """Get storage config with secrets masked.""" + config = cls.get_config() + masked = json.loads(json.dumps(config)) + + secret_fields = { + 's3': ['access_key', 'secret_key'], + 'b2': ['key_id', 'application_key'] + } + + for provider, fields in secret_fields.items(): + if provider in masked: + for field in fields: + val = masked[provider].get(field, '') + if val and len(val) > 4: + masked[provider][field] = val[:4] + '*' * (len(val) - 4) + + return masked + + @classmethod + def save_config(cls, config: Dict) -> Dict: + """Save storage provider configuration.""" + try: + os.makedirs(paths.SERVERKIT_CONFIG_DIR, exist_ok=True) + + # Merge with existing config to preserve unmasked secrets + existing = cls.get_config() + secret_fields = { + 's3': ['access_key', 'secret_key'], + 'b2': ['key_id', 'application_key'] + } + + for provider, fields in secret_fields.items(): + if provider in config: + for field in fields: + new_val = config[provider].get(field, '') + # Detect our masking pattern: 4 visible chars followed by only asterisks + if new_val and len(new_val) > 4 and new_val[4:] == '*' * (len(new_val) - 4): + # Keep existing value if masked + config[provider][field] = existing.get(provider, {}).get(field, '') + + with open(cls.CONFIG_FILE, 'w') as f: + json.dump(config, f, indent=2) + if os.name != 'nt': + os.chmod(cls.CONFIG_FILE, 0o600) + return {'success': 
True, 'message': 'Storage configuration saved'} + except Exception as e: + return {'success': False, 'error': str(e)} + + @classmethod + def _validate_endpoint_url(cls, url: str) -> None: + """Validate an S3 endpoint URL to prevent SSRF attacks. + + Blocks private/internal IP ranges and requires http(s) scheme. + Raises ValueError if the URL is invalid or targets a private network. + """ + if not url: + return + + parsed = urlparse(url) + + if parsed.scheme not in ('http', 'https'): + raise ValueError(f"Endpoint URL must use http or https scheme, got '{parsed.scheme}'") + + hostname = parsed.hostname + if not hostname: + raise ValueError("Endpoint URL has no hostname") + + try: + addr = ipaddress.ip_address(hostname) + if addr.is_private or addr.is_loopback or addr.is_link_local or addr.is_reserved: + raise ValueError(f"Endpoint URL must not target private/internal IP: {hostname}") + except ValueError as e: + # If it's our own raised ValueError, re-raise + if "must not target" in str(e) or "must use" in str(e): + raise + # Otherwise it's not a valid IP — it's a hostname, which is allowed + + @classmethod + def _get_client(cls, config: Dict = None): + """Get boto3 S3 client based on config.""" + import boto3 + + if config is None: + config = cls.get_config() + + provider = config.get('provider', 'local') + if provider == 'local': + return None, None, None + + if provider == 's3': + provider_config = config.get('s3', {}) + endpoint_url = provider_config.get('endpoint_url') or None + if endpoint_url: + cls._validate_endpoint_url(endpoint_url) + client = boto3.client( + 's3', + region_name=provider_config.get('region', 'us-east-1'), + aws_access_key_id=provider_config.get('access_key'), + aws_secret_access_key=provider_config.get('secret_key'), + endpoint_url=endpoint_url + ) + bucket = provider_config.get('bucket', '') + prefix = provider_config.get('path_prefix', 'serverkit-backups') + + elif provider == 'b2': + provider_config = config.get('b2', {}) + endpoint_url = 
provider_config.get('endpoint_url') or None + if endpoint_url: + cls._validate_endpoint_url(endpoint_url) + client = boto3.client( + 's3', + endpoint_url=endpoint_url, + aws_access_key_id=provider_config.get('key_id'), + aws_secret_access_key=provider_config.get('application_key') + ) + bucket = provider_config.get('bucket', '') + prefix = provider_config.get('path_prefix', 'serverkit-backups') + + else: + return None, None, None + + return client, bucket, prefix + + @classmethod + def test_connection(cls, config: Dict = None) -> Dict: + """Test connection to storage provider.""" + try: + client, bucket, prefix = cls._get_client(config) + if client is None: + return {'success': False, 'error': 'No remote provider configured'} + + # Try to list objects (limited to 1) to verify access + client.list_objects_v2(Bucket=bucket, Prefix=prefix, MaxKeys=1) + + return { + 'success': True, + 'message': f'Connected to bucket "{bucket}" successfully' + } + except Exception as e: + error_msg = str(e) + if 'NoSuchBucket' in error_msg: + return {'success': False, 'error': f'Bucket "{bucket}" does not exist'} + if 'AccessDenied' in error_msg or 'InvalidAccessKeyId' in error_msg: + return {'success': False, 'error': 'Access denied - check your credentials'} + return {'success': False, 'error': error_msg} + + @classmethod + def upload_file(cls, local_path: str, remote_key: str = None) -> Dict: + """Upload a file to remote storage.""" + try: + client, bucket, prefix = cls._get_client() + if client is None: + return {'success': False, 'error': 'No remote provider configured'} + + if not os.path.exists(local_path): + return {'success': False, 'error': f'Local file not found: {local_path}'} + + if remote_key is None: + # Use relative path from backup dir as key + remote_key = os.path.relpath(local_path, paths.SERVERKIT_BACKUP_DIR) + + full_key = f"{prefix}/{remote_key}" if prefix else remote_key + + file_size = os.path.getsize(local_path) + + # Use multipart upload for files > 100MB + 
from boto3.s3.transfer import TransferConfig + transfer_config = TransferConfig( + multipart_threshold=100 * 1024 * 1024, + multipart_chunksize=50 * 1024 * 1024 + ) + + client.upload_file( + local_path, bucket, full_key, + Config=transfer_config + ) + + return { + 'success': True, + 'message': f'Uploaded to {full_key}', + 'remote_key': full_key, + 'size': file_size + } + except Exception as e: + return {'success': False, 'error': str(e)} + + @classmethod + def upload_directory(cls, local_dir: str, remote_prefix: str = None) -> Dict: + """Upload all files in a directory to remote storage.""" + try: + client, bucket, prefix = cls._get_client() + if client is None: + return {'success': False, 'error': 'No remote provider configured'} + + if not os.path.isdir(local_dir): + return {'success': False, 'error': f'Directory not found: {local_dir}'} + + if remote_prefix is None: + remote_prefix = os.path.relpath(local_dir, paths.SERVERKIT_BACKUP_DIR) + + uploaded = 0 + total_size = 0 + + for root, dirs, files in os.walk(local_dir, followlinks=False): + for filename in files: + local_path = os.path.join(root, filename) + if os.path.islink(local_path): + continue + rel_path = os.path.relpath(local_path, local_dir) + full_key = f"{prefix}/{remote_prefix}/{rel_path}" if prefix else f"{remote_prefix}/{rel_path}" + + client.upload_file(local_path, bucket, full_key) + uploaded += 1 + total_size += os.path.getsize(local_path) + + return { + 'success': True, + 'message': f'Uploaded {uploaded} file(s)', + 'files_uploaded': uploaded, + 'total_size': total_size + } + except Exception as e: + return {'success': False, 'error': str(e)} + + @classmethod + def download_file(cls, remote_key: str, local_path: str) -> Dict: + """Download a file from remote storage.""" + try: + client, bucket, prefix = cls._get_client() + if client is None: + return {'success': False, 'error': 'No remote provider configured'} + + os.makedirs(os.path.dirname(local_path), exist_ok=True) + + 
client.download_file(bucket, remote_key, local_path) + + return { + 'success': True, + 'message': f'Downloaded to {local_path}', + 'local_path': local_path, + 'size': os.path.getsize(local_path) + } + except Exception as e: + return {'success': False, 'error': str(e)} + + @classmethod + def delete_file(cls, remote_key: str) -> Dict: + """Delete a file from remote storage.""" + try: + client, bucket, prefix = cls._get_client() + if client is None: + return {'success': False, 'error': 'No remote provider configured'} + + client.delete_object(Bucket=bucket, Key=remote_key) + + return {'success': True, 'message': f'Deleted {remote_key}'} + except Exception as e: + return {'success': False, 'error': str(e)} + + @classmethod + def list_files(cls, prefix_filter: str = None) -> Dict: + """List files in remote storage.""" + try: + client, bucket, prefix = cls._get_client() + if client is None: + return {'success': False, 'error': 'No remote provider configured'} + + search_prefix = prefix or '' + if prefix_filter: + search_prefix = f"{search_prefix}/{prefix_filter}" if search_prefix else prefix_filter + + files = [] + paginator = client.get_paginator('list_objects_v2') + + for page in paginator.paginate(Bucket=bucket, Prefix=search_prefix): + for obj in page.get('Contents', []): + files.append({ + 'key': obj['Key'], + 'size': obj['Size'], + 'last_modified': obj['LastModified'].isoformat(), + 'etag': obj.get('ETag', '').strip('"') + }) + + return { + 'success': True, + 'files': files, + 'total_count': len(files), + 'total_size': sum(f['size'] for f in files) + } + except Exception as e: + return {'success': False, 'error': str(e)} + + @classmethod + def verify_file(cls, remote_key: str, local_path: str) -> Dict: + """Verify a remote file matches the local file (size + MD5).""" + try: + client, bucket, prefix = cls._get_client() + if client is None: + return {'success': False, 'error': 'No remote provider configured'} + + if not os.path.exists(local_path): + return 
{'success': False, 'error': 'Local file not found', 'verified': False} + + # Get remote file metadata + response = client.head_object(Bucket=bucket, Key=remote_key) + remote_size = response['ContentLength'] + remote_etag = response.get('ETag', '').strip('"') + + # Compare size + local_size = os.path.getsize(local_path) + size_match = remote_size == local_size + + # Compute local MD5 for simple files (non-multipart) + md5_match = None + if '-' not in remote_etag: + md5 = hashlib.md5(usedforsecurity=False) + with open(local_path, 'rb') as f: + for chunk in iter(lambda: f.read(8192), b''): + md5.update(chunk) + local_md5 = md5.hexdigest() + md5_match = local_md5 == remote_etag + + verified = size_match and (md5_match is None or md5_match) + + return { + 'success': True, + 'verified': verified, + 'local_size': local_size, + 'remote_size': remote_size, + 'size_match': size_match, + 'md5_match': md5_match + } + except Exception as e: + return {'success': False, 'error': str(e), 'verified': False} + + @classmethod + def get_remote_stats(cls) -> Dict: + """Get statistics about remote storage usage.""" + result = cls.list_files() + if not result.get('success'): + return { + 'remote_count': 0, + 'remote_size': 0, + 'remote_size_human': '0 B' + } + + total_size = result.get('total_size', 0) + return { + 'remote_count': result.get('total_count', 0), + 'remote_size': total_size, + 'remote_size_human': cls._format_size(total_size) + } + + @staticmethod + def _format_size(size: int) -> str: + """Format size in human readable format.""" + for unit in ['B', 'KB', 'MB', 'GB', 'TB']: + if size < 1024: + return f"{size:.1f} {unit}" + size /= 1024 + return f"{size:.1f} PB" diff --git a/backend/requirements.txt b/backend/requirements.txt index 9dd9c18..6eba1fa 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -50,4 +50,7 @@ pyotp==2.9.0 qrcode[pil]==7.4.2 # HTTP Requests (for webhooks & notifications) -requests==2.32.5 \ No newline at end of file +requests==2.32.5 
+ +# S3-compatible storage (AWS S3, Backblaze B2, MinIO, Wasabi) +boto3==1.35.0 \ No newline at end of file diff --git a/dev.ps1 b/dev.ps1 new file mode 100644 index 0000000..62b8a9d --- /dev/null +++ b/dev.ps1 @@ -0,0 +1,239 @@ +#Requires -Version 5.1 +<# +.SYNOPSIS + ServerKit development launcher and validation tool. +.DESCRIPTION + Start backend, frontend, or both. Run validation checks. +.PARAMETER Mode + Operation mode: start (default), backend, frontend, validate +.EXAMPLE + .\dev.ps1 # Start backend + frontend + .\dev.ps1 backend # Backend only + .\dev.ps1 frontend # Frontend only + .\dev.ps1 validate # Run all linters/checks +#> + +param( + [Parameter(Position = 0)] + [ValidateSet('start', 'backend', 'frontend', 'validate')] + [string]$Mode = 'start' +) + +$ProjectRoot = $PSScriptRoot +$BackendDir = Join-Path $ProjectRoot 'backend' +$FrontendDir = Join-Path $ProjectRoot 'frontend' + +function Write-Header { + param([string]$Text) + Write-Host "" + Write-Host "=== $Text ===" -ForegroundColor Cyan + Write-Host "" +} + +function Write-Pass { + param([string]$Text) + Write-Host " PASS " -ForegroundColor Green -NoNewline + Write-Host $Text +} + +function Write-Fail { + param([string]$Text) + Write-Host " FAIL " -ForegroundColor Red -NoNewline + Write-Host $Text +} + +function Start-Backend { + Write-Header "Starting Backend (http://localhost:5000)" + Push-Location $BackendDir + try { + if (Test-Path 'venv\Scripts\Activate.ps1') { + & 'venv\Scripts\Activate.ps1' + } + python run.py + } + finally { + Pop-Location + } +} + +function Start-Frontend { + Write-Header "Starting Frontend (http://localhost:5173)" + Push-Location $FrontendDir + try { + npm run dev + } + finally { + Pop-Location + } +} + +function Start-Both { + Write-Host "" + Write-Host "ServerKit Dev Server" -ForegroundColor Cyan + Write-Host " Backend: http://localhost:5000" + Write-Host " Frontend: http://localhost:5173" + Write-Host "" + + $backendJob = Start-Job -ScriptBlock { + param($dir) + 
Set-Location $dir + if (Test-Path 'venv\Scripts\Activate.ps1') { + & 'venv\Scripts\Activate.ps1' + } + python run.py + } -ArgumentList $BackendDir + + Start-Sleep -Seconds 2 + + $frontendJob = Start-Job -ScriptBlock { + param($dir) + Set-Location $dir + npm run dev + } -ArgumentList $FrontendDir + + try { + Write-Host "Press Ctrl+C to stop..." -ForegroundColor DarkGray + while ($true) { + # Stream output from both jobs + Receive-Job $backendJob -ErrorAction SilentlyContinue + Receive-Job $frontendJob -ErrorAction SilentlyContinue + + if ($backendJob.State -eq 'Failed') { + Write-Host "Backend crashed!" -ForegroundColor Red + Receive-Job $backendJob + break + } + if ($frontendJob.State -eq 'Failed') { + Write-Host "Frontend crashed!" -ForegroundColor Red + Receive-Job $frontendJob + break + } + Start-Sleep -Seconds 1 + } + } + finally { + Stop-Job $backendJob -ErrorAction SilentlyContinue + Stop-Job $frontendJob -ErrorAction SilentlyContinue + Remove-Job $backendJob -Force -ErrorAction SilentlyContinue + Remove-Job $frontendJob -Force -ErrorAction SilentlyContinue + Write-Host "`nStopped." -ForegroundColor Yellow + } +} + +function Invoke-Check { + param( + [string]$Name, + [string]$WorkDir, + [scriptblock]$Command + ) + Write-Host "Running $Name..." -ForegroundColor Yellow + $prev = $PWD + if ($WorkDir) { Set-Location $WorkDir } + $ErrorActionPreference = 'Continue' + & $Command 2>&1 | Tee-Object -Variable output | Out-Null + $exitCode = $LASTEXITCODE + if ($WorkDir) { Set-Location $prev } + + if ($exitCode -eq 0) { + Write-Pass $Name + return $true + } + else { + Write-Fail $Name + $output | ForEach-Object { Write-Host " $_" } + return $false + } +} + +function Run-Validate { + Write-Header "ServerKit Validation Suite" + $failed = 0 + $passed = 0 + + # --- ESLint (warn-only, does not block) --- + Write-Host "Running ESLint..." 
-ForegroundColor Yellow + Push-Location $FrontendDir + $ErrorActionPreference = 'Continue' + npm run lint 2>&1 | Out-Null + $eslintExit = $LASTEXITCODE + Pop-Location + if ($eslintExit -eq 0) { + Write-Pass "ESLint" + $passed++ + } + else { + Write-Host " WARN " -ForegroundColor Yellow -NoNewline + Write-Host "ESLint (has warnings/errors - run 'cd frontend && npm run lint' for details)" + $passed++ # count as pass — pre-existing issues should not block + } + + # --- Bandit --- + if (Get-Command bandit -ErrorAction SilentlyContinue) { + if (Invoke-Check "Bandit (security scan)" "" { bandit -r "$BackendDir\app" --ini "$BackendDir\.bandit" --severity-level medium }) { $passed++ } else { $failed++ } + } + else { + Write-Fail "Bandit (not installed - pip install bandit)" + $failed++ + } + + # --- Pytest --- + if (Test-Path "$BackendDir\venv\Scripts\Activate.ps1") { + & "$BackendDir\venv\Scripts\Activate.ps1" + } + if (Invoke-Check "Pytest" $BackendDir { pytest --tb=short -q }) { $passed++ } else { $failed++ } + + # --- Frontend build --- + if (Invoke-Check "Frontend build" $FrontendDir { npm run build }) { $passed++ } else { $failed++ } + + # --- Summary --- + Write-Header "Results" + Write-Host " Passed: $passed" -ForegroundColor Green + if ($failed -gt 0) { + Write-Host " Failed: $failed" -ForegroundColor Red + exit 1 + } + else { + Write-Host " All checks passed!" -ForegroundColor Green + } +} + +function Run-ValidateWatch { + Write-Host "Watching for changes... 
(Ctrl+C to stop)" -ForegroundColor DarkGray + Run-Validate + + $watcher = [System.IO.FileSystemWatcher]::new() + $watcher.Path = $ProjectRoot + $watcher.IncludeSubdirectories = $true + $watcher.Filter = '*.*' + $watcher.EnableRaisingEvents = $true + + $lastRun = [DateTime]::MinValue + + try { + while ($true) { + $result = $watcher.WaitForChanged([System.IO.WatcherChangeTypes]::All, 2000) + if (-not $result.TimedOut) { + $ext = [System.IO.Path]::GetExtension($result.Name) + if ($ext -in '.py', '.js', '.jsx', '.ts', '.tsx') { + $now = [DateTime]::Now + if (($now - $lastRun).TotalSeconds -gt 3) { + $lastRun = $now + Write-Host "`nChange detected: $($result.Name)" -ForegroundColor Yellow + Run-Validate + } + } + } + } + } + finally { + $watcher.Dispose() + } +} + +# --- Main --- +switch ($Mode) { + 'backend' { Start-Backend } + 'frontend' { Start-Frontend } + 'validate' { Run-ValidateWatch } + default { Start-Both } +} diff --git a/dev.sh b/dev.sh new file mode 100644 index 0000000..c157ffc --- /dev/null +++ b/dev.sh @@ -0,0 +1,178 @@ +#!/usr/bin/env bash +# ServerKit development launcher and validation tool. 
+# +# Usage: +# ./dev.sh Start backend + frontend (default) +# ./dev.sh backend Backend only +# ./dev.sh frontend Frontend only +# ./dev.sh validate Run all linters/checks + +set -euo pipefail + +PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +BACKEND_DIR="$PROJECT_ROOT/backend" +FRONTEND_DIR="$PROJECT_ROOT/frontend" + +# Colors +CYAN='\033[0;36m' +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[0;33m' +DIM='\033[2m' +NC='\033[0m' + +header() { + echo "" + echo -e "${CYAN}=== $1 ===${NC}" + echo "" +} + +pass() { + echo -e " ${GREEN}PASS${NC} $1" +} + +fail() { + echo -e " ${RED}FAIL${NC} $1" +} + +start_backend() { + header "Starting Backend (http://localhost:5000)" + cd "$BACKEND_DIR" + if [ -f venv/bin/activate ]; then + source venv/bin/activate + fi + python run.py +} + +start_frontend() { + header "Starting Frontend (http://localhost:5173)" + cd "$FRONTEND_DIR" + npm run dev +} + +start_both() { + echo "" + echo -e "${CYAN}ServerKit Dev Server${NC}" + echo " Backend: http://localhost:5000" + echo " Frontend: http://localhost:5173" + echo "" + + cd "$BACKEND_DIR" + if [ -f venv/bin/activate ]; then + source venv/bin/activate + fi + python run.py & + BACKEND_PID=$! + + sleep 2 + + cd "$FRONTEND_DIR" + npm run dev & + FRONTEND_PID=$! + + cleanup() { + echo "" + echo -e "${YELLOW}Stopping...${NC}" + kill "$BACKEND_PID" "$FRONTEND_PID" 2>/dev/null || true + wait "$BACKEND_PID" "$FRONTEND_PID" 2>/dev/null || true + echo "Stopped." 
+    }
+    trap cleanup INT TERM
+
+    echo -e "${DIM}Press Ctrl+C to stop...${NC}"
+    wait
+}
+
+run_validate() {
+    header "ServerKit Validation Suite"
+    local failed=0
+    local passed=0
+
+    # --- ESLint (warn-only, does not block) ---
+    echo -e "${YELLOW}Running ESLint...${NC}"
+    if (cd "$FRONTEND_DIR" && npm run lint 2>&1); then
+        pass "ESLint"
+        passed=$((passed + 1))
+    else
+        echo -e " ${YELLOW}WARN${NC} ESLint (has warnings/errors - run 'cd frontend && npm run lint' for details)"
+        passed=$((passed + 1)) # pre-existing issues should not block
+    fi
+
+    # --- Bandit ---
+    echo -e "${YELLOW}Running Bandit...${NC}"
+    if command -v bandit &>/dev/null; then
+        if bandit -r "$BACKEND_DIR/app" --ini "$BACKEND_DIR/.bandit" --severity-level medium 2>&1; then
+            pass "Bandit (security scan)"
+            passed=$((passed + 1))
+        else
+            fail "Bandit (security scan)"
+            failed=$((failed + 1))
+        fi
+    else
+        fail "Bandit (not installed — pip install bandit)"
+        failed=$((failed + 1))
+    fi
+
+    # --- Pytest ---
+    echo -e "${YELLOW}Running Pytest...${NC}"
+    if (cd "$BACKEND_DIR" && {
+        [ -f venv/bin/activate ] && source venv/bin/activate
+        pytest --tb=short -q 2>&1
+    }); then
+        pass "Pytest"
+        passed=$((passed + 1))
+    else
+        fail "Pytest"
+        failed=$((failed + 1))
+    fi
+
+    # --- Frontend build ---
+    echo -e "${YELLOW}Running Frontend build...${NC}"
+    if (cd "$FRONTEND_DIR" && npm run build 2>&1); then
+        pass "Frontend build"
+        passed=$((passed + 1))
+    else
+        fail "Frontend build"
+        failed=$((failed + 1))
+    fi
+
+    # --- Summary ---
+    header "Results"
+    echo -e " ${GREEN}Passed: $passed${NC}"
+    if [ "$failed" -gt 0 ]; then
+        echo -e " ${RED}Failed: $failed${NC}"
+        return 1
+    else
+        echo -e " ${GREEN}All checks passed!${NC}"
+    fi
+}
+
+run_validate_watch() {
+    run_validate || true
+
+    if command -v inotifywait &>/dev/null; then
+        echo -e "${DIM}Watching for changes... 
(Ctrl+C to stop)${NC}" + while true; do + inotifywait -r -q -e modify,create,delete \ + --include '\.(py|js|jsx|ts|tsx)$' \ + "$BACKEND_DIR/app" "$FRONTEND_DIR/src" 2>/dev/null || break + echo -e "\n${YELLOW}Change detected, re-running...${NC}" + sleep 1 + run_validate || true + done + else + echo "" + echo -e "${DIM}Install inotify-tools for file watching (apt install inotify-tools).${NC}" + echo -e "${DIM}Running one-shot validation only.${NC}" + fi +} + +# --- Main --- +MODE="${1:-start}" + +case "$MODE" in + backend) start_backend ;; + frontend) start_frontend ;; + validate) run_validate_watch ;; + start|*) start_both ;; +esac diff --git a/frontend/eslint.config.js b/frontend/eslint.config.js new file mode 100644 index 0000000..bb58012 --- /dev/null +++ b/frontend/eslint.config.js @@ -0,0 +1,40 @@ +import js from '@eslint/js'; +import globals from 'globals'; +import reactPlugin from 'eslint-plugin-react'; +import reactHooks from 'eslint-plugin-react-hooks'; +import reactRefresh from 'eslint-plugin-react-refresh'; + +export default [ + { ignores: ['dist/**', 'node_modules/**'] }, + { + files: ['**/*.{js,jsx}'], + languageOptions: { + ecmaVersion: 2022, + sourceType: 'module', + globals: { + ...globals.browser, + ...globals.es2021, + }, + parserOptions: { + ecmaFeatures: { jsx: true }, + }, + }, + plugins: { + react: reactPlugin, + 'react-hooks': reactHooks, + 'react-refresh': reactRefresh, + }, + rules: { + ...js.configs.recommended.rules, + ...reactPlugin.configs.flat.recommended.rules, + ...reactPlugin.configs.flat['jsx-runtime'].rules, + ...reactHooks.configs['recommended-latest'].rules, + 'react-refresh/only-export-components': ['warn', { allowConstantExport: true }], + 'react/prop-types': 'off', + 'no-unused-vars': ['warn', { argsIgnorePattern: '^_', varsIgnorePattern: '^_' }], + }, + settings: { + react: { version: 'detect' }, + }, + }, +]; diff --git a/frontend/src/pages/Backups.jsx b/frontend/src/pages/Backups.jsx index b845743..0dbd332 100644 --- 
a/frontend/src/pages/Backups.jsx +++ b/frontend/src/pages/Backups.jsx @@ -1,4 +1,5 @@ import React, { useState, useEffect } from 'react'; +import { Upload, Download, Check, AlertTriangle, Clock, Database, Package, FolderArchive, HardDrive, Cloud, CloudOff, RefreshCw, Trash2, Plus, Settings, CheckCircle, XCircle, Server, FileArchive } from 'lucide-react'; import api from '../services/api'; import { useToast } from '../contexts/ToastContext'; @@ -8,6 +9,7 @@ const Backups = () => { const [stats, setStats] = useState(null); const [schedules, setSchedules] = useState([]); const [config, setConfig] = useState(null); + const [storageConfig, setStorageConfig] = useState(null); const [apps, setApps] = useState([]); const [loading, setLoading] = useState(true); const [error, setError] = useState(null); @@ -19,6 +21,8 @@ const Backups = () => { const [showScheduleModal, setShowScheduleModal] = useState(false); const [showRestoreModal, setShowRestoreModal] = useState(false); const [selectedBackup, setSelectedBackup] = useState(null); + const [uploadingBackup, setUploadingBackup] = useState(null); + const [testingConnection, setTestingConnection] = useState(false); // Backup form state const [backupForm, setBackupForm] = useState({ @@ -29,7 +33,9 @@ const Backups = () => { dbName: '', dbUser: '', dbPassword: '', - dbHost: 'localhost' + dbHost: 'localhost', + filePaths: '', + fileName: '' }); // Schedule form state @@ -38,7 +44,8 @@ const Backups = () => { backupType: 'application', target: '', scheduleTime: '02:00', - days: ['daily'] + days: ['daily'], + uploadRemote: false }); // Config form state @@ -47,6 +54,15 @@ const Backups = () => { retention_days: 30 }); + // Storage config form state + const [storageForm, setStorageForm] = useState({ + provider: 'local', + s3: { bucket: '', region: 'us-east-1', access_key: '', secret_key: '', endpoint_url: '', path_prefix: 'serverkit-backups' }, + b2: { bucket: '', key_id: '', application_key: '', endpoint_url: '', path_prefix: 
'serverkit-backups' }, + auto_upload: false, + keep_local_copy: true + }); + useEffect(() => { loadData(); }, []); @@ -54,12 +70,13 @@ const Backups = () => { const loadData = async () => { try { setLoading(true); - const [backupsRes, statsRes, schedulesRes, configRes, appsRes] = await Promise.all([ + const [backupsRes, statsRes, schedulesRes, configRes, appsRes, storageRes] = await Promise.all([ api.getBackups(), api.getBackupStats(), api.getBackupSchedules(), api.getBackupConfig(), - api.getApps() + api.getApps(), + api.getStorageConfig().catch(() => null) ]); setBackups(backupsRes.backups || []); @@ -68,6 +85,11 @@ const Backups = () => { setConfig(configRes); setApps(appsRes.applications || []); + if (storageRes) { + setStorageConfig(storageRes); + setStorageForm(storageRes); + } + if (configRes) { setConfigForm({ enabled: configRes.enabled || false, @@ -93,7 +115,8 @@ const Backups = () => { host: backupForm.dbHost } : null; await api.backupApplication(parseInt(backupForm.applicationId), backupForm.includeDb, dbConfig); - } else { + toast.success('Application backup created'); + } else if (backupForm.type === 'database') { await api.backupDatabase( backupForm.dbType, backupForm.dbName, @@ -101,12 +124,21 @@ const Backups = () => { backupForm.dbPassword, backupForm.dbHost ); + toast.success('Database backup created'); + } else if (backupForm.type === 'files') { + const paths = backupForm.filePaths.split('\n').map(p => p.trim()).filter(Boolean); + if (paths.length === 0) { + toast.error('Enter at least one file path'); + return; + } + await api.backupFiles(paths, backupForm.fileName || null); + toast.success('File backup created'); } setShowBackupModal(false); resetBackupForm(); loadData(); } catch (err) { - setError(err.message); + toast.error(err.message); } }; @@ -114,9 +146,23 @@ const Backups = () => { if (!window.confirm('Are you sure you want to delete this backup?')) return; try { await api.deleteBackup(backupPath); + toast.success('Backup deleted'); 
loadData(); } catch (err) { - setError(err.message); + toast.error(err.message); + } + }; + + const handleUploadToRemote = async (backup) => { + setUploadingBackup(backup.path); + try { + await api.uploadBackupToRemote(backup.path); + toast.success('Backup uploaded to remote storage'); + loadData(); + } catch (err) { + toast.error(err.message); + } finally { + setUploadingBackup(null); } }; @@ -150,13 +196,25 @@ const Backups = () => { scheduleForm.backupType, scheduleForm.target, scheduleForm.scheduleTime, - scheduleForm.days + scheduleForm.days, + scheduleForm.uploadRemote ); + toast.success('Schedule added'); setShowScheduleModal(false); resetScheduleForm(); loadData(); } catch (err) { - setError(err.message); + toast.error(err.message); + } + }; + + const handleToggleSchedule = async (schedule) => { + try { + await api.updateBackupSchedule(schedule.id, { enabled: !schedule.enabled }); + toast.success(`Schedule ${schedule.enabled ? 'disabled' : 'enabled'}`); + loadData(); + } catch (err) { + toast.error(err.message); } }; @@ -164,9 +222,10 @@ const Backups = () => { if (!window.confirm('Are you sure you want to remove this schedule?')) return; try { await api.removeBackupSchedule(scheduleId); + toast.success('Schedule removed'); loadData(); } catch (err) { - setError(err.message); + toast.error(err.message); } }; @@ -174,9 +233,37 @@ const Backups = () => { e.preventDefault(); try { await api.updateBackupConfig(configForm); + toast.success('Settings saved'); loadData(); } catch (err) { - setError(err.message); + toast.error(err.message); + } + }; + + const handleSaveStorageConfig = async (e) => { + e.preventDefault(); + try { + await api.updateStorageConfig(storageForm); + toast.success('Storage configuration saved'); + loadData(); + } catch (err) { + toast.error(err.message); + } + }; + + const handleTestConnection = async () => { + setTestingConnection(true); + try { + const result = await api.testStorageConnection(storageForm); + if (result.success) { + 
toast.success(result.message); + } else { + toast.error(result.error); + } + } catch (err) { + toast.error(err.message); + } finally { + setTestingConnection(false); } }; @@ -200,7 +287,9 @@ const Backups = () => { dbName: '', dbUser: '', dbPassword: '', - dbHost: 'localhost' + dbHost: 'localhost', + filePaths: '', + fileName: '' }); }; @@ -210,7 +299,8 @@ const Backups = () => { backupType: 'application', target: '', scheduleTime: '02:00', - days: ['daily'] + days: ['daily'], + uploadRemote: false }); }; @@ -230,6 +320,26 @@ const Backups = () => { return new Date(timestamp).toLocaleString(); }; + const getBackupIcon = (type) => { + switch (type) { + case 'application': return ; + case 'database': return ; + case 'files': return ; + default: return ; + } + }; + + const getRemoteStatusBadge = (status) => { + switch (status) { + case 'synced': + return Synced; + case 'remote-only': + return Remote; + default: + return Local; + } + }; + const filteredBackups = filterType === 'all' ? backups : backups.filter(b => b.type === filterType); @@ -243,21 +353,15 @@ const Backups = () => {

Backups

-

Manage application and database backups

+

Manage application, database, and file backups with local and remote storage

@@ -274,11 +378,7 @@ const Backups = () => {
- - - - - +
Total Backups @@ -288,9 +388,7 @@ const Backups = () => {
- - - +
Application Backups @@ -300,11 +398,7 @@ const Backups = () => {
- - - - - +
Database Backups @@ -314,41 +408,43 @@ const Backups = () => {
- - - - - - +
- Total Size + Local Size {stats?.total_size_human || '0 B'}
+ + {storageConfig?.provider !== 'local' && ( +
+
+ +
+
+ Remote Backups + {stats?.remote_count || 0} +
+
+ )}
- - - +
+ {/* Backups Tab */} {activeTab === 'backups' && (
@@ -362,8 +458,10 @@ const Backups = () => { +
@@ -371,11 +469,7 @@ const Backups = () => {
{filteredBackups.length === 0 ? (
- - - - - +

No Backups

No backups found. Create your first backup to get started.

+ {backup.type !== 'files' && ( + + )} + {storageConfig?.provider !== 'local' && backup.remote_status !== 'synced' && ( + + )}
@@ -455,21 +551,20 @@ const Backups = () => {
)} + {/* Schedules Tab */} {activeTab === 'schedules' && (

Backup Schedules

{schedules.length === 0 ? (
- - - - +

No Schedules

No backup schedules configured. Add a schedule for automated backups.

+ {schedule.last_status === 'success' && ( + Success + )} + {schedule.last_status === 'failed' && ( + Failed + )} + {!schedule.last_status && ( + + {schedule.enabled ? 'Active' : 'Disabled'} + + )} + + +
+ + +
))} @@ -527,6 +646,200 @@ const Backups = () => {
)} + {/* Storage Tab */} + {activeTab === 'storage' && ( +
+
+

Remote Storage Configuration

+
+
+
+
+ + +
+ + {storageForm.provider === 's3' && ( +
+

S3-Compatible Storage

+
+
+ + setStorageForm({...storageForm, s3: {...storageForm.s3, bucket: e.target.value}})} + placeholder="my-backup-bucket" + required + /> +
+
+ + setStorageForm({...storageForm, s3: {...storageForm.s3, region: e.target.value}})} + placeholder="us-east-1" + /> +
+
+
+
+ + setStorageForm({...storageForm, s3: {...storageForm.s3, access_key: e.target.value}})} + placeholder="AKIA..." + required + /> +
+
+ + setStorageForm({...storageForm, s3: {...storageForm.s3, secret_key: e.target.value}})} + required + /> +
+
+
+
+ + setStorageForm({...storageForm, s3: {...storageForm.s3, endpoint_url: e.target.value}})} + placeholder="https://s3.example.com" + /> +
+
+ + setStorageForm({...storageForm, s3: {...storageForm.s3, path_prefix: e.target.value}})} + placeholder="serverkit-backups" + /> +
+
+
+ )} + + {storageForm.provider === 'b2' && ( +
+

Backblaze B2

+
+
+ + setStorageForm({...storageForm, b2: {...storageForm.b2, bucket: e.target.value}})} + placeholder="my-backup-bucket" + required + /> +
+
+ + setStorageForm({...storageForm, b2: {...storageForm.b2, endpoint_url: e.target.value}})} + placeholder="https://s3.us-west-004.backblazeb2.com" + required + /> +
+
+
+
+ + setStorageForm({...storageForm, b2: {...storageForm.b2, key_id: e.target.value}})} + required + /> +
+
+ + setStorageForm({...storageForm, b2: {...storageForm.b2, application_key: e.target.value}})} + required + /> +
+
+
+ + setStorageForm({...storageForm, b2: {...storageForm.b2, path_prefix: e.target.value}})} + placeholder="serverkit-backups" + /> +
+
+ )} + + {storageForm.provider !== 'local' && ( + <> +
+ +
+ +
+ +
+ + )} + +
+ + {storageForm.provider !== 'local' && ( + + )} +
+
+
+
+ )} + + {/* Settings Tab */} {activeTab === 'settings' && (
@@ -560,6 +873,7 @@ const Backups = () => {
@@ -586,6 +900,7 @@ const Backups = () => { > +
@@ -618,6 +933,31 @@ const Backups = () => { )} + {backupForm.type === 'files' && ( + <> +
+ + setBackupForm({...backupForm, fileName: e.target.value})} + placeholder="my-config-backup" + /> +
+
+ +