diff --git a/hatch_build.py b/hatch_build.py
index 723aa15d..d1165ea4 100644
--- a/hatch_build.py
+++ b/hatch_build.py
@@ -20,6 +20,12 @@ def initialize(self, version, build_data):
             self.app.display_info("Skipping frontend build (SKIP_FRONTEND_BUILD set)")
             return
 
+        # Editable installs do not need packaged frontend assets; the dedicated
+        # frontend CI job already validates the client bundle separately.
+        if version == "editable":
+            self.app.display_info("Skipping frontend build for editable install")
+            return
+
         # Only build for wheel target
         if self.target_name != "wheel":
             return
@@ -44,40 +50,45 @@ def initialize(self, version, build_data):
         dist_dir = client_dir / "dist"
         if dist_dir.exists() and (dist_dir / "index.html").exists():
             self.app.display_info("Frontend already built - skipping build")
-            return
+        else:
+            # Check if npm is available
+            if not shutil.which("npm"):
+                self.app.display_info("npm not found - skipping frontend build")
+                return
 
-        # Check if npm is available
-        if not shutil.which("npm"):
-            self.app.display_info("npm not found - skipping frontend build")
-            return
+            self.app.display_info("Building frontend client...")
+
+            # Prefer the lockfile for reproducible CI/frontend packaging.
+            install_cmd = (
+                ["npm", "ci"] if (client_dir / "package-lock.json").exists() else ["npm", "install"]
+            )
+
+            # Install dependencies
+            self.app.display_info("Installing npm dependencies...")
+            result = subprocess.run(
+                install_cmd,
+                cwd=client_dir,
+                capture_output=True,
+                text=True,
+            )
+            if result.returncode != 0:
+                cmd_name = " ".join(install_cmd)
+                self.app.display_error(f"{cmd_name} failed: {result.stderr}")
+                raise RuntimeError(f"{cmd_name} failed: {result.stderr}")
+
+            # Build the frontend
+            self.app.display_info("Running npm build...")
+            result = subprocess.run(
+                ["npm", "run", "build"],
+                cwd=client_dir,
+                capture_output=True,
+                text=True,
+            )
+            if result.returncode != 0:
+                self.app.display_error(f"npm build failed: {result.stderr}")
+                raise RuntimeError(f"npm build failed: {result.stderr}")
 
-        self.app.display_info("Building frontend client...")
-
-        # Install dependencies
-        self.app.display_info("Installing npm dependencies...")
-        result = subprocess.run(
-            ["npm", "install"],
-            cwd=client_dir,
-            capture_output=True,
-            text=True,
-        )
-        if result.returncode != 0:
-            self.app.display_error(f"npm install failed: {result.stderr}")
-            raise RuntimeError(f"npm install failed: {result.stderr}")
-
-        # Build the frontend
-        self.app.display_info("Running npm build...")
-        result = subprocess.run(
-            ["npm", "run", "build"],
-            cwd=client_dir,
-            capture_output=True,
-            text=True,
-        )
-        if result.returncode != 0:
-            self.app.display_error(f"npm build failed: {result.stderr}")
-            raise RuntimeError(f"npm build failed: {result.stderr}")
-
-        # Verify build output exists
+        # Non-editable wheel builds should leave the built assets in-package.
         dist_dir = client_dir / "dist"
         if not dist_dir.exists() or not (dist_dir / "index.html").exists():
             raise RuntimeError(f"Build output not found at {dist_dir}")
diff --git a/pyproject.toml b/pyproject.toml
index 6e50118a..12f970eb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -93,9 +93,6 @@ packages = ["src/kurt", "eval"]
 [tool.hatch.build.targets.wheel.hooks.custom]
 path = "hatch_build.py"
 
-[tool.hatch.build.targets.wheel.force-include]
-"src/kurt/web/client/dist" = "kurt/web/client/dist"
-
 [tool.ruff]
 line-length = 100
 target-version = "py310"
diff --git a/src/kurt/cli/main.py b/src/kurt/cli/main.py
index b9e0a9f8..8bcd8da6 100644
--- a/src/kurt/cli/main.py
+++ b/src/kurt/cli/main.py
@@ -158,6 +158,7 @@ def get_command(self, ctx, name):
             "help": ("kurt.cli.show", "show_group"),
             # Agent workflows
             "agents": ("kurt.workflows.agents.cli", "agents_group"),
+            "media": ("kurt.workflows.media.cli", "media_group"),
             # Skill management
             "skill": ("kurt.cli.skill", "skill"),
         },
@@ -209,7 +210,7 @@ def main(ctx, json_output: bool, quiet: bool):
 
     # Skip auto-migrate for commands that don't need DB or use Dolt CLI directly
     # doctor/repair use Dolt CLI commands which conflict with the auto-started server
-    if ctx.invoked_subcommand in ["init", "help", "doctor", "repair"]:
+    if ctx.invoked_subcommand in ["init", "help", "doctor", "repair", "media"]:
         return
 
     # Skip if no project initialized
diff --git a/src/kurt/services/__init__.py b/src/kurt/services/__init__.py
new file mode 100644
index 00000000..42cdedff
--- /dev/null
+++ b/src/kurt/services/__init__.py
@@ -0,0 +1,6 @@
+"""Kurt services - external API integrations and utilities."""
+
+from kurt.services.ai_generation import AIGenerationService
+from kurt.services.media_edit import MediaEditService
+
+__all__ = ["AIGenerationService", "MediaEditService"]
diff --git a/src/kurt/services/ai_generation.py b/src/kurt/services/ai_generation.py
new file mode 100644
index 00000000..29d35afa
--- /dev/null
+++ 
b/src/kurt/services/ai_generation.py @@ -0,0 +1,846 @@ +"""AI Generation Service - unified interface to image/video generation APIs. + +Supported providers: +- fal.ai: Fast inference, Flux models, video generation +- Leonardo.ai: Nano Banana, Phoenix, commercial-grade +- Replicate: Huge model library, pay-per-use +- Runway: Video generation (Gen-3, Gen-4) + +Environment variables: +- FAL_KEY: fal.ai API key +- LEONARDO_API_KEY: Leonardo.ai API key +- REPLICATE_API_TOKEN: Replicate API token +- RUNWAY_API_KEY: Runway API key +""" + +from __future__ import annotations + +import asyncio +import os +import time +from dataclasses import dataclass, field +from enum import Enum +from typing import Any + +import httpx + + +class Provider(str, Enum): + """Supported AI generation providers.""" + + FAL = "fal" + LEONARDO = "leonardo" + REPLICATE = "replicate" + RUNWAY = "runway" + + +class MediaType(str, Enum): + """Type of media to generate.""" + + IMAGE = "image" + VIDEO = "video" + + +@dataclass +class GenerationResult: + """Result from an AI generation request.""" + + success: bool + url: str | None = None + urls: list[str] = field(default_factory=list) + job_id: str | None = None + provider: str | None = None + model: str | None = None + error: str | None = None + metadata: dict[str, Any] = field(default_factory=dict) + + @property + def primary_url(self) -> str | None: + """Get the primary output URL.""" + return self.url or (self.urls[0] if self.urls else None) + + +class AIGenerationService: + """Unified interface to AI generation APIs. 
+ + Example: + service = AIGenerationService() + + # Generate image + result = await service.generate_image( + prompt="A futuristic city at sunset", + model="flux-dev", + ) + print(result.url) + + # Generate video from image + result = await service.generate_video( + image_url=result.url, + prompt="Slow zoom in with particles floating", + duration=5, + ) + print(result.url) + """ + + # Default models for each provider + DEFAULT_MODELS = { + Provider.FAL: { + MediaType.IMAGE: "flux/dev", + MediaType.VIDEO: "ltx-video/image-to-video", + }, + Provider.LEONARDO: { + MediaType.IMAGE: "phoenix", + }, + Provider.REPLICATE: { + MediaType.IMAGE: "stability-ai/sdxl", + MediaType.VIDEO: "stability-ai/stable-video-diffusion", + }, + Provider.RUNWAY: { + MediaType.VIDEO: "gen3a_turbo", + }, + } + + def __init__( + self, + fal_key: str | None = None, + leonardo_key: str | None = None, + replicate_token: str | None = None, + runway_key: str | None = None, + default_image_provider: Provider = Provider.FAL, + default_video_provider: Provider = Provider.FAL, + ): + """Initialize the AI generation service. 
+ + Args: + fal_key: fal.ai API key (or FAL_KEY env var) + leonardo_key: Leonardo.ai API key (or LEONARDO_API_KEY env var) + replicate_token: Replicate token (or REPLICATE_API_TOKEN env var) + runway_key: Runway API key (or RUNWAY_API_KEY env var) + default_image_provider: Default provider for image generation + default_video_provider: Default provider for video generation + """ + self.fal_key = fal_key or os.environ.get("FAL_KEY") + self.leonardo_key = leonardo_key or os.environ.get("LEONARDO_API_KEY") + self.replicate_token = replicate_token or os.environ.get("REPLICATE_API_TOKEN") + self.runway_key = runway_key or os.environ.get("RUNWAY_API_KEY") + + self.default_image_provider = default_image_provider + self.default_video_provider = default_video_provider + + self._client: httpx.AsyncClient | None = None + + @property + def client(self) -> httpx.AsyncClient: + """Get or create the HTTP client.""" + if self._client is None: + self._client = httpx.AsyncClient(timeout=300.0) + return self._client + + async def close(self) -> None: + """Close the HTTP client.""" + if self._client is not None: + await self._client.aclose() + self._client = None + + def _get_provider_key(self, provider: Provider) -> str | None: + """Get the API key for a provider.""" + return { + Provider.FAL: self.fal_key, + Provider.LEONARDO: self.leonardo_key, + Provider.REPLICATE: self.replicate_token, + Provider.RUNWAY: self.runway_key, + }.get(provider) + + async def generate_image( + self, + prompt: str, + model: str | None = None, + provider: Provider | str | None = None, + width: int = 1024, + height: int = 1024, + num_images: int = 1, + negative_prompt: str | None = None, + **kwargs: Any, + ) -> GenerationResult: + """Generate an image using AI. 
+ + Args: + prompt: Text description of the image to generate + model: Model identifier (provider-specific) + provider: Provider to use (fal, leonardo, replicate) + width: Image width in pixels + height: Image height in pixels + num_images: Number of images to generate + negative_prompt: Things to avoid in the image + **kwargs: Additional provider-specific parameters + + Returns: + GenerationResult with URL(s) of generated images + """ + if provider is None: + provider = self.default_image_provider + elif isinstance(provider, str): + provider = Provider(provider) + + if model is None: + model = self.DEFAULT_MODELS.get(provider, {}).get(MediaType.IMAGE) + + if provider == Provider.FAL: + return await self._fal_generate_image( + prompt=prompt, + model=model, + width=width, + height=height, + num_images=num_images, + negative_prompt=negative_prompt, + **kwargs, + ) + elif provider == Provider.LEONARDO: + return await self._leonardo_generate_image( + prompt=prompt, + model=model, + width=width, + height=height, + num_images=num_images, + negative_prompt=negative_prompt, + **kwargs, + ) + elif provider == Provider.REPLICATE: + return await self._replicate_generate_image( + prompt=prompt, + model=model, + width=width, + height=height, + num_images=num_images, + negative_prompt=negative_prompt, + **kwargs, + ) + else: + return GenerationResult( + success=False, + error=f"Provider {provider} does not support image generation", + ) + + async def generate_video( + self, + prompt: str, + image_url: str | None = None, + model: str | None = None, + provider: Provider | str | None = None, + duration: int = 5, + **kwargs: Any, + ) -> GenerationResult: + """Generate a video using AI. 
+ + Args: + prompt: Text description of the video motion/content + image_url: Source image URL (for image-to-video) + model: Model identifier (provider-specific) + provider: Provider to use (fal, runway, replicate) + duration: Video duration in seconds + **kwargs: Additional provider-specific parameters + + Returns: + GenerationResult with URL of generated video + """ + if provider is None: + provider = self.default_video_provider + elif isinstance(provider, str): + provider = Provider(provider) + + if model is None: + model = self.DEFAULT_MODELS.get(provider, {}).get(MediaType.VIDEO) + + if provider == Provider.FAL: + return await self._fal_generate_video( + prompt=prompt, + image_url=image_url, + model=model, + duration=duration, + **kwargs, + ) + elif provider == Provider.RUNWAY: + return await self._runway_generate_video( + prompt=prompt, + image_url=image_url, + model=model, + duration=duration, + **kwargs, + ) + elif provider == Provider.REPLICATE: + return await self._replicate_generate_video( + prompt=prompt, + image_url=image_url, + model=model, + **kwargs, + ) + else: + return GenerationResult( + success=False, + error=f"Provider {provider} does not support video generation", + ) + + # ------------------------------------------------------------------------- + # fal.ai Implementation + # ------------------------------------------------------------------------- + + async def _fal_generate_image( + self, + prompt: str, + model: str, + width: int, + height: int, + num_images: int, + negative_prompt: str | None, + **kwargs: Any, + ) -> GenerationResult: + """Generate image via fal.ai.""" + if not self.fal_key: + return GenerationResult( + success=False, + error="FAL_KEY not configured", + ) + + # fal.ai uses model paths like "fal-ai/flux/dev" + if not model.startswith("fal-ai/"): + model = f"fal-ai/{model}" + + url = f"https://fal.run/{model}" + + payload: dict[str, Any] = { + "prompt": prompt, + "image_size": {"width": width, "height": height}, + 
"num_images": num_images, + } + if negative_prompt: + payload["negative_prompt"] = negative_prompt + payload.update(kwargs) + + try: + response = await self.client.post( + url, + headers={ + "Authorization": f"Key {self.fal_key}", + "Content-Type": "application/json", + }, + json=payload, + ) + response.raise_for_status() + data = response.json() + + images = data.get("images", []) + urls = [img.get("url") for img in images if img.get("url")] + + return GenerationResult( + success=True, + url=urls[0] if urls else None, + urls=urls, + provider="fal", + model=model, + metadata={"seed": data.get("seed")}, + ) + except httpx.HTTPStatusError as e: + return GenerationResult( + success=False, + error=f"fal.ai API error: {e.response.status_code} - {e.response.text}", + provider="fal", + ) + except Exception as e: + return GenerationResult( + success=False, + error=f"fal.ai request failed: {e}", + provider="fal", + ) + + async def _fal_generate_video( + self, + prompt: str, + image_url: str | None, + model: str, + duration: int, + **kwargs: Any, + ) -> GenerationResult: + """Generate video via fal.ai.""" + if not self.fal_key: + return GenerationResult( + success=False, + error="FAL_KEY not configured", + ) + + if not model.startswith("fal-ai/"): + model = f"fal-ai/{model}" + + url = f"https://fal.run/{model}" + + payload: dict[str, Any] = {"prompt": prompt} + if image_url: + payload["image_url"] = image_url + if "num_frames" not in kwargs: + # Approximate frames from duration (assuming ~24fps output) + payload["num_frames"] = min(duration * 24, 257) + payload.update(kwargs) + + try: + response = await self.client.post( + url, + headers={ + "Authorization": f"Key {self.fal_key}", + "Content-Type": "application/json", + }, + json=payload, + ) + response.raise_for_status() + data = response.json() + + video_url = data.get("video", {}).get("url") + + return GenerationResult( + success=True, + url=video_url, + provider="fal", + model=model, + metadata=data, + ) + except 
httpx.HTTPStatusError as e: + return GenerationResult( + success=False, + error=f"fal.ai API error: {e.response.status_code} - {e.response.text}", + provider="fal", + ) + except Exception as e: + return GenerationResult( + success=False, + error=f"fal.ai request failed: {e}", + provider="fal", + ) + + # ------------------------------------------------------------------------- + # Leonardo.ai Implementation + # ------------------------------------------------------------------------- + + async def _leonardo_generate_image( + self, + prompt: str, + model: str, + width: int, + height: int, + num_images: int, + negative_prompt: str | None, + **kwargs: Any, + ) -> GenerationResult: + """Generate image via Leonardo.ai.""" + if not self.leonardo_key: + return GenerationResult( + success=False, + error="LEONARDO_API_KEY not configured", + ) + + base_url = "https://cloud.leonardo.ai/api/rest/v1" + + # Model name to ID mapping (common models) + model_ids = { + "phoenix": "6b645e3a-d64f-4341-a6d8-7a3690fbf042", + "nano-banana": "aa77f04e-3eec-4034-9c07-d0f619684628", + "nano-banana-pro": "faf3e8d3-6d19-4e98-8c3a-5c17e9f67a28", + "sdxl": "1e60896f-3c26-4296-8ecc-53e2afecc132", + } + + model_id = model_ids.get(model, model) + + payload: dict[str, Any] = { + "prompt": prompt, + "modelId": model_id, + "width": width, + "height": height, + "num_images": num_images, + } + if negative_prompt: + payload["negative_prompt"] = negative_prompt + payload.update(kwargs) + + try: + # Start generation + response = await self.client.post( + f"{base_url}/generations", + headers={ + "Authorization": f"Bearer {self.leonardo_key}", + "Content-Type": "application/json", + }, + json=payload, + ) + response.raise_for_status() + data = response.json() + + generation_id = data.get("sdGenerationJob", {}).get("generationId") + if not generation_id: + return GenerationResult( + success=False, + error="No generation ID returned", + provider="leonardo", + ) + + # Poll for completion + urls = await 
self._leonardo_poll_generation(generation_id) + + return GenerationResult( + success=True, + url=urls[0] if urls else None, + urls=urls, + job_id=generation_id, + provider="leonardo", + model=model, + ) + except httpx.HTTPStatusError as e: + return GenerationResult( + success=False, + error=f"Leonardo API error: {e.response.status_code} - {e.response.text}", + provider="leonardo", + ) + except Exception as e: + return GenerationResult( + success=False, + error=f"Leonardo request failed: {e}", + provider="leonardo", + ) + + async def _leonardo_poll_generation( + self, + generation_id: str, + max_wait: int = 120, + poll_interval: float = 2.0, + ) -> list[str]: + """Poll Leonardo.ai for generation completion.""" + base_url = "https://cloud.leonardo.ai/api/rest/v1" + start_time = time.time() + + while time.time() - start_time < max_wait: + response = await self.client.get( + f"{base_url}/generations/{generation_id}", + headers={"Authorization": f"Bearer {self.leonardo_key}"}, + ) + response.raise_for_status() + data = response.json() + + generation = data.get("generations_by_pk", {}) + status = generation.get("status") + + if status == "COMPLETE": + images = generation.get("generated_images", []) + return [img.get("url") for img in images if img.get("url")] + elif status == "FAILED": + raise Exception("Generation failed") + + await asyncio.sleep(poll_interval) + + raise Exception("Generation timed out") + + # ------------------------------------------------------------------------- + # Replicate Implementation + # ------------------------------------------------------------------------- + + async def _replicate_generate_image( + self, + prompt: str, + model: str, + width: int, + height: int, + num_images: int, + negative_prompt: str | None, + **kwargs: Any, + ) -> GenerationResult: + """Generate image via Replicate.""" + if not self.replicate_token: + return GenerationResult( + success=False, + error="REPLICATE_API_TOKEN not configured", + ) + + base_url = 
"https://api.replicate.com/v1" + + # Build input based on model + input_data: dict[str, Any] = { + "prompt": prompt, + "width": width, + "height": height, + "num_outputs": num_images, + } + if negative_prompt: + input_data["negative_prompt"] = negative_prompt + input_data.update(kwargs) + + try: + # Start prediction + response = await self.client.post( + f"{base_url}/predictions", + headers={ + "Authorization": f"Token {self.replicate_token}", + "Content-Type": "application/json", + }, + json={"version": model, "input": input_data}, + ) + response.raise_for_status() + data = response.json() + + prediction_id = data.get("id") + if not prediction_id: + return GenerationResult( + success=False, + error="No prediction ID returned", + provider="replicate", + ) + + # Poll for completion + urls = await self._replicate_poll_prediction(prediction_id) + + return GenerationResult( + success=True, + url=urls[0] if urls else None, + urls=urls, + job_id=prediction_id, + provider="replicate", + model=model, + ) + except httpx.HTTPStatusError as e: + return GenerationResult( + success=False, + error=f"Replicate API error: {e.response.status_code} - {e.response.text}", + provider="replicate", + ) + except Exception as e: + return GenerationResult( + success=False, + error=f"Replicate request failed: {e}", + provider="replicate", + ) + + async def _replicate_poll_prediction( + self, + prediction_id: str, + max_wait: int = 300, + poll_interval: float = 2.0, + ) -> list[str]: + """Poll Replicate for prediction completion.""" + base_url = "https://api.replicate.com/v1" + start_time = time.time() + + while time.time() - start_time < max_wait: + response = await self.client.get( + f"{base_url}/predictions/{prediction_id}", + headers={"Authorization": f"Token {self.replicate_token}"}, + ) + response.raise_for_status() + data = response.json() + + status = data.get("status") + + if status == "succeeded": + output = data.get("output", []) + if isinstance(output, list): + return output + 
return [output] if output else [] + elif status in ("failed", "canceled"): + raise Exception(f"Prediction {status}: {data.get('error')}") + + await asyncio.sleep(poll_interval) + + raise Exception("Prediction timed out") + + async def _replicate_generate_video( + self, + prompt: str, + image_url: str | None, + model: str, + **kwargs: Any, + ) -> GenerationResult: + """Generate video via Replicate.""" + if not self.replicate_token: + return GenerationResult( + success=False, + error="REPLICATE_API_TOKEN not configured", + ) + + base_url = "https://api.replicate.com/v1" + + input_data: dict[str, Any] = {} + if image_url: + input_data["image"] = image_url + if prompt: + input_data["prompt"] = prompt + input_data.update(kwargs) + + try: + response = await self.client.post( + f"{base_url}/predictions", + headers={ + "Authorization": f"Token {self.replicate_token}", + "Content-Type": "application/json", + }, + json={"version": model, "input": input_data}, + ) + response.raise_for_status() + data = response.json() + + prediction_id = data.get("id") + if not prediction_id: + return GenerationResult( + success=False, + error="No prediction ID returned", + provider="replicate", + ) + + urls = await self._replicate_poll_prediction(prediction_id) + + return GenerationResult( + success=True, + url=urls[0] if urls else None, + urls=urls, + job_id=prediction_id, + provider="replicate", + model=model, + ) + except Exception as e: + return GenerationResult( + success=False, + error=f"Replicate request failed: {e}", + provider="replicate", + ) + + # ------------------------------------------------------------------------- + # Runway Implementation + # ------------------------------------------------------------------------- + + async def _runway_generate_video( + self, + prompt: str, + image_url: str | None, + model: str, + duration: int, + **kwargs: Any, + ) -> GenerationResult: + """Generate video via Runway.""" + if not self.runway_key: + return GenerationResult( + success=False, 
+ error="RUNWAY_API_KEY not configured", + ) + + base_url = "https://api.dev.runwayml.com/v1" + + payload: dict[str, Any] = { + "model": model, + "promptText": prompt, + "duration": duration, + } + if image_url: + payload["promptImage"] = image_url + payload.update(kwargs) + + try: + # Start generation + response = await self.client.post( + f"{base_url}/image_to_video" if image_url else f"{base_url}/text_to_video", + headers={ + "Authorization": f"Bearer {self.runway_key}", + "Content-Type": "application/json", + "X-Runway-Version": "2024-11-06", + }, + json=payload, + ) + response.raise_for_status() + data = response.json() + + task_id = data.get("id") + if not task_id: + return GenerationResult( + success=False, + error="No task ID returned", + provider="runway", + ) + + # Poll for completion + video_url = await self._runway_poll_task(task_id) + + return GenerationResult( + success=True, + url=video_url, + job_id=task_id, + provider="runway", + model=model, + ) + except httpx.HTTPStatusError as e: + return GenerationResult( + success=False, + error=f"Runway API error: {e.response.status_code} - {e.response.text}", + provider="runway", + ) + except Exception as e: + return GenerationResult( + success=False, + error=f"Runway request failed: {e}", + provider="runway", + ) + + async def _runway_poll_task( + self, + task_id: str, + max_wait: int = 600, + poll_interval: float = 5.0, + ) -> str: + """Poll Runway for task completion.""" + base_url = "https://api.dev.runwayml.com/v1" + start_time = time.time() + + while time.time() - start_time < max_wait: + response = await self.client.get( + f"{base_url}/tasks/{task_id}", + headers={ + "Authorization": f"Bearer {self.runway_key}", + "X-Runway-Version": "2024-11-06", + }, + ) + response.raise_for_status() + data = response.json() + + status = data.get("status") + + if status == "SUCCEEDED": + output = data.get("output", []) + if output: + return output[0] + raise Exception("No output URL in completed task") + elif status 
== "FAILED": + raise Exception(f"Task failed: {data.get('failure')}") + + await asyncio.sleep(poll_interval) + + raise Exception("Task timed out") + + +# Convenience function for synchronous usage +def generate_image_sync( + prompt: str, + model: str | None = None, + provider: str | None = None, + **kwargs: Any, +) -> GenerationResult: + """Synchronous wrapper for image generation.""" + service = AIGenerationService() + return asyncio.run( + service.generate_image(prompt=prompt, model=model, provider=provider, **kwargs) + ) + + +def generate_video_sync( + prompt: str, + image_url: str | None = None, + model: str | None = None, + provider: str | None = None, + **kwargs: Any, +) -> GenerationResult: + """Synchronous wrapper for video generation.""" + service = AIGenerationService() + return asyncio.run( + service.generate_video( + prompt=prompt, image_url=image_url, model=model, provider=provider, **kwargs + ) + ) diff --git a/src/kurt/services/media_edit.py b/src/kurt/services/media_edit.py new file mode 100644 index 00000000..162dad02 --- /dev/null +++ b/src/kurt/services/media_edit.py @@ -0,0 +1,1012 @@ +"""Media Edit Service - wrapper around FFmpeg and ImageMagick. 
+ +Provides a unified interface for common media editing operations: +- Image: resize, crop, rotate, format conversion, filters +- Video: trim, resize, extract audio, add audio, format conversion +- Audio: trim, convert, extract from video + +Requirements: +- FFmpeg: video/audio processing (install via apt/brew/choco) +- ImageMagick: image processing (install via apt/brew/choco) + +Environment variables: +- FFMPEG_PATH: Path to ffmpeg binary (default: "ffmpeg") +- MAGICK_PATH: Path to magick binary (default: "magick") +""" + +from __future__ import annotations + +import asyncio +import os +import shutil +import tempfile +from dataclasses import dataclass, field +from enum import Enum +from pathlib import Path +from typing import Any + + +class MediaFormat(str, Enum): + """Supported output formats.""" + + # Image formats + JPEG = "jpeg" + JPG = "jpg" + PNG = "png" + WEBP = "webp" + GIF = "gif" + AVIF = "avif" + TIFF = "tiff" + + # Video formats + MP4 = "mp4" + WEBM = "webm" + MOV = "mov" + AVI = "avi" + MKV = "mkv" + + # Audio formats + MP3 = "mp3" + WAV = "wav" + AAC = "aac" + OGG = "ogg" + FLAC = "flac" + + +@dataclass +class EditResult: + """Result from a media editing operation.""" + + success: bool + output_path: str | None = None + error: str | None = None + command: str | None = None + stdout: str | None = None + stderr: str | None = None + metadata: dict[str, Any] = field(default_factory=dict) + + +@dataclass +class MediaInfo: + """Information about a media file.""" + + path: str + format: str | None = None + width: int | None = None + height: int | None = None + duration: float | None = None + bitrate: int | None = None + codec: str | None = None + audio_codec: str | None = None + fps: float | None = None + size_bytes: int | None = None + + +class MediaEditService: + """Service for editing media files using FFmpeg and ImageMagick. 
+ + Example: + service = MediaEditService() + + # Resize image + result = await service.resize_image( + "input.jpg", + output_path="output.jpg", + width=800, + height=600, + ) + + # Trim video + result = await service.trim_video( + "input.mp4", + output_path="output.mp4", + start="00:00:30", + end="00:01:00", + ) + + # Convert format + result = await service.convert( + "input.png", + output_path="output.webp", + format=MediaFormat.WEBP, + ) + """ + + def __init__( + self, + ffmpeg_path: str | None = None, + magick_path: str | None = None, + ): + """Initialize the media edit service. + + Args: + ffmpeg_path: Path to ffmpeg binary (or FFMPEG_PATH env var) + magick_path: Path to magick binary (or MAGICK_PATH env var) + """ + self.ffmpeg_path = ffmpeg_path or os.environ.get("FFMPEG_PATH", "ffmpeg") + self.magick_path = magick_path or os.environ.get("MAGICK_PATH", "magick") + + def _check_ffmpeg(self) -> bool: + """Check if FFmpeg is available.""" + return shutil.which(self.ffmpeg_path) is not None + + def _check_imagemagick(self) -> bool: + """Check if ImageMagick is available.""" + return shutil.which(self.magick_path) is not None + + async def _run_command( + self, + cmd: list[str], + check: bool = True, + ) -> tuple[int, str, str]: + """Run a command asynchronously. 
+ + Returns: + Tuple of (return_code, stdout, stderr) + """ + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await process.communicate() + return ( + process.returncode or 0, + stdout.decode("utf-8", errors="replace"), + stderr.decode("utf-8", errors="replace"), + ) + + def _ensure_output_path( + self, + input_path: str, + output_path: str | None, + suffix: str | None = None, + ) -> str: + """Generate output path if not provided.""" + if output_path: + return output_path + + input_p = Path(input_path) + if suffix: + return str(input_p.parent / f"{input_p.stem}{suffix}") + return str(input_p.parent / f"{input_p.stem}_edited{input_p.suffix}") + + # ------------------------------------------------------------------------- + # Image Operations (ImageMagick) + # ------------------------------------------------------------------------- + + async def resize_image( + self, + input_path: str, + output_path: str | None = None, + width: int | None = None, + height: int | None = None, + scale: float | None = None, + maintain_aspect: bool = True, + quality: int = 85, + ) -> EditResult: + """Resize an image. + + Args: + input_path: Path to input image + output_path: Path for output (auto-generated if not provided) + width: Target width in pixels + height: Target height in pixels + scale: Scale factor (e.g., 0.5 for half size) + maintain_aspect: Keep aspect ratio (default True) + quality: Output quality 1-100 (for JPEG/WebP) + + Returns: + EditResult with output path + """ + if not self._check_imagemagick(): + return EditResult( + success=False, + error="ImageMagick not found. 
Install with: apt install imagemagick", + ) + + output_path = self._ensure_output_path(input_path, output_path) + cmd = [self.magick_path, input_path] + + if scale: + cmd.extend(["-resize", f"{int(scale * 100)}%"]) + elif width and height: + resize_op = f"{width}x{height}" if maintain_aspect else f"{width}x{height}!" + cmd.extend(["-resize", resize_op]) + elif width: + cmd.extend(["-resize", f"{width}x"]) + elif height: + cmd.extend(["-resize", f"x{height}"]) + + cmd.extend(["-quality", str(quality)]) + cmd.append(output_path) + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + stdout=stdout, + stderr=stderr, + ) + + async def crop_image( + self, + input_path: str, + output_path: str | None = None, + width: int | None = None, + height: int | None = None, + x: int = 0, + y: int = 0, + gravity: str | None = None, + ) -> EditResult: + """Crop an image. + + Args: + input_path: Path to input image + output_path: Path for output + width: Crop width + height: Crop height + x: X offset from left (or from gravity point) + y: Y offset from top (or from gravity point) + gravity: Gravity point (Center, North, South, East, West, etc.) 
+ + Returns: + EditResult with output path + """ + if not self._check_imagemagick(): + return EditResult( + success=False, + error="ImageMagick not found", + ) + + output_path = self._ensure_output_path(input_path, output_path) + cmd = [self.magick_path, input_path] + + if gravity: + cmd.extend(["-gravity", gravity]) + + crop_spec = f"{width}x{height}+{x}+{y}" + cmd.extend(["-crop", crop_spec, "+repage"]) + cmd.append(output_path) + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + ) + + async def rotate_image( + self, + input_path: str, + output_path: str | None = None, + degrees: float = 90, + background: str = "white", + ) -> EditResult: + """Rotate an image. + + Args: + input_path: Path to input image + output_path: Path for output + degrees: Rotation angle (positive = clockwise) + background: Background color for corners + + Returns: + EditResult with output path + """ + if not self._check_imagemagick(): + return EditResult(success=False, error="ImageMagick not found") + + output_path = self._ensure_output_path(input_path, output_path) + cmd = [ + self.magick_path, + input_path, + "-background", + background, + "-rotate", + str(degrees), + output_path, + ] + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + ) + + async def apply_filter( + self, + input_path: str, + output_path: str | None = None, + filter_name: str = "grayscale", + **kwargs: Any, + ) -> EditResult: + """Apply a filter to an image. + + Args: + input_path: Path to input image + output_path: Path for output + filter_name: Filter to apply (grayscale, blur, sharpen, etc.) 
+ **kwargs: Filter-specific parameters + + Returns: + EditResult with output path + """ + if not self._check_imagemagick(): + return EditResult(success=False, error="ImageMagick not found") + + output_path = self._ensure_output_path(input_path, output_path) + cmd = [self.magick_path, input_path] + + # Map filter names to ImageMagick operations + filter_ops = { + "grayscale": ["-colorspace", "Gray"], + "sepia": ["-sepia-tone", str(kwargs.get("intensity", 80)) + "%"], + "blur": ["-blur", f"0x{kwargs.get('radius', 3)}"], + "sharpen": ["-sharpen", f"0x{kwargs.get('radius', 1)}"], + "negate": ["-negate"], + "normalize": ["-normalize"], + "equalize": ["-equalize"], + "brightness": [ + "-modulate", + f"{kwargs.get('brightness', 100)},{kwargs.get('saturation', 100)}", + ], + "contrast": ["-contrast-stretch", f"{kwargs.get('black', 0)}x{kwargs.get('white', 0)}%"], + } + + if filter_name in filter_ops: + cmd.extend(filter_ops[filter_name]) + else: + return EditResult( + success=False, + error=f"Unknown filter: {filter_name}. Available: {list(filter_ops.keys())}", + ) + + cmd.append(output_path) + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + ) + + async def composite_images( + self, + background_path: str, + overlay_path: str, + output_path: str | None = None, + x: int = 0, + y: int = 0, + gravity: str = "NorthWest", + opacity: float = 1.0, + ) -> EditResult: + """Composite two images (overlay one on another). 
+ + Args: + background_path: Path to background image + overlay_path: Path to overlay image + output_path: Path for output + x: X offset + y: Y offset + gravity: Placement gravity + opacity: Overlay opacity (0.0-1.0) + + Returns: + EditResult with output path + """ + if not self._check_imagemagick(): + return EditResult(success=False, error="ImageMagick not found") + + output_path = self._ensure_output_path(background_path, output_path, "_composite.png") + + # Build composite command + cmd = [ + self.magick_path, + background_path, + "(", + overlay_path, + "-alpha", + "set", + "-channel", + "A", + "-evaluate", + "multiply", + str(opacity), + "+channel", + ")", + "-gravity", + gravity, + "-geometry", + f"+{x}+{y}", + "-composite", + output_path, + ] + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + ) + + # ------------------------------------------------------------------------- + # Video Operations (FFmpeg) + # ------------------------------------------------------------------------- + + async def trim_video( + self, + input_path: str, + output_path: str | None = None, + start: str | float | None = None, + end: str | float | None = None, + duration: float | None = None, + copy_codec: bool = True, + ) -> EditResult: + """Trim a video to a specific segment. + + Args: + input_path: Path to input video + output_path: Path for output + start: Start time (e.g., "00:00:30" or 30.0) + end: End time (e.g., "00:01:00" or 60.0) + duration: Duration in seconds (alternative to end) + copy_codec: Copy codecs without re-encoding (fast but less precise) + + Returns: + EditResult with output path + """ + if not self._check_ffmpeg(): + return EditResult( + success=False, + error="FFmpeg not found. 
Install with: apt install ffmpeg", + ) + + output_path = self._ensure_output_path(input_path, output_path, "_trimmed.mp4") + cmd = [self.ffmpeg_path, "-y"] + + if start: + cmd.extend(["-ss", str(start)]) + + cmd.extend(["-i", input_path]) + + if end: + cmd.extend(["-to", str(end)]) + elif duration: + cmd.extend(["-t", str(duration)]) + + if copy_codec: + cmd.extend(["-c", "copy"]) + + cmd.append(output_path) + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + stderr=stderr, + ) + + async def resize_video( + self, + input_path: str, + output_path: str | None = None, + width: int | None = None, + height: int | None = None, + scale: str | None = None, + preset: str | None = None, + ) -> EditResult: + """Resize a video. + + Args: + input_path: Path to input video + output_path: Path for output + width: Target width (use -1 for auto based on height) + height: Target height (use -1 for auto based on width) + scale: FFmpeg scale filter (e.g., "1280:720", "iw/2:ih/2") + preset: Preset resolution ("480p", "720p", "1080p", "4k") + + Returns: + EditResult with output path + """ + if not self._check_ffmpeg(): + return EditResult(success=False, error="FFmpeg not found") + + output_path = self._ensure_output_path(input_path, output_path, "_resized.mp4") + + # Handle presets + presets = { + "480p": "854:480", + "720p": "1280:720", + "1080p": "1920:1080", + "4k": "3840:2160", + } + + if preset: + scale = presets.get(preset, scale) + elif width and height: + scale = f"{width}:{height}" + elif width: + scale = f"{width}:-2" + elif height: + scale = f"-2:{height}" + + if not scale: + return EditResult(success=False, error="Must specify width, height, scale, or preset") + + cmd = [ + self.ffmpeg_path, + "-y", + "-i", + input_path, + "-vf", + f"scale={scale}", + "-c:a", + "copy", + output_path, + ] + 
+ returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + ) + + async def extract_audio( + self, + input_path: str, + output_path: str | None = None, + format: str = "mp3", + bitrate: str = "192k", + ) -> EditResult: + """Extract audio track from video. + + Args: + input_path: Path to input video + output_path: Path for output audio + format: Output format (mp3, wav, aac, etc.) + bitrate: Audio bitrate + + Returns: + EditResult with output path + """ + if not self._check_ffmpeg(): + return EditResult(success=False, error="FFmpeg not found") + + output_path = self._ensure_output_path(input_path, output_path, f".{format}") + + cmd = [ + self.ffmpeg_path, + "-y", + "-i", + input_path, + "-vn", + "-acodec", + "libmp3lame" if format == "mp3" else "copy", + "-ab", + bitrate, + output_path, + ] + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + ) + + async def add_audio( + self, + video_path: str, + audio_path: str, + output_path: str | None = None, + replace: bool = True, + volume: float = 1.0, + ) -> EditResult: + """Add or replace audio in a video. 
+ + Args: + video_path: Path to input video + audio_path: Path to audio file + output_path: Path for output + replace: Replace existing audio (True) or mix (False) + volume: Audio volume multiplier + + Returns: + EditResult with output path + """ + if not self._check_ffmpeg(): + return EditResult(success=False, error="FFmpeg not found") + + output_path = self._ensure_output_path(video_path, output_path, "_audio.mp4") + + if replace: + cmd = [ + self.ffmpeg_path, + "-y", + "-i", + video_path, + "-i", + audio_path, + "-c:v", + "copy", + "-map", + "0:v:0", + "-map", + "1:a:0", + "-shortest", + output_path, + ] + else: + # Mix audio + cmd = [ + self.ffmpeg_path, + "-y", + "-i", + video_path, + "-i", + audio_path, + "-c:v", + "copy", + "-filter_complex", + f"[0:a][1:a]amerge=inputs=2,volume={volume}[a]", + "-map", + "0:v", + "-map", + "[a]", + "-shortest", + output_path, + ] + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + ) + + async def extract_frames( + self, + input_path: str, + output_dir: str | None = None, + fps: float = 1.0, + format: str = "jpg", + quality: int = 2, + ) -> EditResult: + """Extract frames from video as images. 
+ + Args: + input_path: Path to input video + output_dir: Directory for output frames + fps: Frames per second to extract + format: Output image format + quality: JPEG quality (2-31, lower is better) + + Returns: + EditResult with output directory + """ + if not self._check_ffmpeg(): + return EditResult(success=False, error="FFmpeg not found") + + if output_dir is None: + output_dir = tempfile.mkdtemp(prefix="frames_") + else: + os.makedirs(output_dir, exist_ok=True) + + output_pattern = os.path.join(output_dir, f"frame_%04d.{format}") + + cmd = [ + self.ffmpeg_path, + "-y", + "-i", + input_path, + "-vf", + f"fps={fps}", + "-q:v", + str(quality), + output_pattern, + ] + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_dir if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + metadata={"pattern": output_pattern}, + ) + + async def create_thumbnail( + self, + input_path: str, + output_path: str | None = None, + time: str | float = "00:00:01", + width: int = 320, + height: int | None = None, + ) -> EditResult: + """Create a thumbnail from video. 
+ + Args: + input_path: Path to input video + output_path: Path for output image + time: Time position to capture + width: Thumbnail width + height: Thumbnail height (auto if not specified) + + Returns: + EditResult with output path + """ + if not self._check_ffmpeg(): + return EditResult(success=False, error="FFmpeg not found") + + output_path = self._ensure_output_path(input_path, output_path, "_thumb.jpg") + + scale = f"{width}:-1" if height is None else f"{width}:{height}" + + cmd = [ + self.ffmpeg_path, + "-y", + "-ss", + str(time), + "-i", + input_path, + "-vframes", + "1", + "-vf", + f"scale={scale}", + output_path, + ] + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + ) + + # ------------------------------------------------------------------------- + # Format Conversion + # ------------------------------------------------------------------------- + + async def convert( + self, + input_path: str, + output_path: str | None = None, + format: MediaFormat | str | None = None, + quality: int = 85, + **kwargs: Any, + ) -> EditResult: + """Convert media to different format. 
+ + Args: + input_path: Path to input file + output_path: Path for output (format inferred from extension) + format: Target format + quality: Quality setting (for lossy formats) + **kwargs: Additional format-specific options + + Returns: + EditResult with output path + """ + if format is None and output_path: + format = Path(output_path).suffix.lstrip(".") + + if format is None: + return EditResult(success=False, error="Must specify format or output_path with extension") + + if isinstance(format, MediaFormat): + format = format.value + + # Determine if it's an image or video format + image_formats = {"jpeg", "jpg", "png", "webp", "gif", "avif", "tiff"} + video_formats = {"mp4", "webm", "mov", "avi", "mkv"} + audio_formats = {"mp3", "wav", "aac", "ogg", "flac"} + + if format in image_formats: + return await self._convert_image(input_path, output_path, format, quality) + elif format in video_formats: + return await self._convert_video(input_path, output_path, format, **kwargs) + elif format in audio_formats: + return await self._convert_audio(input_path, output_path, format, **kwargs) + else: + return EditResult(success=False, error=f"Unsupported format: {format}") + + async def _convert_image( + self, + input_path: str, + output_path: str | None, + format: str, + quality: int, + ) -> EditResult: + """Convert image format using ImageMagick.""" + if not self._check_imagemagick(): + return EditResult(success=False, error="ImageMagick not found") + + output_path = self._ensure_output_path(input_path, output_path, f".{format}") + + cmd = [ + self.magick_path, + input_path, + "-quality", + str(quality), + output_path, + ] + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + ) + + async def _convert_video( + self, + input_path: str, + output_path: str | None, + format: str, + **kwargs: 
Any, + ) -> EditResult: + """Convert video format using FFmpeg.""" + if not self._check_ffmpeg(): + return EditResult(success=False, error="FFmpeg not found") + + output_path = self._ensure_output_path(input_path, output_path, f".{format}") + + cmd = [self.ffmpeg_path, "-y", "-i", input_path] + + # Add codec options based on format + if format == "webm": + cmd.extend(["-c:v", "libvpx-vp9", "-c:a", "libopus"]) + elif format == "mp4": + cmd.extend(["-c:v", "libx264", "-c:a", "aac"]) + + # Add any additional options + crf = kwargs.get("crf", 23) + cmd.extend(["-crf", str(crf)]) + + cmd.append(output_path) + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + ) + + async def _convert_audio( + self, + input_path: str, + output_path: str | None, + format: str, + **kwargs: Any, + ) -> EditResult: + """Convert audio format using FFmpeg.""" + if not self._check_ffmpeg(): + return EditResult(success=False, error="FFmpeg not found") + + output_path = self._ensure_output_path(input_path, output_path, f".{format}") + + cmd = [self.ffmpeg_path, "-y", "-i", input_path] + + bitrate = kwargs.get("bitrate", "192k") + cmd.extend(["-ab", bitrate]) + + cmd.append(output_path) + + returncode, stdout, stderr = await self._run_command(cmd) + + return EditResult( + success=returncode == 0, + output_path=output_path if returncode == 0 else None, + error=stderr if returncode != 0 else None, + command=" ".join(cmd), + ) + + # ------------------------------------------------------------------------- + # Media Information + # ------------------------------------------------------------------------- + + async def get_info(self, input_path: str) -> MediaInfo: + """Get information about a media file. 
+ + Args: + input_path: Path to media file + + Returns: + MediaInfo with file details + """ + if not self._check_ffmpeg(): + # Fallback to basic info + path = Path(input_path) + return MediaInfo( + path=input_path, + format=path.suffix.lstrip("."), + size_bytes=path.stat().st_size if path.exists() else None, + ) + + cmd = [ + self.ffmpeg_path, + "-i", + input_path, + "-hide_banner", + ] + + returncode, stdout, stderr = await self._run_command(cmd) + + # FFmpeg outputs info to stderr + info = MediaInfo(path=input_path) + + # Parse format + path = Path(input_path) + info.format = path.suffix.lstrip(".") + if path.exists(): + info.size_bytes = path.stat().st_size + + # Parse dimensions (from "Stream #0:0: Video: ... 1920x1080") + import re + + dimension_match = re.search(r"(\d{2,5})x(\d{2,5})", stderr) + if dimension_match: + info.width = int(dimension_match.group(1)) + info.height = int(dimension_match.group(2)) + + # Parse duration + duration_match = re.search(r"Duration: (\d+):(\d+):(\d+)\.(\d+)", stderr) + if duration_match: + h, m, s, ms = duration_match.groups() + info.duration = int(h) * 3600 + int(m) * 60 + int(s) + int(ms) / 100 + + # Parse FPS + fps_match = re.search(r"(\d+(?:\.\d+)?)\s*fps", stderr) + if fps_match: + info.fps = float(fps_match.group(1)) + + return info + + +# Convenience functions for synchronous usage +def resize_image_sync(input_path: str, **kwargs: Any) -> EditResult: + """Synchronous wrapper for image resize.""" + service = MediaEditService() + return asyncio.run(service.resize_image(input_path, **kwargs)) + + +def trim_video_sync(input_path: str, **kwargs: Any) -> EditResult: + """Synchronous wrapper for video trim.""" + service = MediaEditService() + return asyncio.run(service.trim_video(input_path, **kwargs)) + + +def convert_sync(input_path: str, **kwargs: Any) -> EditResult: + """Synchronous wrapper for format conversion.""" + service = MediaEditService() + return asyncio.run(service.convert(input_path, **kwargs)) diff --git 
a/src/kurt/tools/fetch/tests/test_fetch_tool.py b/src/kurt/tools/fetch/tests/test_fetch_tool.py index 7e6971eb..c64f2b93 100644 --- a/src/kurt/tools/fetch/tests/test_fetch_tool.py +++ b/src/kurt/tools/fetch/tests/test_fetch_tool.py @@ -855,6 +855,15 @@ async def test_real_fetch(tmp_sqlmodel_project): result = await tool.run(params, tool_context) + if not result.success: + error_msg = result.data[0].get("error", "") if result.data else "" + if ( + "CERTIFICATE_VERIFY_FAILED" in error_msg + or "No content from: https://example.com" in error_msg + or error_msg == "connection_error" + ): + pytest.skip(f"real fetch unavailable in this environment: {error_msg}") + assert result.success is True assert len(result.data) == 1 assert result.data[0]["status"] == "SUCCESS" diff --git a/src/kurt/web/api/server.py b/src/kurt/web/api/server.py index cc75d1e5..cb5ff712 100644 --- a/src/kurt/web/api/server.py +++ b/src/kurt/web/api/server.py @@ -76,6 +76,53 @@ app.mount("/assets", StaticFiles(directory=str(assets_dir)), name="assets") +@app.get("/api/project") +def api_project(): + return {"root": str(Path.cwd().resolve())} + + +@app.get("/api/config") +def api_config(): + """Get Kurt project configuration for frontend sections.""" + try: + from kurt.config import config_file_exists, load_config + + if not config_file_exists(): + return { + "available": False, + "paths": { + "sources": "sources", + "projects": "projects", + "workflows": "workflows", + "rules": "rules", + "kurt": ".kurt", + }, + } + + config = load_config() + return { + "available": True, + "paths": { + "sources": config.PATH_SOURCES, + "projects": config.PATH_PROJECTS, + "workflows": config.PATH_WORKFLOWS, + "rules": config.PATH_RULES, + "kurt": str(Path(config.PATH_DB).parent), + }, + } + except Exception: + return { + "available": False, + "paths": { + "sources": "sources", + "projects": "projects", + "workflows": "workflows", + "rules": "rules", + "kurt": ".kurt", + }, + } + + # --- SPA catch-all route for 
production --- # This must be registered LAST to not interfere with API routes if CLIENT_DIST.exists() and (CLIENT_DIST / "index.html").exists(): diff --git a/src/kurt/web/client/package-lock.json b/src/kurt/web/client/package-lock.json index c004262d..1d2d1f80 100644 --- a/src/kurt/web/client/package-lock.json +++ b/src/kurt/web/client/package-lock.json @@ -14,6 +14,7 @@ "@radix-ui/react-dialog": "^1.1.15", "@radix-ui/react-slot": "^1.2.4", "@radix-ui/react-tooltip": "^1.2.8", + "@remotion/player": "^4.0.250", "@tiptap/core": "^3.15.2", "@tiptap/extension-code-block-lowlight": "^3.17.0", "@tiptap/extension-highlight": "^3.15.2", @@ -36,6 +37,7 @@ "clsx": "^2.1.1", "diff": "^8.0.2", "dockview-react": "^4.13.1", + "konva": "^9.3.18", "lowlight": "^3.3.0", "lucide-react": "^0.562.0", "markdown-it": "^14.0.0", @@ -43,6 +45,7 @@ "react": "^18.2.0", "react-diff-view": "^3.2.0", "react-dom": "^18.2.0", + "react-konva": "^18.2.14", "react-simple-code-editor": "^0.14.1", "remark-gfm": "^4.0.1", "tailwind-merge": "^3.4.0", @@ -245,7 +248,6 @@ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", @@ -601,7 +603,6 @@ } ], "license": "MIT", - "peer": true, "engines": { "node": ">=18" }, @@ -625,7 +626,6 @@ } ], "license": "MIT", - "peer": true, "engines": { "node": ">=18" } @@ -1192,7 +1192,6 @@ "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.4.tgz", "integrity": "sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==", "license": "MIT", - "peer": true, "dependencies": { "@floating-ui/core": "^1.7.3", "@floating-ui/utils": "^0.2.10" @@ -2395,6 +2394,19 @@ "integrity": "sha512-42aWfPrimMfDKDi4YegyS7x+/0tlzaqwPQCULLanv3DMIlu96KTJR0fM5isWX2UViOqlGnX6YFgqWepcX+XMNg==", "license": "MIT" }, + "node_modules/@remotion/player": { + "version": 
"4.0.443", + "resolved": "https://registry.npmjs.org/@remotion/player/-/player-4.0.443.tgz", + "integrity": "sha512-R7dftiGaIHXMXG7HwHjuvpV63NgLlPRzDtM/RrMWSYj3yLZdqDhjCFvbNIhPoydekCj1D1yVyBt0Uk8pdeMGiA==", + "license": "SEE LICENSE IN LICENSE.md", + "dependencies": { + "remotion": "4.0.443" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, "node_modules/@rolldown/pluginutils": { "version": "1.0.0-beta.27", "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz", @@ -2988,6 +3000,66 @@ "node": ">=14.0.0" } }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": { + "version": "1.7.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/wasi-threads": "1.1.0", + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": { + "version": "1.7.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1", + "@tybys/wasm-util": "^0.10.1" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": { + "version": "0.10.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": { + "version": "2.8.1", + "dev": true, + "inBundle": true, + "license": "0BSD", + "optional": 
true + }, "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { "version": "4.1.18", "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.18.tgz", @@ -3153,7 +3225,6 @@ "resolved": "https://registry.npmjs.org/@tiptap/core/-/core-3.17.0.tgz", "integrity": "sha512-jpGwcSdr0WRmLRmQWAYo6DlR2lIoZ7XYq8/slwJvC/4GUbafVzYiyGlJLRxhh/9LYTIz5FUavThFKd4y6OtOQw==", "license": "MIT", - "peer": true, "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -3237,7 +3308,6 @@ "resolved": "https://registry.npmjs.org/@tiptap/extension-code-block/-/extension-code-block-3.17.0.tgz", "integrity": "sha512-yEfwV8l4FFswglut8T7/2bVbERNEHKB9gHvpSF1Vm+R/opFNX61WFHg/2tupO0s+s8bRIzhzxYdBqtj4Bv27+g==", "license": "MIT", - "peer": true, "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -3377,7 +3447,6 @@ "resolved": "https://registry.npmjs.org/@tiptap/extension-image/-/extension-image-3.15.3.tgz", "integrity": "sha512-Tjq9BHlC/0bGR9/uySA0tv6I1Ua1Q5t5P/mdbWyZi4JdUpKHRfgenzfXF5DYnklJ01QJ7uOPSp9sAGgPzBixtQ==", "license": "MIT", - "peer": true, "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -3421,7 +3490,6 @@ "resolved": "https://registry.npmjs.org/@tiptap/extension-list/-/extension-list-3.15.2.tgz", "integrity": "sha512-QP0Y1fVOUwAVKdhAwOc66iihBqEnaEEcRCX877dk9yDu3QkdjwCrAPMldvsK62O9pK02qyP/SAFrpCp69asHIg==", "license": "MIT", - "peer": true, "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -3514,7 +3582,6 @@ "resolved": "https://registry.npmjs.org/@tiptap/extension-table/-/extension-table-3.16.0.tgz", "integrity": "sha512-m7h7YdffWxI0lglKUfR+39UD9psOprn/E4qYzjxOSXl1rg8DnP6zi8LF+5X+v32my9WBbizXxVBIdy8AuDWxAw==", "license": "MIT", - "peer": true, "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -3633,7 +3700,6 @@ "resolved": 
"https://registry.npmjs.org/@tiptap/extensions/-/extensions-3.15.2.tgz", "integrity": "sha512-BYwAuuKZ0TGoypqGj8RSOu9qxqiUaaoDIPNebfio4kcieBvGmk80i48QCfLvX5QnzjsD/SjduW0NX2Gs+50+7A==", "license": "MIT", - "peer": true, "funding": { "type": "github", "url": "https://github.com/sponsors/ueberdosis" @@ -3665,7 +3731,6 @@ "resolved": "https://registry.npmjs.org/@tiptap/pm/-/pm-3.17.0.tgz", "integrity": "sha512-zb3FNjwMIwpQtPD6dkQvKIlVqhL0TsVCmmJsFOJZaJCmBrzvGq7M+p0GAK+zT+ZO6youLZlPyyF7t/N6T0dxrA==", "license": "MIT", - "peer": true, "dependencies": { "prosemirror-changeset": "^2.3.0", "prosemirror-collab": "^1.3.1", @@ -3900,7 +3965,6 @@ "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.27.tgz", "integrity": "sha512-cisd7gxkzjBKU2GgdYrTdtQx1SORymWyaAFhaxQPK9bYO9ot3Y5OikQRvY0VYQtvwjeQnizCINJAenh/V7MK2w==", "license": "MIT", - "peer": true, "dependencies": { "@types/prop-types": "*", "csstype": "^3.2.2" @@ -3911,11 +3975,19 @@ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.7.tgz", "integrity": "sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==", "license": "MIT", - "peer": true, "peerDependencies": { "@types/react": "^18.0.0" } }, + "node_modules/@types/react-reconciler": { + "version": "0.28.9", + "resolved": "https://registry.npmjs.org/@types/react-reconciler/-/react-reconciler-0.28.9.tgz", + "integrity": "sha512-HHM3nxyUZ3zAylX8ZEyrDNd2XZOnQ0D5XfunJF5FLQnZbHHYq4UWvW1QfelQNXv1ICNkwYhfxjwfnqivYB6bFg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*" + } + }, "node_modules/@types/unist": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", @@ -4111,7 +4183,6 @@ "integrity": "sha512-xa57bCPGuzEFqGjPs3vVLyqareG8DX0uMkr5U/v5vLv5/ZUrBrPL7gzxzTJedEyZxFMfsozwTIbbYfEQVo3kgg==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vitest/utils": "1.6.1", "fast-glob": "^3.3.2", @@ -4185,7 +4256,6 @@ "integrity": 
"sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", - "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -4587,7 +4657,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", @@ -5565,7 +5634,6 @@ "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", @@ -6410,7 +6478,6 @@ "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-11.11.1.tgz", "integrity": "sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w==", "license": "BSD-3-Clause", - "peer": true, "engines": { "node": ">=12.0.0" } @@ -7154,6 +7221,18 @@ "node": ">= 0.4" } }, + "node_modules/its-fine": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/its-fine/-/its-fine-1.2.5.tgz", + "integrity": "sha512-fXtDA0X0t0eBYAGLVM5YsgJGsJ5jEmqZEPrGbzdf5awjv0xE7nqv3TVnvtUF060Tkes15DbDAKW/I48vsb6SyA==", + "license": "MIT", + "dependencies": { + "@types/react-reconciler": "^0.28.0" + }, + "peerDependencies": { + "react": ">=18.0" + } + }, "node_modules/jiti": { "version": "2.6.1", "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz", @@ -7189,7 +7268,6 @@ "integrity": "sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "cssstyle": "^4.0.1", "data-urls": "^5.0.0", @@ -7298,6 +7376,26 @@ "json-buffer": "3.0.1" } }, + "node_modules/konva": { + "version": "9.3.22", + "resolved": "https://registry.npmjs.org/konva/-/konva-9.3.22.tgz", + "integrity": "sha512-yQI5d1bmELlD/fowuyfOp9ff+oamg26WOCkyqUyc+nczD/lhRa3EvD2MZOoc4c1293TAubW9n34fSQLgSeEgSw==", + "funding": [ + { + "type": "patreon", 
+ "url": "https://www.patreon.com/lavrton" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/konva" + }, + { + "type": "github", + "url": "https://github.com/sponsors/lavrton" + } + ], + "license": "MIT" + }, "node_modules/levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -7671,7 +7769,6 @@ "resolved": "https://registry.npmjs.org/lowlight/-/lowlight-3.3.0.tgz", "integrity": "sha512-0JNhgFoPvP6U6lE/UdVsSq99tn6DhjjpAj5MxG49ewd2mOBVtwWYIT8ClyABhq198aXXODMU6Ox8DrGy/CpTZQ==", "license": "MIT", - "peer": true, "dependencies": { "@types/hast": "^3.0.0", "devlop": "^1.0.0", @@ -9470,7 +9567,6 @@ "resolved": "https://registry.npmjs.org/prosemirror-model/-/prosemirror-model-1.25.4.tgz", "integrity": "sha512-PIM7E43PBxKce8OQeezAs9j4TP+5yDpZVbuurd1h5phUxEKIu+G2a+EUZzIC5nS1mJktDJWzbqS23n1tsAf5QA==", "license": "MIT", - "peer": true, "dependencies": { "orderedmap": "^2.0.0" } @@ -9500,7 +9596,6 @@ "resolved": "https://registry.npmjs.org/prosemirror-state/-/prosemirror-state-1.4.4.tgz", "integrity": "sha512-6jiYHH2CIGbCfnxdHbXZ12gySFY/fz/ulZE333G6bPqIZ4F+TXo9ifiR86nAHpWnfoNjOb3o5ESi7J8Uz1jXHw==", "license": "MIT", - "peer": true, "dependencies": { "prosemirror-model": "^1.0.0", "prosemirror-transform": "^1.0.0", @@ -9549,7 +9644,6 @@ "resolved": "https://registry.npmjs.org/prosemirror-view/-/prosemirror-view-1.41.4.tgz", "integrity": "sha512-WkKgnyjNncri03Gjaz3IFWvCAE94XoiEgvtr0/r2Xw7R8/IjK3sKLSiDoCHWcsXSAinVaKlGRZDvMCsF1kbzjA==", "license": "MIT", - "peer": true, "dependencies": { "prosemirror-model": "^1.20.0", "prosemirror-state": "^1.0.0", @@ -9621,7 +9715,6 @@ "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", "license": "MIT", - "peer": true, "dependencies": { "loose-envify": "^1.1.0" }, @@ -9651,7 +9744,6 @@ "resolved": 
"https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", "license": "MIT", - "peer": true, "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.2" @@ -9667,6 +9759,37 @@ "dev": true, "license": "MIT" }, + "node_modules/react-konva": { + "version": "18.2.14", + "resolved": "https://registry.npmjs.org/react-konva/-/react-konva-18.2.14.tgz", + "integrity": "sha512-lBDe/5fTgquMdg1AHI0B16YZdAOvEhWMBWuo12ioyY0icdxcz9Cf12j86fsCJCHdnvjUOlZeC0f5q+siyHbD4Q==", + "funding": [ + { + "type": "patreon", + "url": "https://www.patreon.com/lavrton" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/konva" + }, + { + "type": "github", + "url": "https://github.com/sponsors/lavrton" + } + ], + "license": "MIT", + "dependencies": { + "@types/react-reconciler": "^0.28.2", + "its-fine": "^1.1.1", + "react-reconciler": "~0.29.0", + "scheduler": "^0.23.0" + }, + "peerDependencies": { + "konva": "^8.0.1 || ^7.2.5 || ^9.0.0 || ^10.0.0", + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, "node_modules/react-markdown": { "version": "10.1.0", "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-10.1.0.tgz", @@ -9694,6 +9817,22 @@ "react": ">=18" } }, + "node_modules/react-reconciler": { + "version": "0.29.2", + "resolved": "https://registry.npmjs.org/react-reconciler/-/react-reconciler-0.29.2.tgz", + "integrity": "sha512-zZQqIiYgDCTP/f1N/mAR10nJGrPD2ZR+jDSEsKWJHYC7Cm2wodlwbR3upZRdC3cjIjSlTLNVyO7Iu0Yy7t2AYg==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "engines": { + "node": ">=0.10.0" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, "node_modules/react-refresh": { "version": "0.17.0", "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz", @@ -9924,6 +10063,16 @@ "url": "https://opencollective.com/unified" } }, + 
"node_modules/remotion": { + "version": "4.0.443", + "resolved": "https://registry.npmjs.org/remotion/-/remotion-4.0.443.tgz", + "integrity": "sha512-zfqqhy4TN148lT2Rf6Xl6iYLnnCdh4WNpZM+iZ1aTe9vbMNY1Wb3FAcQNN2hnUi2hfPdD8zOpwdt9/KDRZEN9A==", + "license": "SEE LICENSE IN LICENSE.md", + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, "node_modules/requires-port": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", @@ -11253,7 +11402,6 @@ "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "esbuild": "^0.21.3", "postcss": "^8.4.43", @@ -11337,7 +11485,6 @@ "integrity": "sha512-Ljb1cnSJSivGN0LqXd/zmDbWEM0RNNg2t1QW/XUhYl/qPqyu7CsqeWtqQXHVaJsecLPuDoak2oJcZN2QoRIOag==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@vitest/expect": "1.6.1", "@vitest/runner": "1.6.1", @@ -11657,8 +11804,7 @@ "resolved": "https://registry.npmjs.org/xterm/-/xterm-5.3.0.tgz", "integrity": "sha512-8QqjlekLUFTrU6x7xck1MsPzPA571K5zNqWm0M0oroYEWVOptZ0+ubQSkQ3uxIEhcIHRujJy6emDWX4A7qyFzg==", "deprecated": "This package is now deprecated. 
Move to @xterm/xterm instead.", - "license": "MIT", - "peer": true + "license": "MIT" }, "node_modules/xterm-addon-fit": { "version": "0.8.0", @@ -11695,7 +11841,6 @@ "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.5.tgz", "integrity": "sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g==", "license": "MIT", - "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } diff --git a/src/kurt/web/client/package.json b/src/kurt/web/client/package.json index 70168627..720cae6e 100644 --- a/src/kurt/web/client/package.json +++ b/src/kurt/web/client/package.json @@ -20,6 +20,7 @@ "@radix-ui/react-dialog": "^1.1.15", "@radix-ui/react-slot": "^1.2.4", "@radix-ui/react-tooltip": "^1.2.8", + "@remotion/player": "^4.0.250", "@tiptap/core": "^3.15.2", "@tiptap/extension-code-block-lowlight": "^3.17.0", "@tiptap/extension-highlight": "^3.15.2", @@ -42,6 +43,7 @@ "clsx": "^2.1.1", "diff": "^8.0.2", "dockview-react": "^4.13.1", + "konva": "^9.3.18", "lowlight": "^3.3.0", "lucide-react": "^0.562.0", "markdown-it": "^14.0.0", @@ -49,6 +51,7 @@ "react": "^18.2.0", "react-diff-view": "^3.2.0", "react-dom": "^18.2.0", + "react-konva": "^18.2.14", "react-simple-code-editor": "^0.14.1", "remark-gfm": "^4.0.1", "tailwind-merge": "^3.4.0", diff --git a/src/kurt/web/client/src/App.jsx b/src/kurt/web/client/src/App.jsx index 48527715..f8e008da 100644 --- a/src/kurt/web/client/src/App.jsx +++ b/src/kurt/web/client/src/App.jsx @@ -14,6 +14,8 @@ import EmptyPanel from './panels/EmptyPanel' import ReviewPanel from './panels/ReviewPanel' import WorkflowsPanel from './panels/WorkflowsPanel' import WorkflowTerminalPanel from './panels/WorkflowTerminalPanel' +import ImageEditorPanel from './panels/ImageEditorPanel' +import VideoEditorPanel from './panels/VideoEditorPanel' import WorkflowDetailPanel from './panels/WorkflowDetailPanel' import ClaudeStreamChat from './components/chat/ClaudeStreamChat' @@ -32,6 +34,8 @@ const components 
= { review: ReviewPanel, workflows: WorkflowsPanel, workflowTerminal: WorkflowTerminalPanel, + imageEditor: ImageEditorPanel, + videoEditor: VideoEditorPanel, workflowDetail: WorkflowDetailPanel, } diff --git a/src/kurt/web/client/src/components/FileTree.jsx b/src/kurt/web/client/src/components/FileTree.jsx index e9d0c63a..881e9e8b 100644 --- a/src/kurt/web/client/src/components/FileTree.jsx +++ b/src/kurt/web/client/src/components/FileTree.jsx @@ -19,7 +19,20 @@ const formatSectionLabel = (path) => { return name.charAt(0).toUpperCase() + name.slice(1) } -export default function FileTree({ onOpen, onOpenToSide, onFileDeleted, onFileRenamed, onFileMoved, projectRoot, activeFile, creatingFile, onFileCreated, onCancelCreate }) { +// Media file type detection +const IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'png', 'gif', 'webp', 'avif', 'tiff', 'tif', 'bmp', 'svg'] +const VIDEO_EXTENSIONS = ['mp4', 'webm', 'mov', 'avi', 'mkv', 'm4v', 'ogv'] + +const getFileExtension = (path) => { + const parts = path.split('.') + return parts.length > 1 ? 
parts.pop().toLowerCase() : '' +} + +const isImageFile = (path) => IMAGE_EXTENSIONS.includes(getFileExtension(path)) +const isVideoFile = (path) => VIDEO_EXTENSIONS.includes(getFileExtension(path)) +const isMediaFile = (path) => isImageFile(path) || isVideoFile(path) + +export default function FileTree({ onOpen, onOpenToSide, onOpenImageEditor, onOpenVideoEditor, onOpenMediaFile, onFileDeleted, onFileRenamed, onFileMoved, projectRoot, activeFile, creatingFile, onFileCreated, onCancelCreate }) { const [entries, setEntries] = useState([]) const [expandedDirs, setExpandedDirs] = useState({}) const [searchQuery, setSearchQuery] = useState('') @@ -212,7 +225,12 @@ export default function FileTree({ onOpen, onOpenToSide, onFileDeleted, onFileRe })) } } else { - onOpen(entry.path) + // Route media files to their respective editors + if (onOpenMediaFile && isMediaFile(entry.path)) { + onOpenMediaFile(entry.path) + } else { + onOpen(entry.path) + } } } @@ -865,6 +883,16 @@ export default function FileTree({ onOpen, onOpenToSide, onFileDeleted, onFileRe
{ onOpenToSide?.(contextMenu.entry.path); setContextMenu(null) }}> Open to the Side
+ {isImageFile(contextMenu.entry.path) && onOpenImageEditor && ( +
{ onOpenImageEditor(contextMenu.entry.path); setContextMenu(null) }}> + Open in Image Editor +
+ )} + {isVideoFile(contextMenu.entry.path) && onOpenVideoEditor && ( +
{ onOpenVideoEditor(contextMenu.entry.path); setContextMenu(null) }}> + Open in Video Editor +
+ )}
)} diff --git a/src/kurt/web/client/src/components/ImageEditor.jsx b/src/kurt/web/client/src/components/ImageEditor.jsx new file mode 100644 index 00000000..05cc1b68 --- /dev/null +++ b/src/kurt/web/client/src/components/ImageEditor.jsx @@ -0,0 +1,812 @@ +import React, { useState, useEffect, useRef, useCallback } from 'react' +import { Stage, Layer, Image as KonvaImage, Rect, Circle, Text, Line, Transformer } from 'react-konva' + +const apiBase = import.meta.env.VITE_API_URL || '' +const apiUrl = (path) => `${apiBase}${path}` + +// Load image from URL or file +const useImage = (src) => { + const [image, setImage] = useState(null) + const [status, setStatus] = useState('loading') + + useEffect(() => { + if (!src) { + setImage(null) + setStatus('idle') + return + } + + setStatus('loading') + const img = new window.Image() + img.crossOrigin = 'anonymous' + + img.onload = () => { + setImage(img) + setStatus('loaded') + } + + img.onerror = () => { + setImage(null) + setStatus('error') + } + + img.src = src + }, [src]) + + return [image, status] +} + +// Shape component that can be selected and transformed +const Shape = ({ shapeProps, isSelected, onSelect, onChange }) => { + const shapeRef = useRef() + const trRef = useRef() + + useEffect(() => { + if (isSelected && trRef.current && shapeRef.current) { + trRef.current.nodes([shapeRef.current]) + trRef.current.getLayer().batchDraw() + } + }, [isSelected]) + + const ShapeComponent = shapeProps.type === 'circle' ? 
Circle : Rect + + return ( + <> + { + onChange({ + ...shapeProps, + x: e.target.x(), + y: e.target.y(), + }) + }} + onTransformEnd={(e) => { + const node = shapeRef.current + const scaleX = node.scaleX() + const scaleY = node.scaleY() + + node.scaleX(1) + node.scaleY(1) + + onChange({ + ...shapeProps, + x: node.x(), + y: node.y(), + width: Math.max(5, node.width() * scaleX), + height: Math.max(5, node.height() * scaleY), + rotation: node.rotation(), + }) + }} + /> + {isSelected && ( + { + if (newBox.width < 5 || newBox.height < 5) { + return oldBox + } + return newBox + }} + /> + )} + + ) +} + +// Text element component +const TextElement = ({ textProps, isSelected, onSelect, onChange }) => { + const textRef = useRef() + const trRef = useRef() + + useEffect(() => { + if (isSelected && trRef.current && textRef.current) { + trRef.current.nodes([textRef.current]) + trRef.current.getLayer().batchDraw() + } + }, [isSelected]) + + return ( + <> + { + onChange({ + ...textProps, + x: e.target.x(), + y: e.target.y(), + }) + }} + onTransformEnd={(e) => { + const node = textRef.current + onChange({ + ...textProps, + x: node.x(), + y: node.y(), + fontSize: Math.max(8, textProps.fontSize * node.scaleY()), + rotation: node.rotation(), + }) + node.scaleX(1) + node.scaleY(1) + }} + /> + {isSelected && ( + { + newBox.width = Math.max(30, newBox.width) + return newBox + }} + /> + )} + + ) +} + +// History state for undo/redo +const MAX_HISTORY = 50 + +export default function ImageEditor({ + imageSrc, + onSave, + onGenerate, + width = 800, + height = 600, +}) { + const [image, imageStatus] = useImage(imageSrc) + const [shapes, setShapes] = useState([]) + const [texts, setTexts] = useState([]) + const [selectedId, setSelectedId] = useState(null) + const [tool, setTool] = useState('select') // select, rect, circle, text, brush + const [fillColor, setFillColor] = useState('#3b82f6') + const [strokeColor, setStrokeColor] = useState('#1e40af') + const [textContent, setTextContent] = 
useState('Text') + const [brushSize, setBrushSize] = useState(5) + const [lines, setLines] = useState([]) + const [isDrawing, setIsDrawing] = useState(false) + const [filters, setFilters] = useState({ + brightness: 100, + contrast: 100, + saturate: 100, + blur: 0, + grayscale: 0, + sepia: 0, + }) + const [showFilters, setShowFilters] = useState(false) + const [isGenerating, setIsGenerating] = useState(false) + const [generatePrompt, setGeneratePrompt] = useState('') + const [showGenerateDialog, setShowGenerateDialog] = useState(false) + const stageRef = useRef() + + // Build CSS filter string + const getFilterString = useCallback(() => { + const { brightness, contrast, saturate, blur, grayscale, sepia } = filters + return `brightness(${brightness}%) contrast(${contrast}%) saturate(${saturate}%) blur(${blur}px) grayscale(${grayscale}%) sepia(${sepia}%)` + }, [filters]) + + // History for undo/redo + const [history, setHistory] = useState([{ shapes: [], texts: [], lines: [] }]) + const [historyIndex, setHistoryIndex] = useState(0) + + // Save state to history + const saveToHistory = useCallback((newShapes, newTexts, newLines) => { + setHistory((prev) => { + // Remove any future history if we're not at the end + const newHistory = prev.slice(0, historyIndex + 1) + // Add new state + newHistory.push({ shapes: newShapes, texts: newTexts, lines: newLines || lines }) + // Limit history size + if (newHistory.length > MAX_HISTORY) { + newHistory.shift() + return newHistory + } + return newHistory + }) + setHistoryIndex((prev) => Math.min(prev + 1, MAX_HISTORY - 1)) + }, [historyIndex, lines]) + + // Undo + const handleUndo = useCallback(() => { + if (historyIndex > 0) { + const newIndex = historyIndex - 1 + setHistoryIndex(newIndex) + const state = history[newIndex] + setShapes(state.shapes) + setTexts(state.texts) + setLines(state.lines || []) + setSelectedId(null) + } + }, [historyIndex, history]) + + // Redo + const handleRedo = useCallback(() => { + if (historyIndex < 
history.length - 1) { + const newIndex = historyIndex + 1 + setHistoryIndex(newIndex) + const state = history[newIndex] + setShapes(state.shapes) + setTexts(state.texts) + setLines(state.lines || []) + setSelectedId(null) + } + }, [historyIndex, history]) + + const canUndo = historyIndex > 0 + const canRedo = historyIndex < history.length - 1 + + // Brush drawing handlers + const handleMouseDown = useCallback((e) => { + if (tool !== 'brush') return + setIsDrawing(true) + const pos = e.target.getStage().getPointerPosition() + setLines([...lines, { + id: `line-${Date.now()}`, + points: [pos.x, pos.y], + stroke: strokeColor, + strokeWidth: brushSize, + }]) + }, [tool, lines, strokeColor, brushSize]) + + const handleMouseMove = useCallback((e) => { + if (!isDrawing || tool !== 'brush') return + const stage = e.target.getStage() + const point = stage.getPointerPosition() + setLines((prevLines) => { + const lastLine = prevLines[prevLines.length - 1] + if (!lastLine) return prevLines + // Add point to the last line + const newLines = prevLines.slice(0, -1) + newLines.push({ + ...lastLine, + points: [...lastLine.points, point.x, point.y], + }) + return newLines + }) + }, [isDrawing, tool]) + + const handleMouseUp = useCallback(() => { + if (isDrawing && tool === 'brush') { + setIsDrawing(false) + // Save to history after drawing is complete + saveToHistory(shapes, texts, lines) + } + }, [isDrawing, tool, shapes, texts, lines, saveToHistory]) + + // Calculate image dimensions to fit canvas while maintaining aspect ratio + const getImageDimensions = useCallback(() => { + if (!image) return { x: 0, y: 0, width: 0, height: 0 } + + const imgRatio = image.width / image.height + const canvasRatio = width / height + + let imgWidth, imgHeight, imgX, imgY + + if (imgRatio > canvasRatio) { + imgWidth = width + imgHeight = width / imgRatio + imgX = 0 + imgY = (height - imgHeight) / 2 + } else { + imgHeight = height + imgWidth = height * imgRatio + imgX = (width - imgWidth) / 2 + imgY 
= 0 + } + + return { x: imgX, y: imgY, width: imgWidth, height: imgHeight } + }, [image, width, height]) + + // Handle stage click + const handleStageClick = (e) => { + const clickedOnEmpty = e.target === e.target.getStage() + if (clickedOnEmpty) { + setSelectedId(null) + return + } + + const pos = e.target.getStage().getPointerPosition() + + if (tool === 'rect') { + const newShape = { + id: `rect-${Date.now()}`, + type: 'rect', + x: pos.x - 50, + y: pos.y - 25, + width: 100, + height: 50, + fill: fillColor, + stroke: strokeColor, + strokeWidth: 2, + } + const newShapes = [...shapes, newShape] + setShapes(newShapes) + saveToHistory(newShapes, texts) + setSelectedId(newShape.id) + setTool('select') + } else if (tool === 'circle') { + const newShape = { + id: `circle-${Date.now()}`, + type: 'circle', + x: pos.x, + y: pos.y, + radius: 40, + fill: fillColor, + stroke: strokeColor, + strokeWidth: 2, + } + const newShapes = [...shapes, newShape] + setShapes(newShapes) + saveToHistory(newShapes, texts) + setSelectedId(newShape.id) + setTool('select') + } else if (tool === 'text') { + const newText = { + id: `text-${Date.now()}`, + type: 'text', + x: pos.x, + y: pos.y, + text: textContent, + fontSize: 24, + fill: fillColor, + fontFamily: 'Arial', + } + const newTexts = [...texts, newText] + setTexts(newTexts) + saveToHistory(shapes, newTexts) + setSelectedId(newText.id) + setTool('select') + } + } + + // Delete selected element + const handleDelete = useCallback(() => { + if (!selectedId) return + const newShapes = shapes.filter((s) => s.id !== selectedId) + const newTexts = texts.filter((t) => t.id !== selectedId) + setShapes(newShapes) + setTexts(newTexts) + saveToHistory(newShapes, newTexts) + setSelectedId(null) + }, [selectedId, shapes, texts, saveToHistory]) + + // Keyboard shortcuts + useEffect(() => { + const handleKeyDown = (e) => { + // Skip if typing in an input + if (document.activeElement.tagName === 'INPUT' || document.activeElement.tagName === 'TEXTAREA') { 
+ return + } + + if (e.key === 'Delete' || e.key === 'Backspace') { + handleDelete() + } + // Undo: Ctrl+Z / Cmd+Z + if ((e.ctrlKey || e.metaKey) && e.key === 'z' && !e.shiftKey) { + e.preventDefault() + handleUndo() + } + // Redo: Ctrl+Shift+Z / Cmd+Shift+Z or Ctrl+Y / Cmd+Y + if ((e.ctrlKey || e.metaKey) && (e.key === 'y' || (e.key === 'z' && e.shiftKey))) { + e.preventDefault() + handleRedo() + } + } + window.addEventListener('keydown', handleKeyDown) + return () => window.removeEventListener('keydown', handleKeyDown) + }, [handleDelete, handleUndo, handleRedo]) + + // Export canvas as image + const handleExport = useCallback(() => { + if (!stageRef.current) return + const uri = stageRef.current.toDataURL() + const link = document.createElement('a') + link.download = 'image-export.png' + link.href = uri + document.body.appendChild(link) + link.click() + document.body.removeChild(link) + }, []) + + // Generate image with AI + const handleGenerate = async () => { + if (!generatePrompt.trim()) return + + setIsGenerating(true) + try { + const response = await fetch(apiUrl('/api/media/generate/image'), { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + prompt: generatePrompt, + width: 1024, + height: 1024, + }), + }) + + if (!response.ok) { + const data = await response.json() + throw new Error(data.detail || 'Failed to generate image') + } + + const data = await response.json() + if (data.url && onGenerate) { + onGenerate(data.url) + } + setShowGenerateDialog(false) + setGeneratePrompt('') + } catch (err) { + alert(`Generation failed: ${err.message}`) + } finally { + setIsGenerating(false) + } + } + + const imgDims = getImageDimensions() + + return ( +
+ {/* Toolbar */} +
+ {/* Undo/Redo */} +
+ + +
+ +
+ + + + + +
+ +
+ + +
+ + {tool === 'brush' && ( +
+ +
+ )} + +
+ +
+ + {tool === 'text' && ( +
+ setTextContent(e.target.value)} + placeholder="Text content" + className="text-input" + /> +
+ )} + +
+ {selectedId && ( + + )} + + +
+
+ + {/* Canvas */} +
+ {imageStatus === 'loading' && ( +
Loading image...
+ )} + {imageStatus === 'error' && ( +
Failed to load image
+ )} + + + {/* Background image */} + {image && ( + + )} + + {/* Shapes */} + {shapes.map((shape) => ( + setSelectedId(shape.id)} + onChange={(newAttrs) => { + setShapes(shapes.map((s) => (s.id === shape.id ? newAttrs : s))) + }} + /> + ))} + + {/* Text elements */} + {texts.map((text) => ( + setSelectedId(text.id)} + onChange={(newAttrs) => { + setTexts(texts.map((t) => (t.id === text.id ? newAttrs : t))) + }} + /> + ))} + + {/* Brush lines */} + {lines.map((line) => ( + + ))} + + +
+ + {/* Generate Dialog */} + {showGenerateDialog && ( +
setShowGenerateDialog(false)}> +
e.stopPropagation()}> +

Generate Image with AI

+