From 5b1934b997839af21bc3740c905dfeba35c46191 Mon Sep 17 00:00:00 2001 From: Jeff H Date: Wed, 24 Dec 2025 07:15:28 -0500 Subject: [PATCH 01/11] context menu for gallery items. spec init w movie plan --- .claude | 1 + .claude/settings.local.json | 18 - .gemini | 1 + .specify/memory/constitution.md | 89 ++++ .../powershell/check-prerequisites.ps1 | 148 ++++++ .specify/scripts/powershell/common.ps1 | 137 ++++++ .../scripts/powershell/create-new-feature.ps1 | 283 +++++++++++ .specify/scripts/powershell/setup-plan.ps1 | 61 +++ .../powershell/update-agent-context.ps1 | 448 ++++++++++++++++++ .specify/templates/agent-file-template.md | 28 ++ .specify/templates/checklist-template.md | 40 ++ .specify/templates/plan-template.md | 106 +++++ .specify/templates/spec-template.md | 115 +++++ .specify/templates/tasks-template.md | 251 ++++++++++ .vscode/settings.json | 6 + Components/DrawingGalleryPopup.xaml | 1 + GEMINI.md | 4 +- .../DrawingGalleryPopupViewModel.cs | 60 +++ LunaDraw.csproj | 6 +- .../checklists/requirements.md | 34 ++ .../contracts/services.cs | 53 +++ specs/001-movie-mode-playback/data-model.md | 81 ++++ specs/001-movie-mode-playback/plan.md | 85 ++++ specs/001-movie-mode-playback/quickstart.md | 42 ++ specs/001-movie-mode-playback/research.md | 48 ++ specs/001-movie-mode-playback/spec.md | 88 ++++ 26 files changed, 2212 insertions(+), 22 deletions(-) create mode 120000 .claude delete mode 100644 .claude/settings.local.json create mode 120000 .gemini create mode 100644 .specify/memory/constitution.md create mode 100644 .specify/scripts/powershell/check-prerequisites.ps1 create mode 100644 .specify/scripts/powershell/common.ps1 create mode 100644 .specify/scripts/powershell/create-new-feature.ps1 create mode 100644 .specify/scripts/powershell/setup-plan.ps1 create mode 100644 .specify/scripts/powershell/update-agent-context.ps1 create mode 100644 .specify/templates/agent-file-template.md create mode 100644 .specify/templates/checklist-template.md create mode 
100644 .specify/templates/plan-template.md create mode 100644 .specify/templates/spec-template.md create mode 100644 .specify/templates/tasks-template.md create mode 100644 .vscode/settings.json create mode 100644 specs/001-movie-mode-playback/checklists/requirements.md create mode 100644 specs/001-movie-mode-playback/contracts/services.cs create mode 100644 specs/001-movie-mode-playback/data-model.md create mode 100644 specs/001-movie-mode-playback/plan.md create mode 100644 specs/001-movie-mode-playback/quickstart.md create mode 100644 specs/001-movie-mode-playback/research.md create mode 100644 specs/001-movie-mode-playback/spec.md diff --git a/.claude b/.claude new file mode 120000 index 0000000..daccd34 --- /dev/null +++ b/.claude @@ -0,0 +1 @@ +C:/Projects/LunaDraw/.speckit \ No newline at end of file diff --git a/.claude/settings.local.json b/.claude/settings.local.json deleted file mode 100644 index b775de0..0000000 --- a/.claude/settings.local.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "permissions": { - "allow": [ - "Bash(dotnet build:*)", - "Bash(dotnet test:*)", - "Bash(dotnet clean:*)", - "Bash(Remove-Item -Recurse -Force CodeSoupCafe.Mauiobj,CodeSoupCafe.Mauibin -ErrorAction SilentlyContinue)", - "Bash(Remove-Item -Recurse -Force obj,bin -ErrorAction SilentlyContinue)", - "Bash(dir:*)", - "Bash(dotnet pack:*)", - "Bash(dotnet restore:*)", - "Bash(dotnet sln:*)", - "Bash(findstr:*)", - "Bash(Get-ChildItem -Recurse -Filter \"*carousel*\")", - "Bash(Select-Object FullName)" - ] - } -} diff --git a/.gemini b/.gemini new file mode 120000 index 0000000..daccd34 --- /dev/null +++ b/.gemini @@ -0,0 +1 @@ +C:/Projects/LunaDraw/.speckit \ No newline at end of file diff --git a/.specify/memory/constitution.md b/.specify/memory/constitution.md new file mode 100644 index 0000000..1ea1ede --- /dev/null +++ b/.specify/memory/constitution.md @@ -0,0 +1,89 @@ + + +# LunaDraw Constitution + +## Core Principles + +### I. 
Child-Centric UX +Target audience is children aged 3-8. User interface must prioritize: +- **Large Touch Targets**: Minimum 2cm x 2cm for all interactive elements. +- **Visual over Text**: Use icons, animations, and multi-sensory feedback (sounds) instead of text labels where possible. +- **Guidance**: Provide visual/audio guidance rather than explicit instructions. +- **Simplicity**: Minimal UI, hidden complexity, and immediate feedback. + +### II. Reactive Architecture +The application utilizes the MVVM pattern with ReactiveUI. +- **Observables**: Use Reactive Observables for state management and component communication. +- **View-ViewModel Binding**: ViewModels inherit from `ReactiveObject`. Use `this.RaiseAndSetIfChanged` for properties. +- **Messaging**: Use `IMessageBus` strictly for loosely-coupled broadcast messages between disconnected components. It must be instance-based (injected), NOT static. + +### III. Test-First & Quality +Testing is non-negotiable and precedes implementation for bug fixes. +- **Test-First Bug Fixes**: ALWAYS write a failing test first to validate a bug before fixing it. +- **Tools**: xUnit, Moq, FluentAssertions. +- **Structure**: Arrange-Act-Assert (AAA). Naming: `Should_When_Returns`. +- **Scope**: Prefer `[Theory]` and `[InlineData]` over multiple `[Fact]` methods. Include negative test cases. +- **Assertions**: Tests should only Assert a singular item on one line. +- **Naming Constraints**: DO NOT use 'sut' or arbitrary names. Use ONLY the class name (e.g., `mockClassName`). + +### IV. SOLID & Clean Code +Code must adhere to clean coding standards to maintain maintainability. +- **Principles**: Strictly follow SOLID (SRP, OCP, LSP, ISP, DIP), DRY, Low Coupling, High Cohesion, and Separation of Concerns. +- **Refactoring**: NEVER keep legacy or duplicate code. Refactor to a clean state; do not leave "bloat". +- **Naming**: No underscores, no regions. DO NOT use abbreviations for anything (variable names or otherwise). 
Use descriptive names. +- **Static Extensions**: Use ONLY for re-usable logic, not for core business logic. +- **Regions**: No #regions allowed. + +### V. SkiaSharp & Performance +All graphics rendering is handled via SkiaSharp. +- **Rendering**: Use `SKCanvas`, `SKPaint`, and `SKPath`. +- **Performance**: Optimize for high frame rates. Use QuadTree for spatial indexing and culling off-screen elements. +- **Vector Graphics**: Render directly from vector elements; avoid unnecessary bitmap caching unless strictly required for performance (e.g., complex layers). + +### VI. Architecture Patterns & Naming +Naming conventions MUST reflect the structural pattern used. +- **Prohibited Terms**: "Service", "Manager". Do NOT use these vague suffixes. +- **Allowed Patterns**: Use specific Gang of Four (GoF) or SOLID patterns (e.g., `Memento`, `Facade`, `Factory`, `Strategy`, `Observer`). +- **Data Access**: The Repository pattern is prohibited. Use Command Handlers (Create, Update) connected to a Domain Facade (namespaced by domain, e.g., Users, Customers) for LINQ/DB queries. +- **Modularity**: Code must be modular and separated by domain. + +### VII. SPARC Methodology & Agentic Workflow +Development follows the SPARC (Specification, Pseudocode, Architecture, Refinement, Completion) methodology. +- **Simplicity**: Prioritize clear, maintainable solutions; minimize complexity. +- **Documentation First**: Review/Create documentation (PRDs, specs) before implementation. +- **Agentic Collaboration**: Use `.clinerules` and `.cursorrules` to guide autonomous behaviors. +- **Memory Bank**: Continuously retain context to ensure coherent long-term planning. + +## Architecture & Implementation Details + +- **Framework**: .NET MAUI targeting Windows, Android, iOS, MacCatalyst. +- **Dependency Injection**: Components, Handlers, and Facades registered in `MauiProgram.cs`. ViewModels and Pages typically Transient or Singleton as appropriate. 
+- **Drawing Model**: `IDrawableElement` is the base for all drawn objects. `Layer` containers manage elements with `ObservableCollection`. +- **Tool System**: `IDrawingTool` interface for all tools. Input handled centrally by `CanvasInputHandler`. + +## Development Workflow + +- **Commit Protocol**: Check `git status` and `git diff` before committing. Write clear, "why"-focused commit messages. +- **Branching**: Use feature branches. +- **Verification**: Run tests (`dotnet test`) and build (`dotnet build`) before considering a task complete. +- **Legacy Migration**: Be aware of legacy code from `SurfaceBurnCalc`. Refactor and verify when touching these areas. + +## Governance + +- **Supremacy**: This Constitution supersedes all other practices. +- **Amendments**: Changes require documentation in this file and a version bump. +- **Compliance**: All PRs and code changes must verify compliance with these principles. +- **Runtime Guidance**: Refer to `CLAUDE.md` and `.clinerules` for specific day-to-day development commands and patterns. + +**Version**: 2.1.0 | **Ratified**: 2025-12-23 | **Last Amended**: 2025-12-23 \ No newline at end of file diff --git a/.specify/scripts/powershell/check-prerequisites.ps1 b/.specify/scripts/powershell/check-prerequisites.ps1 new file mode 100644 index 0000000..91667e9 --- /dev/null +++ b/.specify/scripts/powershell/check-prerequisites.ps1 @@ -0,0 +1,148 @@ +#!/usr/bin/env pwsh + +# Consolidated prerequisite checking script (PowerShell) +# +# This script provides unified prerequisite checking for Spec-Driven Development workflow. +# It replaces the functionality previously spread across multiple scripts. 
+# +# Usage: ./check-prerequisites.ps1 [OPTIONS] +# +# OPTIONS: +# -Json Output in JSON format +# -RequireTasks Require tasks.md to exist (for implementation phase) +# -IncludeTasks Include tasks.md in AVAILABLE_DOCS list +# -PathsOnly Only output path variables (no validation) +# -Help, -h Show help message + +[CmdletBinding()] +param( + [switch]$Json, + [switch]$RequireTasks, + [switch]$IncludeTasks, + [switch]$PathsOnly, + [switch]$Help +) + +$ErrorActionPreference = 'Stop' + +# Show help if requested +if ($Help) { + Write-Output @" +Usage: check-prerequisites.ps1 [OPTIONS] + +Consolidated prerequisite checking for Spec-Driven Development workflow. + +OPTIONS: + -Json Output in JSON format + -RequireTasks Require tasks.md to exist (for implementation phase) + -IncludeTasks Include tasks.md in AVAILABLE_DOCS list + -PathsOnly Only output path variables (no prerequisite validation) + -Help, -h Show this help message + +EXAMPLES: + # Check task prerequisites (plan.md required) + .\check-prerequisites.ps1 -Json + + # Check implementation prerequisites (plan.md + tasks.md required) + .\check-prerequisites.ps1 -Json -RequireTasks -IncludeTasks + + # Get feature paths only (no validation) + .\check-prerequisites.ps1 -PathsOnly + +"@ + exit 0 +} + +# Source common functions +. 
"$PSScriptRoot/common.ps1" + +# Get feature paths and validate branch +$paths = Get-FeaturePathsEnv + +if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH -HasGit:$paths.HAS_GIT)) { + exit 1 +} + +# If paths-only mode, output paths and exit (support combined -Json -PathsOnly) +if ($PathsOnly) { + if ($Json) { + [PSCustomObject]@{ + REPO_ROOT = $paths.REPO_ROOT + BRANCH = $paths.CURRENT_BRANCH + FEATURE_DIR = $paths.FEATURE_DIR + FEATURE_SPEC = $paths.FEATURE_SPEC + IMPL_PLAN = $paths.IMPL_PLAN + TASKS = $paths.TASKS + } | ConvertTo-Json -Compress + } else { + Write-Output "REPO_ROOT: $($paths.REPO_ROOT)" + Write-Output "BRANCH: $($paths.CURRENT_BRANCH)" + Write-Output "FEATURE_DIR: $($paths.FEATURE_DIR)" + Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)" + Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)" + Write-Output "TASKS: $($paths.TASKS)" + } + exit 0 +} + +# Validate required directories and files +if (-not (Test-Path $paths.FEATURE_DIR -PathType Container)) { + Write-Output "ERROR: Feature directory not found: $($paths.FEATURE_DIR)" + Write-Output "Run /speckit.specify first to create the feature structure." + exit 1 +} + +if (-not (Test-Path $paths.IMPL_PLAN -PathType Leaf)) { + Write-Output "ERROR: plan.md not found in $($paths.FEATURE_DIR)" + Write-Output "Run /speckit.plan first to create the implementation plan." + exit 1 +} + +# Check for tasks.md if required +if ($RequireTasks -and -not (Test-Path $paths.TASKS -PathType Leaf)) { + Write-Output "ERROR: tasks.md not found in $($paths.FEATURE_DIR)" + Write-Output "Run /speckit.tasks first to create the task list." 
+ exit 1 +} + +# Build list of available documents +$docs = @() + +# Always check these optional docs +if (Test-Path $paths.RESEARCH) { $docs += 'research.md' } +if (Test-Path $paths.DATA_MODEL) { $docs += 'data-model.md' } + +# Check contracts directory (only if it exists and has files) +if ((Test-Path $paths.CONTRACTS_DIR) -and (Get-ChildItem -Path $paths.CONTRACTS_DIR -ErrorAction SilentlyContinue | Select-Object -First 1)) { + $docs += 'contracts/' +} + +if (Test-Path $paths.QUICKSTART) { $docs += 'quickstart.md' } + +# Include tasks.md if requested and it exists +if ($IncludeTasks -and (Test-Path $paths.TASKS)) { + $docs += 'tasks.md' +} + +# Output results +if ($Json) { + # JSON output + [PSCustomObject]@{ + FEATURE_DIR = $paths.FEATURE_DIR + AVAILABLE_DOCS = $docs + } | ConvertTo-Json -Compress +} else { + # Text output + Write-Output "FEATURE_DIR:$($paths.FEATURE_DIR)" + Write-Output "AVAILABLE_DOCS:" + + # Show status of each potential document + Test-FileExists -Path $paths.RESEARCH -Description 'research.md' | Out-Null + Test-FileExists -Path $paths.DATA_MODEL -Description 'data-model.md' | Out-Null + Test-DirHasFiles -Path $paths.CONTRACTS_DIR -Description 'contracts/' | Out-Null + Test-FileExists -Path $paths.QUICKSTART -Description 'quickstart.md' | Out-Null + + if ($IncludeTasks) { + Test-FileExists -Path $paths.TASKS -Description 'tasks.md' | Out-Null + } +} diff --git a/.specify/scripts/powershell/common.ps1 b/.specify/scripts/powershell/common.ps1 new file mode 100644 index 0000000..b0be273 --- /dev/null +++ b/.specify/scripts/powershell/common.ps1 @@ -0,0 +1,137 @@ +#!/usr/bin/env pwsh +# Common PowerShell functions analogous to common.sh + +function Get-RepoRoot { + try { + $result = git rev-parse --show-toplevel 2>$null + if ($LASTEXITCODE -eq 0) { + return $result + } + } catch { + # Git command failed + } + + # Fall back to script location for non-git repos + return (Resolve-Path (Join-Path $PSScriptRoot "../../..")).Path +} + +function 
Get-CurrentBranch { + # First check if SPECIFY_FEATURE environment variable is set + if ($env:SPECIFY_FEATURE) { + return $env:SPECIFY_FEATURE + } + + # Then check git if available + try { + $result = git rev-parse --abbrev-ref HEAD 2>$null + if ($LASTEXITCODE -eq 0) { + return $result + } + } catch { + # Git command failed + } + + # For non-git repos, try to find the latest feature directory + $repoRoot = Get-RepoRoot + $specsDir = Join-Path $repoRoot "specs" + + if (Test-Path $specsDir) { + $latestFeature = "" + $highest = 0 + + Get-ChildItem -Path $specsDir -Directory | ForEach-Object { + if ($_.Name -match '^(\d{3})-') { + $num = [int]$matches[1] + if ($num -gt $highest) { + $highest = $num + $latestFeature = $_.Name + } + } + } + + if ($latestFeature) { + return $latestFeature + } + } + + # Final fallback + return "main" +} + +function Test-HasGit { + try { + git rev-parse --show-toplevel 2>$null | Out-Null + return ($LASTEXITCODE -eq 0) + } catch { + return $false + } +} + +function Test-FeatureBranch { + param( + [string]$Branch, + [bool]$HasGit = $true + ) + + # For non-git repos, we can't enforce branch naming but still provide output + if (-not $HasGit) { + Write-Warning "[specify] Warning: Git repository not detected; skipped branch validation" + return $true + } + + if ($Branch -notmatch '^[0-9]{3}-') { + Write-Output "ERROR: Not on a feature branch. 
Current branch: $Branch" + Write-Output "Feature branches should be named like: 001-feature-name" + return $false + } + return $true +} + +function Get-FeatureDir { + param([string]$RepoRoot, [string]$Branch) + Join-Path $RepoRoot "specs/$Branch" +} + +function Get-FeaturePathsEnv { + $repoRoot = Get-RepoRoot + $currentBranch = Get-CurrentBranch + $hasGit = Test-HasGit + $featureDir = Get-FeatureDir -RepoRoot $repoRoot -Branch $currentBranch + + [PSCustomObject]@{ + REPO_ROOT = $repoRoot + CURRENT_BRANCH = $currentBranch + HAS_GIT = $hasGit + FEATURE_DIR = $featureDir + FEATURE_SPEC = Join-Path $featureDir 'spec.md' + IMPL_PLAN = Join-Path $featureDir 'plan.md' + TASKS = Join-Path $featureDir 'tasks.md' + RESEARCH = Join-Path $featureDir 'research.md' + DATA_MODEL = Join-Path $featureDir 'data-model.md' + QUICKSTART = Join-Path $featureDir 'quickstart.md' + CONTRACTS_DIR = Join-Path $featureDir 'contracts' + } +} + +function Test-FileExists { + param([string]$Path, [string]$Description) + if (Test-Path -Path $Path -PathType Leaf) { + Write-Output " ✓ $Description" + return $true + } else { + Write-Output " ✗ $Description" + return $false + } +} + +function Test-DirHasFiles { + param([string]$Path, [string]$Description) + if ((Test-Path -Path $Path -PathType Container) -and (Get-ChildItem -Path $Path -ErrorAction SilentlyContinue | Where-Object { -not $_.PSIsContainer } | Select-Object -First 1)) { + Write-Output " ✓ $Description" + return $true + } else { + Write-Output " ✗ $Description" + return $false + } +} + diff --git a/.specify/scripts/powershell/create-new-feature.ps1 b/.specify/scripts/powershell/create-new-feature.ps1 new file mode 100644 index 0000000..2f0172e --- /dev/null +++ b/.specify/scripts/powershell/create-new-feature.ps1 @@ -0,0 +1,283 @@ +#!/usr/bin/env pwsh +# Create a new feature +[CmdletBinding()] +param( + [switch]$Json, + [string]$ShortName, + [int]$Number = 0, + [switch]$Help, + [Parameter(ValueFromRemainingArguments = $true)] + 
[string[]]$FeatureDescription +) +$ErrorActionPreference = 'Stop' + +# Show help if requested +if ($Help) { + Write-Host "Usage: ./create-new-feature.ps1 [-Json] [-ShortName ] [-Number N] " + Write-Host "" + Write-Host "Options:" + Write-Host " -Json Output in JSON format" + Write-Host " -ShortName Provide a custom short name (2-4 words) for the branch" + Write-Host " -Number N Specify branch number manually (overrides auto-detection)" + Write-Host " -Help Show this help message" + Write-Host "" + Write-Host "Examples:" + Write-Host " ./create-new-feature.ps1 'Add user authentication system' -ShortName 'user-auth'" + Write-Host " ./create-new-feature.ps1 'Implement OAuth2 integration for API'" + exit 0 +} + +# Check if feature description provided +if (-not $FeatureDescription -or $FeatureDescription.Count -eq 0) { + Write-Error "Usage: ./create-new-feature.ps1 [-Json] [-ShortName ] " + exit 1 +} + +$featureDesc = ($FeatureDescription -join ' ').Trim() + +# Resolve repository root. Prefer git information when available, but fall back +# to searching for repository markers so the workflow still functions in repositories that +# were initialized with --no-git. 
+function Find-RepositoryRoot { + param( + [string]$StartDir, + [string[]]$Markers = @('.git', '.specify') + ) + $current = Resolve-Path $StartDir + while ($true) { + foreach ($marker in $Markers) { + if (Test-Path (Join-Path $current $marker)) { + return $current + } + } + $parent = Split-Path $current -Parent + if ($parent -eq $current) { + # Reached filesystem root without finding markers + return $null + } + $current = $parent + } +} + +function Get-HighestNumberFromSpecs { + param([string]$SpecsDir) + + $highest = 0 + if (Test-Path $SpecsDir) { + Get-ChildItem -Path $SpecsDir -Directory | ForEach-Object { + if ($_.Name -match '^(\d+)') { + $num = [int]$matches[1] + if ($num -gt $highest) { $highest = $num } + } + } + } + return $highest +} + +function Get-HighestNumberFromBranches { + param() + + $highest = 0 + try { + $branches = git branch -a 2>$null + if ($LASTEXITCODE -eq 0) { + foreach ($branch in $branches) { + # Clean branch name: remove leading markers and remote prefixes + $cleanBranch = $branch.Trim() -replace '^\*?\s+', '' -replace '^remotes/[^/]+/', '' + + # Extract feature number if branch matches pattern ###-* + if ($cleanBranch -match '^(\d+)-') { + $num = [int]$matches[1] + if ($num -gt $highest) { $highest = $num } + } + } + } + } catch { + # If git command fails, return 0 + Write-Verbose "Could not check Git branches: $_" + } + return $highest +} + +function Get-NextBranchNumber { + param( + [string]$SpecsDir + ) + + # Fetch all remotes to get latest branch info (suppress errors if no remotes) + try { + git fetch --all --prune 2>$null | Out-Null + } catch { + # Ignore fetch errors + } + + # Get highest number from ALL branches (not just matching short name) + $highestBranch = Get-HighestNumberFromBranches + + # Get highest number from ALL specs (not just matching short name) + $highestSpec = Get-HighestNumberFromSpecs -SpecsDir $SpecsDir + + # Take the maximum of both + $maxNum = [Math]::Max($highestBranch, $highestSpec) + + # Return next 
number + return $maxNum + 1 +} + +function ConvertTo-CleanBranchName { + param([string]$Name) + + return $Name.ToLower() -replace '[^a-z0-9]', '-' -replace '-{2,}', '-' -replace '^-', '' -replace '-$', '' +} +$fallbackRoot = (Find-RepositoryRoot -StartDir $PSScriptRoot) +if (-not $fallbackRoot) { + Write-Error "Error: Could not determine repository root. Please run this script from within the repository." + exit 1 +} + +try { + $repoRoot = git rev-parse --show-toplevel 2>$null + if ($LASTEXITCODE -eq 0) { + $hasGit = $true + } else { + throw "Git not available" + } +} catch { + $repoRoot = $fallbackRoot + $hasGit = $false +} + +Set-Location $repoRoot + +$specsDir = Join-Path $repoRoot 'specs' +New-Item -ItemType Directory -Path $specsDir -Force | Out-Null + +# Function to generate branch name with stop word filtering and length filtering +function Get-BranchName { + param([string]$Description) + + # Common stop words to filter out + $stopWords = @( + 'i', 'a', 'an', 'the', 'to', 'for', 'of', 'in', 'on', 'at', 'by', 'with', 'from', + 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', + 'do', 'does', 'did', 'will', 'would', 'should', 'could', 'can', 'may', 'might', 'must', 'shall', + 'this', 'that', 'these', 'those', 'my', 'your', 'our', 'their', + 'want', 'need', 'add', 'get', 'set' + ) + + # Convert to lowercase and extract words (alphanumeric only) + $cleanName = $Description.ToLower() -replace '[^a-z0-9\s]', ' ' + $words = $cleanName -split '\s+' | Where-Object { $_ } + + # Filter words: remove stop words and words shorter than 3 chars (unless they're uppercase acronyms in original) + $meaningfulWords = @() + foreach ($word in $words) { + # Skip stop words + if ($stopWords -contains $word) { continue } + + # Keep words that are length >= 3 OR appear as uppercase in original (likely acronyms) + if ($word.Length -ge 3) { + $meaningfulWords += $word + } elseif ($Description -match "\b$($word.ToUpper())\b") { + # Keep short words if they appear 
as uppercase in original (likely acronyms) + $meaningfulWords += $word + } + } + + # If we have meaningful words, use first 3-4 of them + if ($meaningfulWords.Count -gt 0) { + $maxWords = if ($meaningfulWords.Count -eq 4) { 4 } else { 3 } + $result = ($meaningfulWords | Select-Object -First $maxWords) -join '-' + return $result + } else { + # Fallback to original logic if no meaningful words found + $result = ConvertTo-CleanBranchName -Name $Description + $fallbackWords = ($result -split '-') | Where-Object { $_ } | Select-Object -First 3 + return [string]::Join('-', $fallbackWords) + } +} + +# Generate branch name +if ($ShortName) { + # Use provided short name, just clean it up + $branchSuffix = ConvertTo-CleanBranchName -Name $ShortName +} else { + # Generate from description with smart filtering + $branchSuffix = Get-BranchName -Description $featureDesc +} + +# Determine branch number +if ($Number -eq 0) { + if ($hasGit) { + # Check existing branches on remotes + $Number = Get-NextBranchNumber -SpecsDir $specsDir + } else { + # Fall back to local directory check + $Number = (Get-HighestNumberFromSpecs -SpecsDir $specsDir) + 1 + } +} + +$featureNum = ('{0:000}' -f $Number) +$branchName = "$featureNum-$branchSuffix" + +# GitHub enforces a 244-byte limit on branch names +# Validate and truncate if necessary +$maxBranchLength = 244 +if ($branchName.Length -gt $maxBranchLength) { + # Calculate how much we need to trim from suffix + # Account for: feature number (3) + hyphen (1) = 4 chars + $maxSuffixLength = $maxBranchLength - 4 + + # Truncate suffix + $truncatedSuffix = $branchSuffix.Substring(0, [Math]::Min($branchSuffix.Length, $maxSuffixLength)) + # Remove trailing hyphen if truncation created one + $truncatedSuffix = $truncatedSuffix -replace '-$', '' + + $originalBranchName = $branchName + $branchName = "$featureNum-$truncatedSuffix" + + Write-Warning "[specify] Branch name exceeded GitHub's 244-byte limit" + Write-Warning "[specify] Original: 
$originalBranchName ($($originalBranchName.Length) bytes)" + Write-Warning "[specify] Truncated to: $branchName ($($branchName.Length) bytes)" +} + +if ($hasGit) { + try { + git checkout -b $branchName | Out-Null + } catch { + Write-Warning "Failed to create git branch: $branchName" + } +} else { + Write-Warning "[specify] Warning: Git repository not detected; skipped branch creation for $branchName" +} + +$featureDir = Join-Path $specsDir $branchName +New-Item -ItemType Directory -Path $featureDir -Force | Out-Null + +$template = Join-Path $repoRoot '.specify/templates/spec-template.md' +$specFile = Join-Path $featureDir 'spec.md' +if (Test-Path $template) { + Copy-Item $template $specFile -Force +} else { + New-Item -ItemType File -Path $specFile | Out-Null +} + +# Set the SPECIFY_FEATURE environment variable for the current session +$env:SPECIFY_FEATURE = $branchName + +if ($Json) { + $obj = [PSCustomObject]@{ + BRANCH_NAME = $branchName + SPEC_FILE = $specFile + FEATURE_NUM = $featureNum + HAS_GIT = $hasGit + } + $obj | ConvertTo-Json -Compress +} else { + Write-Output "BRANCH_NAME: $branchName" + Write-Output "SPEC_FILE: $specFile" + Write-Output "FEATURE_NUM: $featureNum" + Write-Output "HAS_GIT: $hasGit" + Write-Output "SPECIFY_FEATURE environment variable set to: $branchName" +} + diff --git a/.specify/scripts/powershell/setup-plan.ps1 b/.specify/scripts/powershell/setup-plan.ps1 new file mode 100644 index 0000000..d0ed582 --- /dev/null +++ b/.specify/scripts/powershell/setup-plan.ps1 @@ -0,0 +1,61 @@ +#!/usr/bin/env pwsh +# Setup implementation plan for a feature + +[CmdletBinding()] +param( + [switch]$Json, + [switch]$Help +) + +$ErrorActionPreference = 'Stop' + +# Show help if requested +if ($Help) { + Write-Output "Usage: ./setup-plan.ps1 [-Json] [-Help]" + Write-Output " -Json Output results in JSON format" + Write-Output " -Help Show this help message" + exit 0 +} + +# Load common functions +. 
"$PSScriptRoot/common.ps1" + +# Get all paths and variables from common functions +$paths = Get-FeaturePathsEnv + +# Check if we're on a proper feature branch (only for git repos) +if (-not (Test-FeatureBranch -Branch $paths.CURRENT_BRANCH -HasGit $paths.HAS_GIT)) { + exit 1 +} + +# Ensure the feature directory exists +New-Item -ItemType Directory -Path $paths.FEATURE_DIR -Force | Out-Null + +# Copy plan template if it exists, otherwise note it or create empty file +$template = Join-Path $paths.REPO_ROOT '.specify/templates/plan-template.md' +if (Test-Path $template) { + Copy-Item $template $paths.IMPL_PLAN -Force + Write-Output "Copied plan template to $($paths.IMPL_PLAN)" +} else { + Write-Warning "Plan template not found at $template" + # Create a basic plan file if template doesn't exist + New-Item -ItemType File -Path $paths.IMPL_PLAN -Force | Out-Null +} + +# Output results +if ($Json) { + $result = [PSCustomObject]@{ + FEATURE_SPEC = $paths.FEATURE_SPEC + IMPL_PLAN = $paths.IMPL_PLAN + SPECS_DIR = $paths.FEATURE_DIR + BRANCH = $paths.CURRENT_BRANCH + HAS_GIT = $paths.HAS_GIT + } + $result | ConvertTo-Json -Compress +} else { + Write-Output "FEATURE_SPEC: $($paths.FEATURE_SPEC)" + Write-Output "IMPL_PLAN: $($paths.IMPL_PLAN)" + Write-Output "SPECS_DIR: $($paths.FEATURE_DIR)" + Write-Output "BRANCH: $($paths.CURRENT_BRANCH)" + Write-Output "HAS_GIT: $($paths.HAS_GIT)" +} diff --git a/.specify/scripts/powershell/update-agent-context.ps1 b/.specify/scripts/powershell/update-agent-context.ps1 new file mode 100644 index 0000000..ffdab4b --- /dev/null +++ b/.specify/scripts/powershell/update-agent-context.ps1 @@ -0,0 +1,448 @@ +#!/usr/bin/env pwsh +<#! +.SYNOPSIS +Update agent context files with information from plan.md (PowerShell version) + +.DESCRIPTION +Mirrors the behavior of scripts/bash/update-agent-context.sh: + 1. Environment Validation + 2. Plan Data Extraction + 3. Agent File Management (create from template or update existing) + 4. 
Content Generation (technology stack, recent changes, timestamp) + 5. Multi-Agent Support (claude, gemini, copilot, cursor-agent, qwen, opencode, codex, windsurf, kilocode, auggie, roo, codebuddy, amp, shai, q, bob, qoder) + +.PARAMETER AgentType +Optional agent key to update a single agent. If omitted, updates all existing agent files (creating a default Claude file if none exist). + +.EXAMPLE +./update-agent-context.ps1 -AgentType claude + +.EXAMPLE +./update-agent-context.ps1 # Updates all existing agent files + +.NOTES +Relies on common helper functions in common.ps1 +#> +param( + [Parameter(Position=0)] + [ValidateSet('claude','gemini','copilot','cursor-agent','qwen','opencode','codex','windsurf','kilocode','auggie','roo','codebuddy','amp','shai','q','bob','qoder')] + [string]$AgentType +) + +$ErrorActionPreference = 'Stop' + +# Import common helpers +$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +. (Join-Path $ScriptDir 'common.ps1') + +# Acquire environment paths +$envData = Get-FeaturePathsEnv +$REPO_ROOT = $envData.REPO_ROOT +$CURRENT_BRANCH = $envData.CURRENT_BRANCH +$HAS_GIT = $envData.HAS_GIT +$IMPL_PLAN = $envData.IMPL_PLAN +$NEW_PLAN = $IMPL_PLAN + +# Agent file paths +$CLAUDE_FILE = Join-Path $REPO_ROOT 'CLAUDE.md' +$GEMINI_FILE = Join-Path $REPO_ROOT 'GEMINI.md' +$COPILOT_FILE = Join-Path $REPO_ROOT '.github/agents/copilot-instructions.md' +$CURSOR_FILE = Join-Path $REPO_ROOT '.cursor/rules/specify-rules.mdc' +$QWEN_FILE = Join-Path $REPO_ROOT 'QWEN.md' +$AGENTS_FILE = Join-Path $REPO_ROOT 'AGENTS.md' +$WINDSURF_FILE = Join-Path $REPO_ROOT '.windsurf/rules/specify-rules.md' +$KILOCODE_FILE = Join-Path $REPO_ROOT '.kilocode/rules/specify-rules.md' +$AUGGIE_FILE = Join-Path $REPO_ROOT '.augment/rules/specify-rules.md' +$ROO_FILE = Join-Path $REPO_ROOT '.roo/rules/specify-rules.md' +$CODEBUDDY_FILE = Join-Path $REPO_ROOT 'CODEBUDDY.md' +$QODER_FILE = Join-Path $REPO_ROOT 'QODER.md' +$AMP_FILE = Join-Path $REPO_ROOT 'AGENTS.md' 
+$SHAI_FILE = Join-Path $REPO_ROOT 'SHAI.md' +$Q_FILE = Join-Path $REPO_ROOT 'AGENTS.md' +$BOB_FILE = Join-Path $REPO_ROOT 'AGENTS.md' + +$TEMPLATE_FILE = Join-Path $REPO_ROOT '.specify/templates/agent-file-template.md' + +# Parsed plan data placeholders +$script:NEW_LANG = '' +$script:NEW_FRAMEWORK = '' +$script:NEW_DB = '' +$script:NEW_PROJECT_TYPE = '' + +function Write-Info { + param( + [Parameter(Mandatory=$true)] + [string]$Message + ) + Write-Host "INFO: $Message" +} + +function Write-Success { + param( + [Parameter(Mandatory=$true)] + [string]$Message + ) + Write-Host "$([char]0x2713) $Message" +} + +function Write-WarningMsg { + param( + [Parameter(Mandatory=$true)] + [string]$Message + ) + Write-Warning $Message +} + +function Write-Err { + param( + [Parameter(Mandatory=$true)] + [string]$Message + ) + Write-Host "ERROR: $Message" -ForegroundColor Red +} + +function Validate-Environment { + if (-not $CURRENT_BRANCH) { + Write-Err 'Unable to determine current feature' + if ($HAS_GIT) { Write-Info "Make sure you're on a feature branch" } else { Write-Info 'Set SPECIFY_FEATURE environment variable or create a feature first' } + exit 1 + } + if (-not (Test-Path $NEW_PLAN)) { + Write-Err "No plan.md found at $NEW_PLAN" + Write-Info 'Ensure you are working on a feature with a corresponding spec directory' + if (-not $HAS_GIT) { Write-Info 'Use: $env:SPECIFY_FEATURE=your-feature-name or create a new feature first' } + exit 1 + } + if (-not (Test-Path $TEMPLATE_FILE)) { + Write-Err "Template file not found at $TEMPLATE_FILE" + Write-Info 'Run specify init to scaffold .specify/templates, or add agent-file-template.md there.' 
+ exit 1 + } +} + +function Extract-PlanField { + param( + [Parameter(Mandatory=$true)] + [string]$FieldPattern, + [Parameter(Mandatory=$true)] + [string]$PlanFile + ) + if (-not (Test-Path $PlanFile)) { return '' } + # Lines like **Language/Version**: Python 3.12 + $regex = "^\*\*$([Regex]::Escape($FieldPattern))\*\*: (.+)$" + Get-Content -LiteralPath $PlanFile -Encoding utf8 | ForEach-Object { + if ($_ -match $regex) { + $val = $Matches[1].Trim() + if ($val -notin @('NEEDS CLARIFICATION','N/A')) { return $val } + } + } | Select-Object -First 1 +} + +function Parse-PlanData { + param( + [Parameter(Mandatory=$true)] + [string]$PlanFile + ) + if (-not (Test-Path $PlanFile)) { Write-Err "Plan file not found: $PlanFile"; return $false } + Write-Info "Parsing plan data from $PlanFile" + $script:NEW_LANG = Extract-PlanField -FieldPattern 'Language/Version' -PlanFile $PlanFile + $script:NEW_FRAMEWORK = Extract-PlanField -FieldPattern 'Primary Dependencies' -PlanFile $PlanFile + $script:NEW_DB = Extract-PlanField -FieldPattern 'Storage' -PlanFile $PlanFile + $script:NEW_PROJECT_TYPE = Extract-PlanField -FieldPattern 'Project Type' -PlanFile $PlanFile + + if ($NEW_LANG) { Write-Info "Found language: $NEW_LANG" } else { Write-WarningMsg 'No language information found in plan' } + if ($NEW_FRAMEWORK) { Write-Info "Found framework: $NEW_FRAMEWORK" } + if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Info "Found database: $NEW_DB" } + if ($NEW_PROJECT_TYPE) { Write-Info "Found project type: $NEW_PROJECT_TYPE" } + return $true +} + +function Format-TechnologyStack { + param( + [Parameter(Mandatory=$false)] + [string]$Lang, + [Parameter(Mandatory=$false)] + [string]$Framework + ) + $parts = @() + if ($Lang -and $Lang -ne 'NEEDS CLARIFICATION') { $parts += $Lang } + if ($Framework -and $Framework -notin @('NEEDS CLARIFICATION','N/A')) { $parts += $Framework } + if (-not $parts) { return '' } + return ($parts -join ' + ') +} + +function Get-ProjectStructure { + param( + 
[Parameter(Mandatory=$false)] + [string]$ProjectType + ) + if ($ProjectType -match 'web') { return "backend/`nfrontend/`ntests/" } else { return "src/`ntests/" } +} + +function Get-CommandsForLanguage { + param( + [Parameter(Mandatory=$false)] + [string]$Lang + ) + switch -Regex ($Lang) { + 'Python' { return "cd src; pytest; ruff check ." } + 'Rust' { return "cargo test; cargo clippy" } + 'JavaScript|TypeScript' { return "npm test; npm run lint" } + default { return "# Add commands for $Lang" } + } +} + +function Get-LanguageConventions { + param( + [Parameter(Mandatory=$false)] + [string]$Lang + ) + if ($Lang) { "${Lang}: Follow standard conventions" } else { 'General: Follow standard conventions' } +} + +function New-AgentFile { + param( + [Parameter(Mandatory=$true)] + [string]$TargetFile, + [Parameter(Mandatory=$true)] + [string]$ProjectName, + [Parameter(Mandatory=$true)] + [datetime]$Date + ) + if (-not (Test-Path $TEMPLATE_FILE)) { Write-Err "Template not found at $TEMPLATE_FILE"; return $false } + $temp = New-TemporaryFile + Copy-Item -LiteralPath $TEMPLATE_FILE -Destination $temp -Force + + $projectStructure = Get-ProjectStructure -ProjectType $NEW_PROJECT_TYPE + $commands = Get-CommandsForLanguage -Lang $NEW_LANG + $languageConventions = Get-LanguageConventions -Lang $NEW_LANG + + $escaped_lang = $NEW_LANG + $escaped_framework = $NEW_FRAMEWORK + $escaped_branch = $CURRENT_BRANCH + + $content = Get-Content -LiteralPath $temp -Raw -Encoding utf8 + $content = $content -replace '\[PROJECT NAME\]',$ProjectName + $content = $content -replace '\[DATE\]',$Date.ToString('yyyy-MM-dd') + + # Build the technology stack string safely + $techStackForTemplate = "" + if ($escaped_lang -and $escaped_framework) { + $techStackForTemplate = "- $escaped_lang + $escaped_framework ($escaped_branch)" + } elseif ($escaped_lang) { + $techStackForTemplate = "- $escaped_lang ($escaped_branch)" + } elseif ($escaped_framework) { + $techStackForTemplate = "- $escaped_framework 
($escaped_branch)" + } + + $content = $content -replace '\[EXTRACTED FROM ALL PLAN.MD FILES\]',$techStackForTemplate + # For project structure we manually embed (keep newlines) + $escapedStructure = [Regex]::Escape($projectStructure) + $content = $content -replace '\[ACTUAL STRUCTURE FROM PLANS\]',$escapedStructure + # Replace escaped newlines placeholder after all replacements + $content = $content -replace '\[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES\]',$commands + $content = $content -replace '\[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE\]',$languageConventions + + # Build the recent changes string safely + $recentChangesForTemplate = "" + if ($escaped_lang -and $escaped_framework) { + $recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_lang} + ${escaped_framework}" + } elseif ($escaped_lang) { + $recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_lang}" + } elseif ($escaped_framework) { + $recentChangesForTemplate = "- ${escaped_branch}: Added ${escaped_framework}" + } + + $content = $content -replace '\[LAST 3 FEATURES AND WHAT THEY ADDED\]',$recentChangesForTemplate + # Convert literal \n sequences introduced by Escape to real newlines + $content = $content -replace '\\n',[Environment]::NewLine + + $parent = Split-Path -Parent $TargetFile + if (-not (Test-Path $parent)) { New-Item -ItemType Directory -Path $parent | Out-Null } + Set-Content -LiteralPath $TargetFile -Value $content -NoNewline -Encoding utf8 + Remove-Item $temp -Force + return $true +} + +function Update-ExistingAgentFile { + param( + [Parameter(Mandatory=$true)] + [string]$TargetFile, + [Parameter(Mandatory=$true)] + [datetime]$Date + ) + if (-not (Test-Path $TargetFile)) { return (New-AgentFile -TargetFile $TargetFile -ProjectName (Split-Path $REPO_ROOT -Leaf) -Date $Date) } + + $techStack = Format-TechnologyStack -Lang $NEW_LANG -Framework $NEW_FRAMEWORK + $newTechEntries = @() + if ($techStack) { + $escapedTechStack = [Regex]::Escape($techStack) + if (-not 
(Select-String -Pattern $escapedTechStack -Path $TargetFile -Quiet)) { + $newTechEntries += "- $techStack ($CURRENT_BRANCH)" + } + } + if ($NEW_DB -and $NEW_DB -notin @('N/A','NEEDS CLARIFICATION')) { + $escapedDB = [Regex]::Escape($NEW_DB) + if (-not (Select-String -Pattern $escapedDB -Path $TargetFile -Quiet)) { + $newTechEntries += "- $NEW_DB ($CURRENT_BRANCH)" + } + } + $newChangeEntry = '' + if ($techStack) { $newChangeEntry = "- ${CURRENT_BRANCH}: Added ${techStack}" } + elseif ($NEW_DB -and $NEW_DB -notin @('N/A','NEEDS CLARIFICATION')) { $newChangeEntry = "- ${CURRENT_BRANCH}: Added ${NEW_DB}" } + + $lines = Get-Content -LiteralPath $TargetFile -Encoding utf8 + $output = New-Object System.Collections.Generic.List[string] + $inTech = $false; $inChanges = $false; $techAdded = $false; $changeAdded = $false; $existingChanges = 0 + + for ($i=0; $i -lt $lines.Count; $i++) { + $line = $lines[$i] + if ($line -eq '## Active Technologies') { + $output.Add($line) + $inTech = $true + continue + } + if ($inTech -and $line -match '^##\s') { + if (-not $techAdded -and $newTechEntries.Count -gt 0) { $newTechEntries | ForEach-Object { $output.Add($_) }; $techAdded = $true } + $output.Add($line); $inTech = $false; continue + } + if ($inTech -and [string]::IsNullOrWhiteSpace($line)) { + if (-not $techAdded -and $newTechEntries.Count -gt 0) { $newTechEntries | ForEach-Object { $output.Add($_) }; $techAdded = $true } + $output.Add($line); continue + } + if ($line -eq '## Recent Changes') { + $output.Add($line) + if ($newChangeEntry) { $output.Add($newChangeEntry); $changeAdded = $true } + $inChanges = $true + continue + } + if ($inChanges -and $line -match '^##\s') { $output.Add($line); $inChanges = $false; continue } + if ($inChanges -and $line -match '^- ') { + if ($existingChanges -lt 2) { $output.Add($line); $existingChanges++ } + continue + } + if ($line -match '\*\*Last updated\*\*: .*\d{4}-\d{2}-\d{2}') { + $output.Add(($line -replace 
'\d{4}-\d{2}-\d{2}',$Date.ToString('yyyy-MM-dd'))) + continue + } + $output.Add($line) + } + + # Post-loop check: if we're still in the Active Technologies section and haven't added new entries + if ($inTech -and -not $techAdded -and $newTechEntries.Count -gt 0) { + $newTechEntries | ForEach-Object { $output.Add($_) } + } + + Set-Content -LiteralPath $TargetFile -Value ($output -join [Environment]::NewLine) -Encoding utf8 + return $true +} + +function Update-AgentFile { + param( + [Parameter(Mandatory=$true)] + [string]$TargetFile, + [Parameter(Mandatory=$true)] + [string]$AgentName + ) + if (-not $TargetFile -or -not $AgentName) { Write-Err 'Update-AgentFile requires TargetFile and AgentName'; return $false } + Write-Info "Updating $AgentName context file: $TargetFile" + $projectName = Split-Path $REPO_ROOT -Leaf + $date = Get-Date + + $dir = Split-Path -Parent $TargetFile + if (-not (Test-Path $dir)) { New-Item -ItemType Directory -Path $dir | Out-Null } + + if (-not (Test-Path $TargetFile)) { + if (New-AgentFile -TargetFile $TargetFile -ProjectName $projectName -Date $date) { Write-Success "Created new $AgentName context file" } else { Write-Err 'Failed to create new agent file'; return $false } + } else { + try { + if (Update-ExistingAgentFile -TargetFile $TargetFile -Date $date) { Write-Success "Updated existing $AgentName context file" } else { Write-Err 'Failed to update agent file'; return $false } + } catch { + Write-Err "Cannot access or update existing file: $TargetFile. 
$_" + return $false + } + } + return $true +} + +function Update-SpecificAgent { + param( + [Parameter(Mandatory=$true)] + [string]$Type + ) + switch ($Type) { + 'claude' { Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code' } + 'gemini' { Update-AgentFile -TargetFile $GEMINI_FILE -AgentName 'Gemini CLI' } + 'copilot' { Update-AgentFile -TargetFile $COPILOT_FILE -AgentName 'GitHub Copilot' } + 'cursor-agent' { Update-AgentFile -TargetFile $CURSOR_FILE -AgentName 'Cursor IDE' } + 'qwen' { Update-AgentFile -TargetFile $QWEN_FILE -AgentName 'Qwen Code' } + 'opencode' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'opencode' } + 'codex' { Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex CLI' } + 'windsurf' { Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf' } + 'kilocode' { Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code' } + 'auggie' { Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI' } + 'roo' { Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code' } + 'codebuddy' { Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy CLI' } + 'qoder' { Update-AgentFile -TargetFile $QODER_FILE -AgentName 'Qoder CLI' } + 'amp' { Update-AgentFile -TargetFile $AMP_FILE -AgentName 'Amp' } + 'shai' { Update-AgentFile -TargetFile $SHAI_FILE -AgentName 'SHAI' } + 'q' { Update-AgentFile -TargetFile $Q_FILE -AgentName 'Amazon Q Developer CLI' } + 'bob' { Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob' } + default { Write-Err "Unknown agent type '$Type'"; Write-Err 'Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|q|bob|qoder'; return $false } + } +} + +function Update-AllExistingAgents { + $found = $false + $ok = $true + if (Test-Path $CLAUDE_FILE) { if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false }; $found = $true } + if (Test-Path $GEMINI_FILE) { if (-not 
(Update-AgentFile -TargetFile $GEMINI_FILE -AgentName 'Gemini CLI')) { $ok = $false }; $found = $true } + if (Test-Path $COPILOT_FILE) { if (-not (Update-AgentFile -TargetFile $COPILOT_FILE -AgentName 'GitHub Copilot')) { $ok = $false }; $found = $true } + if (Test-Path $CURSOR_FILE) { if (-not (Update-AgentFile -TargetFile $CURSOR_FILE -AgentName 'Cursor IDE')) { $ok = $false }; $found = $true } + if (Test-Path $QWEN_FILE) { if (-not (Update-AgentFile -TargetFile $QWEN_FILE -AgentName 'Qwen Code')) { $ok = $false }; $found = $true } + if (Test-Path $AGENTS_FILE) { if (-not (Update-AgentFile -TargetFile $AGENTS_FILE -AgentName 'Codex/opencode')) { $ok = $false }; $found = $true } + if (Test-Path $WINDSURF_FILE) { if (-not (Update-AgentFile -TargetFile $WINDSURF_FILE -AgentName 'Windsurf')) { $ok = $false }; $found = $true } + if (Test-Path $KILOCODE_FILE) { if (-not (Update-AgentFile -TargetFile $KILOCODE_FILE -AgentName 'Kilo Code')) { $ok = $false }; $found = $true } + if (Test-Path $AUGGIE_FILE) { if (-not (Update-AgentFile -TargetFile $AUGGIE_FILE -AgentName 'Auggie CLI')) { $ok = $false }; $found = $true } + if (Test-Path $ROO_FILE) { if (-not (Update-AgentFile -TargetFile $ROO_FILE -AgentName 'Roo Code')) { $ok = $false }; $found = $true } + if (Test-Path $CODEBUDDY_FILE) { if (-not (Update-AgentFile -TargetFile $CODEBUDDY_FILE -AgentName 'CodeBuddy CLI')) { $ok = $false }; $found = $true } + if (Test-Path $QODER_FILE) { if (-not (Update-AgentFile -TargetFile $QODER_FILE -AgentName 'Qoder CLI')) { $ok = $false }; $found = $true } + if (Test-Path $SHAI_FILE) { if (-not (Update-AgentFile -TargetFile $SHAI_FILE -AgentName 'SHAI')) { $ok = $false }; $found = $true } + if (Test-Path $Q_FILE) { if (-not (Update-AgentFile -TargetFile $Q_FILE -AgentName 'Amazon Q Developer CLI')) { $ok = $false }; $found = $true } + if (Test-Path $BOB_FILE) { if (-not (Update-AgentFile -TargetFile $BOB_FILE -AgentName 'IBM Bob')) { $ok = $false }; $found = $true } + if (-not $found) 
{ + Write-Info 'No existing agent files found, creating default Claude file...' + if (-not (Update-AgentFile -TargetFile $CLAUDE_FILE -AgentName 'Claude Code')) { $ok = $false } + } + return $ok +} + +function Print-Summary { + Write-Host '' + Write-Info 'Summary of changes:' + if ($NEW_LANG) { Write-Host " - Added language: $NEW_LANG" } + if ($NEW_FRAMEWORK) { Write-Host " - Added framework: $NEW_FRAMEWORK" } + if ($NEW_DB -and $NEW_DB -ne 'N/A') { Write-Host " - Added database: $NEW_DB" } + Write-Host '' + Write-Info 'Usage: ./update-agent-context.ps1 [-AgentType claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|amp|shai|q|bob|qoder]' +} + +function Main { + Validate-Environment + Write-Info "=== Updating agent context files for feature $CURRENT_BRANCH ===" + if (-not (Parse-PlanData -PlanFile $NEW_PLAN)) { Write-Err 'Failed to parse plan data'; exit 1 } + $success = $true + if ($AgentType) { + Write-Info "Updating specific agent: $AgentType" + if (-not (Update-SpecificAgent -Type $AgentType)) { $success = $false } + } + else { + Write-Info 'No agent specified, updating all existing agent files...' + if (-not (Update-AllExistingAgents)) { $success = $false } + } + Print-Summary + if ($success) { Write-Success 'Agent context update completed successfully'; exit 0 } else { Write-Err 'Agent context update completed with errors'; exit 1 } +} + +Main + diff --git a/.specify/templates/agent-file-template.md b/.specify/templates/agent-file-template.md new file mode 100644 index 0000000..4cc7fd6 --- /dev/null +++ b/.specify/templates/agent-file-template.md @@ -0,0 +1,28 @@ +# [PROJECT NAME] Development Guidelines + +Auto-generated from all feature plans. 
Last updated: [DATE] + +## Active Technologies + +[EXTRACTED FROM ALL PLAN.MD FILES] + +## Project Structure + +```text +[ACTUAL STRUCTURE FROM PLANS] +``` + +## Commands + +[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES] + +## Code Style + +[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE] + +## Recent Changes + +[LAST 3 FEATURES AND WHAT THEY ADDED] + + + diff --git a/.specify/templates/checklist-template.md b/.specify/templates/checklist-template.md new file mode 100644 index 0000000..806657d --- /dev/null +++ b/.specify/templates/checklist-template.md @@ -0,0 +1,40 @@ +# [CHECKLIST TYPE] Checklist: [FEATURE NAME] + +**Purpose**: [Brief description of what this checklist covers] +**Created**: [DATE] +**Feature**: [Link to spec.md or relevant documentation] + +**Note**: This checklist is generated by the `/speckit.checklist` command based on feature context and requirements. + + + +## [Category 1] + +- [ ] CHK001 First checklist item with clear action +- [ ] CHK002 Second checklist item +- [ ] CHK003 Third checklist item + +## [Category 2] + +- [ ] CHK004 Another category item +- [ ] CHK005 Item with specific criteria +- [ ] CHK006 Final item in this category + +## Notes + +- Check items off as completed: `[x]` +- Add comments or findings inline +- Link to relevant resources or documentation +- Items are numbered sequentially for easy reference diff --git a/.specify/templates/plan-template.md b/.specify/templates/plan-template.md new file mode 100644 index 0000000..656dab8 --- /dev/null +++ b/.specify/templates/plan-template.md @@ -0,0 +1,106 @@ +# Implementation Plan: [FEATURE] + +**Branch**: `[###-feature-name]` | **Date**: [DATE] | **Spec**: [link] +**Input**: Feature specification from `/specs/[###-feature-name]/spec.md` + +**Note**: This template is filled in by the `/speckit.plan` command. See `.specify/templates/commands/plan.md` for the execution workflow. 
+ +## Summary + +[Extract from feature spec: primary requirement + technical approach from research] + +## Technical Context + + + +**Language/Version**: [e.g., Python 3.11, Swift 5.9, Rust 1.75 or NEEDS CLARIFICATION] +**Primary Dependencies**: [e.g., FastAPI, UIKit, LLVM or NEEDS CLARIFICATION] +**Storage**: [if applicable, e.g., PostgreSQL, CoreData, files or N/A] +**Testing**: [e.g., pytest, XCTest, cargo test or NEEDS CLARIFICATION] +**Target Platform**: [e.g., Linux server, iOS 15+, WASM or NEEDS CLARIFICATION] +**Project Type**: [single/web/mobile - determines source structure] +**Performance Goals**: [domain-specific, e.g., 1000 req/s, 10k lines/sec, 60 fps or NEEDS CLARIFICATION] +**Constraints**: [domain-specific, e.g., <200ms p95, <100MB memory, offline-capable or NEEDS CLARIFICATION] +**Scale/Scope**: [domain-specific, e.g., 10k users, 1M LOC, 50 screens or NEEDS CLARIFICATION] + +## Constitution Check + +*GATE: Must pass before Phase 0 research. Re-check after Phase 1 design.* + +[Gates determined based on constitution file] + +## Project Structure + +### Documentation (this feature) + +```text +specs/[###-feature]/ +├── plan.md # This file (/speckit.plan command output) +├── research.md # Phase 0 output (/speckit.plan command) +├── data-model.md # Phase 1 output (/speckit.plan command) +├── quickstart.md # Phase 1 output (/speckit.plan command) +├── contracts/ # Phase 1 output (/speckit.plan command) +└── tasks.md # Phase 2 output (/speckit.tasks command - NOT created by /speckit.plan) +``` + +### Source Code (repository root) + + +```text +# [REMOVE IF UNUSED] Option 1: Single project (DEFAULT) +src/ +├── models/ +├── handlers/ +├── logic/ +├── cli/ +└── lib/ + +tests/ +├── contract/ +├── integration/ +└── unit/ + +# [REMOVE IF UNUSED] Option 2: Web application (when "frontend" + "backend" detected) +backend/ +├── src/ +│ ├── models/ +│ ├── handlers/ +│ ├── logic/ +│ └── api/ +└── tests/ + +frontend/ +├── src/ +│ ├── components/ +│ ├── pages/ +│ └── 
logic/ +└── tests/ + +# [REMOVE IF UNUSED] Option 3: Mobile + API (when "iOS/Android" detected) +api/ +└── [same as backend above] + +ios/ or android/ +└── [platform-specific structure: feature modules, UI flows, platform tests] +``` + +**Structure Decision**: [Document the selected structure and reference the real +directories captured above] + +## Complexity Tracking + +> **Fill ONLY if Constitution Check has violations that must be justified** + +| Violation | Why Needed | Simpler Alternative Rejected Because | +|-----------|------------|-------------------------------------| +| [e.g., 4th project] | [current need] | [why 3 projects insufficient] | +| [e.g., Repository pattern] | [specific problem] | [why direct DB access insufficient] | diff --git a/.specify/templates/spec-template.md b/.specify/templates/spec-template.md new file mode 100644 index 0000000..c67d914 --- /dev/null +++ b/.specify/templates/spec-template.md @@ -0,0 +1,115 @@ +# Feature Specification: [FEATURE NAME] + +**Feature Branch**: `[###-feature-name]` +**Created**: [DATE] +**Status**: Draft +**Input**: User description: "$ARGUMENTS" + +## User Scenarios & Testing *(mandatory)* + + + +### User Story 1 - [Brief Title] (Priority: P1) + +[Describe this user journey in plain language] + +**Why this priority**: [Explain the value and why it has this priority level] + +**Independent Test**: [Describe how this can be tested independently - e.g., "Can be fully tested by [specific action] and delivers [specific value]"] + +**Acceptance Scenarios**: + +1. **Given** [initial state], **When** [action], **Then** [expected outcome] +2. **Given** [initial state], **When** [action], **Then** [expected outcome] + +--- + +### User Story 2 - [Brief Title] (Priority: P2) + +[Describe this user journey in plain language] + +**Why this priority**: [Explain the value and why it has this priority level] + +**Independent Test**: [Describe how this can be tested independently] + +**Acceptance Scenarios**: + +1. 
**Given** [initial state], **When** [action], **Then** [expected outcome] + +--- + +### User Story 3 - [Brief Title] (Priority: P3) + +[Describe this user journey in plain language] + +**Why this priority**: [Explain the value and why it has this priority level] + +**Independent Test**: [Describe how this can be tested independently] + +**Acceptance Scenarios**: + +1. **Given** [initial state], **When** [action], **Then** [expected outcome] + +--- + +[Add more user stories as needed, each with an assigned priority] + +### Edge Cases + + + +- What happens when [boundary condition]? +- How does system handle [error scenario]? + +## Requirements *(mandatory)* + + + +### Functional Requirements + +- **FR-001**: System MUST [specific capability, e.g., "allow users to create accounts"] +- **FR-002**: System MUST [specific capability, e.g., "validate email addresses"] +- **FR-003**: Users MUST be able to [key interaction, e.g., "reset their password"] +- **FR-004**: System MUST [data requirement, e.g., "persist user preferences"] +- **FR-005**: System MUST [behavior, e.g., "log all security events"] + +*Example of marking unclear requirements:* + +- **FR-006**: System MUST authenticate users via [NEEDS CLARIFICATION: auth method not specified - email/password, SSO, OAuth?] 
+- **FR-007**: System MUST retain user data for [NEEDS CLARIFICATION: retention period not specified] + +### Key Entities *(include if feature involves data)* + +- **[Entity 1]**: [What it represents, key attributes without implementation] +- **[Entity 2]**: [What it represents, relationships to other entities] + +## Success Criteria *(mandatory)* + + + +### Measurable Outcomes + +- **SC-001**: [Measurable metric, e.g., "Users can complete account creation in under 2 minutes"] +- **SC-002**: [Measurable metric, e.g., "System handles 1000 concurrent users without degradation"] +- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete primary task on first attempt"] +- **SC-004**: [Business metric, e.g., "Reduce support tickets related to [X] by 50%"] diff --git a/.specify/templates/tasks-template.md b/.specify/templates/tasks-template.md new file mode 100644 index 0000000..85cdd3c --- /dev/null +++ b/.specify/templates/tasks-template.md @@ -0,0 +1,251 @@ +--- + +description: "Task list template for feature implementation" +--- + +# Tasks: [FEATURE NAME] + +**Input**: Design documents from `/specs/[###-feature-name]/` +**Prerequisites**: plan.md (required), spec.md (required for user stories), research.md, data-model.md, contracts/ + +**Tests**: The examples below include test tasks. Tests are OPTIONAL - only include them if explicitly requested in the feature specification. + +**Organization**: Tasks are grouped by user story to enable independent implementation and testing of each story. + +## Format: `[ID] [P?] 
[Story] Description` + +- **[P]**: Can run in parallel (different files, no dependencies) +- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3) +- Include exact file paths in descriptions + +## Path Conventions + +- **Single project**: `src/`, `tests/` at repository root +- **Web app**: `backend/src/`, `frontend/src/` +- **Mobile**: `api/src/`, `ios/src/` or `android/src/` +- Paths shown below assume single project - adjust based on plan.md structure + + + +## Phase 1: Setup (Shared Infrastructure) + +**Purpose**: Project initialization and basic structure + +- [ ] T001 Create project structure per implementation plan +- [ ] T002 Initialize [language] project with [framework] dependencies +- [ ] T003 [P] Configure linting and formatting tools + +--- + +## Phase 2: Foundational (Blocking Prerequisites) + +**Purpose**: Core infrastructure that MUST be complete before ANY user story can be implemented + +**⚠️ CRITICAL**: No user story work can begin until this phase is complete + +Examples of foundational tasks (adjust based on your project): + +- [ ] T004 Setup database schema and migrations framework +- [ ] T005 [P] Implement authentication/authorization framework +- [ ] T006 [P] Setup API routing and middleware structure +- [ ] T007 Create base models/entities that all stories depend on +- [ ] T008 Configure error handling and logging infrastructure +- [ ] T009 Setup environment configuration management + +**Checkpoint**: Foundation ready - user story implementation can now begin in parallel + +--- + +## Phase 3: User Story 1 - [Title] (Priority: P1) 🎯 MVP + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 1 (OPTIONAL - only if tests requested) ⚠️ + +> **NOTE: Write these tests FIRST, ensure they FAIL before implementation** + +- [ ] T010 [P] [US1] Contract test for [endpoint] in tests/contract/test_[name].py +- [ ] T011 [P] [US1] 
Integration test for [user journey] in tests/integration/test_[name].py + +### Implementation for User Story 1 + +- [ ] T012 [P] [US1] Create [Entity1] model in src/models/[entity1].py +- [ ] T013 [P] [US1] Create [Entity2] model in src/models/[entity2].py +- [ ] T014 [US1] Implement [Handler/Facade] in src/logic/[handler].py (depends on T012, T013) +- [ ] T015 [US1] Implement [endpoint/feature] in src/[location]/[file].py +- [ ] T016 [US1] Add validation and error handling +- [ ] T017 [US1] Add logging for user story 1 operations + +**Checkpoint**: At this point, User Story 1 should be fully functional and testable independently + +--- + +## Phase 4: User Story 2 - [Title] (Priority: P2) + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 2 (OPTIONAL - only if tests requested) ⚠️ + +- [ ] T018 [P] [US2] Contract test for [endpoint] in tests/contract/test_[name].py +- [ ] T019 [P] [US2] Integration test for [user journey] in tests/integration/test_[name].py + +### Implementation for User Story 2 + +- [ ] T020 [P] [US2] Create [Entity] model in src/models/[entity].py +- [ ] T021 [US2] Implement [Handler/Facade] in src/logic/[handler].py +- [ ] T022 [US2] Implement [endpoint/feature] in src/[location]/[file].py +- [ ] T023 [US2] Integrate with User Story 1 components (if needed) + +**Checkpoint**: At this point, User Stories 1 AND 2 should both work independently + +--- + +## Phase 5: User Story 3 - [Title] (Priority: P3) + +**Goal**: [Brief description of what this story delivers] + +**Independent Test**: [How to verify this story works on its own] + +### Tests for User Story 3 (OPTIONAL - only if tests requested) ⚠️ + +- [ ] T024 [P] [US3] Contract test for [endpoint] in tests/contract/test_[name].py +- [ ] T025 [P] [US3] Integration test for [user journey] in tests/integration/test_[name].py + +### Implementation for User Story 3 + +- [ ] T026 [P] [US3] Create 
[Entity] model in src/models/[entity].py +- [ ] T027 [US3] Implement [Handler/Facade] in src/logic/[handler].py +- [ ] T028 [US3] Implement [endpoint/feature] in src/[location]/[file].py + +**Checkpoint**: All user stories should now be independently functional + +--- + +[Add more user story phases as needed, following the same pattern] + +--- + +## Phase N: Polish & Cross-Cutting Concerns + +**Purpose**: Improvements that affect multiple user stories + +- [ ] TXXX [P] Documentation updates in docs/ +- [ ] TXXX Code cleanup and refactoring +- [ ] TXXX Performance optimization across all stories +- [ ] TXXX [P] Additional unit tests (if requested) in tests/unit/ +- [ ] TXXX Security hardening +- [ ] TXXX Run quickstart.md validation + +--- + +## Dependencies & Execution Order + +### Phase Dependencies + +- **Setup (Phase 1)**: No dependencies - can start immediately +- **Foundational (Phase 2)**: Depends on Setup completion - BLOCKS all user stories +- **User Stories (Phase 3+)**: All depend on Foundational phase completion + - User stories can then proceed in parallel (if staffed) + - Or sequentially in priority order (P1 → P2 → P3) +- **Polish (Final Phase)**: Depends on all desired user stories being complete + +### User Story Dependencies + +- **User Story 1 (P1)**: Can start after Foundational (Phase 2) - No dependencies on other stories +- **User Story 2 (P2)**: Can start after Foundational (Phase 2) - May integrate with US1 but should be independently testable +- **User Story 3 (P3)**: Can start after Foundational (Phase 2) - May integrate with US1/US2 but should be independently testable + +### Within Each User Story + +- Tests (if included) MUST be written and FAIL before implementation +- Models before handlers +- Handlers before endpoints +- Core implementation before integration +- Story complete before moving to next priority + +### Parallel Opportunities + +- All Setup tasks marked [P] can run in parallel +- All Foundational tasks marked [P] can run in 
parallel (within Phase 2) +- Once Foundational phase completes, all user stories can start in parallel (if team capacity allows) +- All tests for a user story marked [P] can run in parallel +- Models within a story marked [P] can run in parallel +- Different user stories can be worked on in parallel by different team members + +--- + +## Parallel Example: User Story 1 + +```bash +# Launch all tests for User Story 1 together (if tests requested): +Task: "Contract test for [endpoint] in tests/contract/test_[name].py" +Task: "Integration test for [user journey] in tests/integration/test_[name].py" + +# Launch all models for User Story 1 together: +Task: "Create [Entity1] model in src/models/[entity1].py" +Task: "Create [Entity2] model in src/models/[entity2].py" +``` + +--- + +## Implementation Strategy + +### MVP First (User Story 1 Only) + +1. Complete Phase 1: Setup +2. Complete Phase 2: Foundational (CRITICAL - blocks all stories) +3. Complete Phase 3: User Story 1 +4. **STOP and VALIDATE**: Test User Story 1 independently +5. Deploy/demo if ready + +### Incremental Delivery + +1. Complete Setup + Foundational → Foundation ready +2. Add User Story 1 → Test independently → Deploy/Demo (MVP!) +3. Add User Story 2 → Test independently → Deploy/Demo +4. Add User Story 3 → Test independently → Deploy/Demo +5. Each story adds value without breaking previous stories + +### Parallel Team Strategy + +With multiple developers: + +1. Team completes Setup + Foundational together +2. Once Foundational is done: + - Developer A: User Story 1 + - Developer B: User Story 2 + - Developer C: User Story 3 +3. 
Stories complete and integrate independently + +--- + +## Notes + +- [P] tasks = different files, no dependencies +- [Story] label maps task to specific user story for traceability +- Each user story should be independently completable and testable +- Verify tests fail before implementing +- Commit after each task or logical group +- Stop at any checkpoint to validate story independently +- Avoid: vague tasks, same file conflicts, cross-story dependencies that break independence diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..22056fb --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "cucumberautocomplete.steps": ["LunaDraw.Tests/Steps/*.cs"], + "cucumberautocomplete.syncfeatures": "LunaDraw.Tests/Features/*.feature", + "cucumberautocomplete.strictGherkinCompletion": true, + "cucumberautocomplete.onStepDefinitionNotFound": "recommend" +} diff --git a/Components/DrawingGalleryPopup.xaml b/Components/DrawingGalleryPopup.xaml index 48d4513..2eaf248 100644 --- a/Components/DrawingGalleryPopup.xaml +++ b/Components/DrawingGalleryPopup.xaml @@ -48,6 +48,7 @@ CancelCommand { get; } public ReactiveCommand OpenDrawingCommand { get; } public ReactiveCommand LoadDrawingsCommand { get; } + public ReactiveCommand DuplicateDrawingCommand { get; } + public ReactiveCommand DeleteDrawingCommand { get; } + public ReactiveCommand RenameDrawingCommand { get; } + + private List? contextCommands; + public List? 
ContextCommands + { + get => contextCommands; + set => this.RaiseAndSetIfChanged(ref contextCommands, value); + } + public DrawingGalleryPopupViewModel( GalleryViewModel galleryViewModel, @@ -92,6 +104,17 @@ public DrawingGalleryPopupViewModel( LoadDrawingsCommand = ReactiveCommand.CreateFromTask(LoadDrawingsAsync); LoadDrawingsCommand.Execute().Subscribe(); + + DuplicateDrawingCommand = ReactiveCommand.CreateFromTask(DuplicateDrawingAsync); + DeleteDrawingCommand = ReactiveCommand.CreateFromTask(DeleteDrawingAsync); + RenameDrawingCommand = ReactiveCommand.CreateFromTask(RenameDrawingAsync); + + ContextCommands = new List + { + new("Duplicate", DuplicateDrawingCommand), + new("Rename", RenameDrawingCommand), + new("Delete", DeleteDrawingCommand, isDestructive: true) + }; drawingListChangedSubscription = messageBus.Listen() .ObserveOn(RxApp.MainThreadScheduler) .Subscribe(async msg => @@ -233,6 +256,43 @@ private async Task LoadDrawingsAsync() IsLoading = false; } } + private async Task DuplicateDrawingAsync(DrawingItemViewModel item) + { + if (item?.Drawing == null) return; + await galleryViewModel.DuplicateDrawingCommand.Execute(item.Drawing).GetAwaiter(); + } + + private async Task DeleteDrawingAsync(DrawingItemViewModel item) + { + if (item?.Drawing == null) return; + + bool confirmed = await Application.Current?.MainPage?.DisplayAlertAsync( + "Delete Drawing", + $"Are you sure you want to delete '{item.Title}'?", + "Delete", + "Cancel"); + + if (!confirmed) return; + await galleryViewModel.DeleteDrawingCommand.Execute(item.Drawing).GetAwaiter(); + } + + private async Task RenameDrawingAsync(DrawingItemViewModel item) + { + if (item?.Drawing == null) return; + + string newName = await Application.Current?.MainPage?.DisplayPromptAsync( + "Rename Drawing", + "Enter new name:", + initialValue: item.Title, + maxLength: 50, + placeholder: "Drawing name") ?? 
string.Empty; + + if (string.IsNullOrWhiteSpace(newName)) return; + + await galleryViewModel.RenameDrawing(item.Drawing, newName); + messageBus.SendMessage(new DrawingListChangedMessage(item.Drawing.Id)); + } + public void Dispose() { drawingListChangedSubscription?.Dispose(); diff --git a/LunaDraw.csproj b/LunaDraw.csproj index a435568..fccd303 100644 --- a/LunaDraw.csproj +++ b/LunaDraw.csproj @@ -53,7 +53,9 @@ - + + + @@ -75,7 +77,7 @@ - + diff --git a/specs/001-movie-mode-playback/checklists/requirements.md b/specs/001-movie-mode-playback/checklists/requirements.md new file mode 100644 index 0000000..5e85396 --- /dev/null +++ b/specs/001-movie-mode-playback/checklists/requirements.md @@ -0,0 +1,34 @@ +# Specification Quality Checklist: Movie Mode (Playback) + +**Purpose**: Validate specification completeness and quality before proceeding to planning +**Created**: 2025-12-23 +**Feature**: [Link to spec.md](../spec.md) + +## Content Quality + +- [x] No implementation details (languages, frameworks, APIs) +- [x] Focused on user value and business needs +- [x] Written for non-technical stakeholders +- [x] All mandatory sections completed + +## Requirement Completeness + +- [x] No [NEEDS CLARIFICATION] markers remain +- [x] Requirements are testable and unambiguous +- [x] Success criteria are measurable +- [x] Success criteria are technology-agnostic (no implementation details) +- [x] All acceptance scenarios are defined +- [x] Edge cases are identified +- [x] Scope is clearly bounded +- [x] Dependencies and assumptions identified + +## Feature Readiness + +- [x] All functional requirements have clear acceptance criteria +- [x] User scenarios cover primary flows +- [x] Feature meets measurable outcomes defined in Success Criteria +- [x] No implementation details leak into specification + +## Notes + +- Spec ready for planning. 
\ No newline at end of file diff --git a/specs/001-movie-mode-playback/contracts/services.cs b/specs/001-movie-mode-playback/contracts/services.cs new file mode 100644 index 0000000..2fe4213 --- /dev/null +++ b/specs/001-movie-mode-playback/contracts/services.cs @@ -0,0 +1,53 @@ +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using LunaDraw.Logic.Models; + +namespace LunaDraw.Logic.Services +{ + /// + /// Manages the recording of drawing events (creation timestamps). + /// + public interface IRecordingService + { + /// + /// Stamps the element with the current time. + /// Call this when a new element is added to the canvas. + /// + void RecordCreation(IDrawableElement element); + } + + /// + /// Controls the playback of the drawing history. + /// + public interface IPlaybackService + { + /// + /// Current state of playback. + /// + IObservable CurrentState { get; } + + /// + /// Prepares the playback sequence from a list of layers. + /// Extracts all elements, sorts by CreatedAt, and prepares the queue. + /// + /// The layers to reconstruct. + void Load(IEnumerable layers); + + /// + /// Starts or Resumes playback. + /// + /// Desired playback speed. + Task PlayAsync(PlaybackSpeed speed); + + /// + /// Pauses playback. + /// + Task PauseAsync(); + + /// + /// Stops playback and resets to the final state (or initial state depending on UX). + /// + Task StopAsync(); + } +} diff --git a/specs/001-movie-mode-playback/data-model.md b/specs/001-movie-mode-playback/data-model.md new file mode 100644 index 0000000..b3f47ec --- /dev/null +++ b/specs/001-movie-mode-playback/data-model.md @@ -0,0 +1,81 @@ +# Data Model: Movie Mode (Playback) + +## New/Modified Entities + +### 1. IDrawableElement (Modification) + +Add timestamp to track creation order for playback reconstruction. + +```csharp +public interface IDrawableElement +{ + // ... existing properties ... + + /// + /// Timestamp when the element was created. 
+ /// Used for "Movie Mode" playback to reconstruct creation order. + /// + DateTimeOffset CreatedAt { get; set; } +} +``` + +### 2. PlaybackSpeed (Enum) + +Defines the user-selectable speed presets. + +```csharp +public enum PlaybackSpeed +{ + Slow, // e.g., 500ms per element + Quick, // e.g., 100ms per element + Fast // e.g., 20ms per element +} +``` + +### 3. PlaybackState (Enum) + +Tracks the current state of the playback engine. + +```csharp +public enum PlaybackState +{ + Stopped, + Playing, + Paused, + Completed +} +``` + +### 4. PlaybackSettings (Value Object / ViewModel Property) + +Configuration for the playback session. + +```csharp +public class PlaybackSettings +{ + public PlaybackSpeed Speed { get; set; } = PlaybackSpeed.Quick; + public bool AutoLoop { get; set; } = false; // Potential future feature +} +``` + +## Storage Format (JSON) + +No structural change to the file format is strictly necessary if `IDrawableElement` serialization includes the new `CreatedAt` property. The `Drawing` file (likely a JSON array of layers/elements) will simply include this new field. + +```json +{ + "layers": [ + { + "id": "...", + "elements": [ + { + "type": "DrawablePath", + "id": "...", + "createdAt": "2025-12-23T10:00:00Z", // <--- NEW + "points": [...] + } + ] + } + ] +} +``` diff --git a/specs/001-movie-mode-playback/plan.md b/specs/001-movie-mode-playback/plan.md new file mode 100644 index 0000000..89a4bb0 --- /dev/null +++ b/specs/001-movie-mode-playback/plan.md @@ -0,0 +1,85 @@ +# Implementation Plan: Movie Mode (Playback) + +**Branch**: `001-movie-mode-playback` | **Date**: 2025-12-23 | **Spec**: [Link](spec.md) +**Input**: Feature specification from `/specs/001-movie-mode-playback/spec.md` + +## Summary + +This feature implements "Movie Mode" for LunaDraw, enabling children to watch their drawings recreate themselves as a short animation. 
It involves recording all canvas modification events (strokes, shapes, stamps, undos) in real-time, serializing this history with the drawing file, and providing a playback engine that reconstructs the final image by re-rendering the clean history (excluding undone actions) at a user-selectable speed. + +## Technical Context + +**Language/Version**: C# 12, .NET 9/10 (MAUI) +**Primary Dependencies**: SkiaSharp (rendering), ReactiveUI (MVVM), System.Text.Json (serialization) +**Storage**: File-based storage (embedded in drawing files or sidecar JSON) +**Testing**: xUnit, Moq +**Target Platform**: Windows, Android, iOS, MacCatalyst +**Project Type**: Mobile/Desktop App (.NET MAUI) +**Performance Goals**: Playback initiation < 1s, smooth rendering of 100+ strokes in < 10s. +**Constraints**: Must run on low-end mobile devices; Playback must happen on the existing CanvasView. +**Scale/Scope**: Supports drawings with thousands of strokes; Memory efficient recording. + +## Constitution Check + +*GATE: Must pass before Phase 0 research. Re-check after Phase 1 design.* + +- [x] **I. Child-Centric UX**: Simple "Play" button, visual feedback, adjustable speed presets (Slow/Quick/Fast). +- [x] **II. Reactive Architecture**: Playback state managed via ReactiveUI Observables. +- [x] **III. Test-First & Quality**: Recording and Playback logic will be unit tested independently. +- [x] **IV. SOLID & Clean Code**: Separation of concerns (Recording Logic vs. Playback Engine vs. UI). +- [x] **V. SkiaSharp & Performance**: Playback uses existing SkiaSharp rendering pipeline; "Clean Reconstruction" avoids rendering undone paths. +- [x] **VI. Architecture Patterns & Naming**: No "Service" or "Manager" suffixes. Using "Handlers" and "Facades". +- [x] **VII. SPARC Methodology**: Following Spec -> Plan -> Task flow. 
+ +## Project Structure + +### Documentation (this feature) + +```text +specs/001-movie-mode-playback/ +├── plan.md # This file +├── research.md # Phase 0 output +├── data-model.md # Phase 1 output +├── quickstart.md # Phase 1 output +├── contracts/ # Phase 1 output (Interfaces) +└── tasks.md # Phase 2 output +``` + +### Source Code (repository root) + +```text +Logic/ +├── Models/ +│ ├── DrawingEvent.cs # New: Represents a single recorded action +│ ├── DrawingHistory.cs # New: Collection of events +│ └── PlaybackState.cs # New: State model for playback (Playing, Paused, Speed) +├── Handlers/ +│ ├── IPlaybackHandler.cs # New: Interface for playback control +│ ├── PlaybackHandler.cs # New: Implementation of playback logic +│ ├── IRecordingHandler.cs # New: Interface for event recording +│ └── RecordingHandler.cs # New: Implementation of recording logic +├── ViewModels/ +│ └── PlaybackViewModel.cs # New: VM for playback controls +└── ... + +Components/ +├── PlaybackControls.xaml # New: Floating controls for playback (Speed, Stop) +└── ... + +tests/LunaDraw.Tests/ +├── Features/ +│ └── MovieMode/ # New: Feature-specific tests +│ ├── RecordingHandlerTests.cs +│ └── PlaybackHandlerTests.cs +└── ... +``` + +**Structure Decision**: Option 1 (Single Project) - Integrating directly into existing `LunaDraw` project structure following standard conventions. + +## Complexity Tracking + +> **Fill ONLY if Constitution Check has violations that must be justified** + +| Violation | Why Needed | Simpler Alternative Rejected Because | +|-----------|------------|-------------------------------------| +| N/A | | | \ No newline at end of file diff --git a/specs/001-movie-mode-playback/quickstart.md b/specs/001-movie-mode-playback/quickstart.md new file mode 100644 index 0000000..6d921c2 --- /dev/null +++ b/specs/001-movie-mode-playback/quickstart.md @@ -0,0 +1,42 @@ +# Quickstart: Testing Movie Mode + +This guide verifies the "Movie Mode" feature implementation. 
+ +## Prerequisites + +1. Launch the **LunaDraw** app. +2. Ensure you are on the **Main Canvas**. + +## Verification Steps + +### 1. Verify Recording (Data Model) +1. Draw 3 distinct shapes (e.g., Circle, Square, Triangle) in order. +2. Save the drawing. +3. *Technical Check*: Inspect the saved file (or debug output) to verify `createdAt` timestamps are present and ascending. + +### 2. Verify Playback Controls +1. Locate the **"Play"** button (Toolbar or Gallery). +2. Tap "Play". +3. Verify a **Speed Selector** (Slow/Quick/Fast) appears or is accessible. + +### 3. Verify Playback Behavior +1. Select "Quick". +2. Tap "Start" (or if Play starts immediately). +3. **Observe**: + - Canvas clears immediately (White/Background). + - Circle appears. + - Square appears. + - Triangle appears. + - Playback ends, controls return to normal. + +### 4. Verify "Clean Reconstruction" +1. Draw a Line. +2. Click **Undo**. +3. Draw a Circle. +4. Tap "Play". +5. **Observe**: The Line should NOT appear. Only the Circle should appear. + +### 5. Verify Interruption +1. Start Playback. +2. While playing, tap the **Back** button or **Stop**. +3. **Observe**: Playback stops immediately, and the full drawing is restored. diff --git a/specs/001-movie-mode-playback/research.md b/specs/001-movie-mode-playback/research.md new file mode 100644 index 0000000..b29cd34 --- /dev/null +++ b/specs/001-movie-mode-playback/research.md @@ -0,0 +1,48 @@ +# Research: Movie Mode (Playback) + +**Feature**: Movie Mode (Playback) +**Date**: 2025-12-23 +**Status**: Complete + +## Decision Log + +### 1. Data Structure for History +**Decision**: Use a linear list of `DrawingEvent` objects stored within the `Drawing` model (or alongside it). +**Rationale**: Simple to implement and serialize. Since we need "Clean Reconstruction", the list will be filtered on save or load to remove undone actions, or we can keep the full history and filter at runtime. 
Given the "Clean Reconstruction" requirement, filtering at save time (or maintaining a "clean" list alongside the undo stack) is more efficient for playback. +**Approach**: +- Create `DrawingEvent` class (Type, Data, Timestamp). +- When saving, serialize this list to JSON. +- Embed in the custom `.luna` file format (likely a ZIP or custom JSON structure). + +### 2. Playback Mechanism +**Decision**: `PlaybackService` driving a `DispatcherTimer` or `AnimationLoop`. +**Rationale**: MAUI's `Dispatcher.StartTimer` or an `IDispatcherTimer` is sufficient for the coarse-grained control needed here (adding elements to the canvas). We don't need a 60fps game loop for *logic*, just for *rendering* (which SkiaSharp handles). +**Speed Control**: +- **Slow**: Add 1 element every 500ms. +- **Quick**: Add 1 element every 100ms. +- **Fast**: Add 1 element every 20ms. + +### 3. Rendering "Clean Reconstruction" +**Decision**: Clear canvas, then sequentially add elements from the history list to the active `Layer`. +**Rationale**: Reusing the existing `Layer` and `IDrawableElement` infrastructure ensures visual consistency. The `PlaybackService` will essentially act as a "virtual user" adding elements. +**Optimization**: For "Clean Reconstruction", we only care about the elements that survived to the end. +- *Option A*: Record every add/undo. Process list to remove undone pairs. +- *Option B*: Snapshot the final `Layer.Elements` list. This loses the *order* if the collection doesn't preserve creation order (it usually does). +- *Decision*: Use the `Layer.Elements` collection order if it preserves creation time. If Z-index manipulation changes order, we might need a separate `CreationOrder` index. +- *Refinement*: Existing `Layer` sorts by `ZIndex` for drawing. We need to persist the *creation order* separately if users can reorder layers/elements (which might change draw order but not creation time). +- *Final Decision*: Add `CreationTimestamp` to `IDrawableElement`. 
Sort final elements by this timestamp to reconstruct the "movie". This automatically gives us "Clean Reconstruction" (undone elements are gone) and handles reordering (we play back in time order). + +### 4. Serialization +**Decision**: `System.Text.Json` with polymorphic serialization for `IDrawableElement`. +**Rationale**: Standard, fast, efficient. Need to handle abstract `IDrawableElement` types (`DrawablePath`, `DrawableShape`, etc.). + +## Alternatives Considered + +- **Video Export (MP4)**: Rejected per requirements (FR-005). Too heavy, requires FFmpeg or platform specific encoders. +- **Delta Compression**: Rejected. Drawings aren't large enough to warrant complex delta diffing yet. +- **Full Undo/Redo Replay**: Rejected. Requirement is "Clean Reconstruction". + +## Risk Assessment + +- **Large Drawings**: 10k strokes might take time to sort/deserialize. *Mitigation*: Async loading. +- **Legacy Files**: Old drawings won't have timestamps. *Mitigation*: Default to arbitrary order or index order for legacy files. diff --git a/specs/001-movie-mode-playback/spec.md b/specs/001-movie-mode-playback/spec.md new file mode 100644 index 0000000..58a7061 --- /dev/null +++ b/specs/001-movie-mode-playback/spec.md @@ -0,0 +1,88 @@ +# Feature Specification: Movie Mode (Playback) + +**Feature Branch**: `001-movie-mode-playback` +**Created**: 2025-12-23 +**Status**: Draft +**Input**: User description: "As a user I would like to automatically record the drawing procedure and allow playback as a short animation." + +## User Scenarios & Testing + +### User Story 1 - Automatic Recording (Priority: P1) + +The system automatically captures every stroke and action as the child draws, without requiring any manual setup or activation. + +**Why this priority**: Without data recording, playback is impossible. This is the foundational data ingestion requirement. 
+ +**Independent Test**: Can be tested by drawing various shapes and verifying that a sequence of drawing events is persisted in memory/storage, even without the UI to play it back. + +**Acceptance Scenarios**: + +1. **Given** a new blank canvas, **When** the user draws multiple strokes (lines, shapes, stamps), **Then** the system records each addition sequentially with its properties (color, type, position). +2. **Given** an existing drawing, **When** the user performs an Undo action, **Then** the recording captures the removal/undo event to ensure playback reflects the correction. +3. **Given** a complex drawing session, **When** the user saves the drawing, **Then** the recorded history is saved alongside the final image. + +--- + +### User Story 2 - In-App Playback (Priority: P2) + +The user can tap a "Play" button to watch their drawing recreate itself magically on screen. + +**Why this priority**: This delivers the user-facing value ("Movie Mode") requested. + +**Independent Test**: Can be tested by loading a drawing with known history and triggering playback, verifying the visual sequence matches the creation order. + +**Acceptance Scenarios**: + +1. **Given** a drawing with history, **When** the user taps the "Play" button, **Then** the canvas clears and rapidly redraws each stroke in the order they were created. +2. **Given** a playback in progress, **When** the playback finishes, **Then** the final state of the drawing is restored and editable. +3. **Given** a playback in progress, **When** the user taps anywhere, **Then** the playback stops and the full drawing is immediately shown (skip to end). + +## Clarifications + +### Session 2025-12-23 +- Q: Should playback speed be capped, a fixed multiplier, or fixed speed? → A: Fixed speed with adjustable presets (Slow, Quick, Fast). +- Q: Should playback show full history (including undos) or a clean reconstruction? → A: Clean reconstruction (only play back elements that exist in the final drawing). 
+- Q: Should playback be accessible from the Canvas, Gallery, or both? → A: Both. +- Q: Should the canvas clear immediately or have a transition before playback? → A: Immediate clear. +- Q: Should playback include audio feedback/sounds as elements appear? → A: Silent playback (visual-only). + +## Requirements + +### Functional Requirements + +- **FR-001**: System MUST automatically record all canvas modification events (add stroke, add shape, add stamp, clear canvas, undo/redo). +- **FR-002**: System MUST serialize and save this recording history when the drawing is saved to storage. +- **FR-003**: System MUST provide a "Play" button in both the Drawing Gallery (for saved works) and the Canvas UI (for the current session) to trigger playback. +- **FR-004**: Playback MUST animate the drawing process using a fixed speed based on user-selected presets. +- **FR-005**: Playback MUST render within the app using the existing application rendering system, NOT export a video file. +- **FR-006**: During playback, all editing tools MUST be disabled or hidden to prevent interaction conflicts. +- **FR-007**: System MUST filter the recording history to perform a "Clean Reconstruction", playing back only the elements present in the final drawing state (ignoring undone actions). +- **FR-008**: System MUST provide a simple speed selector (e.g., "Slow", "Quick", "Fast") to control playback tempo. +- **FR-009**: System MUST immediately clear the canvas to its background state when playback is initiated. + +### Edge Cases + +- **Interruption**: If the user backgrounds the app or receives a call during playback, playback stops and the canvas returns to the final editable state. +- **Memory Limits**: If a drawing session becomes extremely long (e.g., >10,000 strokes), the system stops recording history to prevent memory crashes, preserving the history up to that point. 
+- **Corrupt History**: If the saved history data is unreadable, the system fails gracefully by disabling the "Play" button for that drawing (showing only the static image). +- **Navigation**: If user navigates away (Back button) during playback, playback stops immediately. + +### Key Entities + +- **DrawingHistory**: An ordered list of `DrawingEvent` objects. +- **DrawingEvent**: Represents a single atomic change (Type: Add/Remove/Clear, Data: Stroke/Element, Timestamp/Order). + +## Success Criteria + +### Measurable Outcomes + +- **SC-001**: Playback initiates within 1 second of tapping the "Play" button. +- **SC-002**: A drawing with 100 strokes completes playback in under 10 seconds (ensuring "short animation" feel). +- **SC-003**: 100% of saved drawings retain their playback history after app restart. +- **SC-004**: Playback accurately reproduces the final visual state (identical pixel/vector output) compared to the static drawing. + +### Assumptions + +- **Playback Location**: Playback occurs directly on the canvas view, temporarily locking user input. +- **Audio**: No background music generation required for this MVP (though "multi-sensory feedback" is a core principle, we focus on visual playback first). +- **Format**: Feature is strictly in-app playback, not video file export (MP4/GIF) at this stage. 
\ No newline at end of file From 1e7279fd966d9c7b9857454a5360f86b1d4c5a4f Mon Sep 17 00:00:00 2001 From: Jeff H Date: Wed, 24 Dec 2025 13:35:30 -0500 Subject: [PATCH 02/11] cont attempts at feature parity --- .claude | 1 - .../agent-os/implementation-verifier.md | 128 +++++ .claude/agents/agent-os/implementer.md | 51 ++ .claude/agents/agent-os/product-planner.md | 209 ++++++++ .claude/agents/agent-os/spec-initializer.md | 92 ++++ .claude/agents/agent-os/spec-shaper.md | 293 +++++++++++ .claude/agents/agent-os/spec-verifier.md | 313 ++++++++++++ .claude/agents/agent-os/spec-writer.md | 130 +++++ .claude/agents/agent-os/tasks-list-creator.md | 230 +++++++++ .claude/commands/agent-os/create-tasks.md | 40 ++ .claude/commands/agent-os/implement-tasks.md | 55 +++ .../commands/agent-os/orchestrate-tasks.md | 180 +++++++ .claude/commands/agent-os/plan-product.md | 36 ++ .claude/commands/agent-os/shape-spec.md | 52 ++ .claude/commands/agent-os/write-spec.md | 22 + .claude/commands/speckit.analyze.md | 184 +++++++ .claude/commands/speckit.checklist.md | 294 +++++++++++ .claude/commands/speckit.clarify.md | 181 +++++++ .claude/commands/speckit.constitution.md | 82 ++++ .claude/commands/speckit.implement.md | 135 ++++++ .claude/commands/speckit.plan.md | 89 ++++ .claude/commands/speckit.specify.md | 258 ++++++++++ .claude/commands/speckit.tasks.md | 137 ++++++ .claude/commands/speckit.taskstoissues.md | 30 ++ .gemini | 1 - .gemini/commands/openspec/apply.toml | 21 + .gemini/commands/openspec/archive.toml | 25 + .gemini/commands/openspec/proposal.toml | 26 + .gemini/commands/speckit.analyze.toml | 188 ++++++++ .gemini/commands/speckit.checklist.toml | 298 ++++++++++++ .gemini/commands/speckit.clarify.toml | 185 +++++++ .gemini/commands/speckit.constitution.toml | 86 ++++ .gemini/commands/speckit.implement.toml | 139 ++++++ .gemini/commands/speckit.plan.toml | 93 ++++ .gemini/commands/speckit.specify.toml | 262 ++++++++++ .gemini/commands/speckit.tasks.toml | 141 ++++++ 
.gemini/commands/speckit.taskstoissues.toml | 34 ++ .gitignore | 37 +- AGENTS.md | 18 + App.xaml | 2 + App.xaml.cs | 13 +- ClearCache.bat | 10 + Components/PlaybackControls.xaml | 46 ++ Components/PlaybackControls.xaml.cs | 35 ++ Components/ToolbarView.xaml | 11 + Converters/Base64ToImageConverter.cs | 34 +- Converters/PlaybackStateConverters.cs | 55 +++ Logic/Handlers/CanvasInputHandler.cs | 5 +- Logic/Handlers/IPlaybackHandler.cs | 64 +++ Logic/Handlers/IRecordingHandler.cs | 38 ++ Logic/Handlers/PlaybackHandler.cs | 222 +++++++++ Logic/Handlers/RecordingHandler.cs | 40 ++ Logic/Messages/AppSleepingMessage.cs | 28 ++ .../Messages/TogglePlaybackControlsMessage.cs | 28 ++ Logic/Models/DrawableEllipse.cs | 101 ++-- Logic/Models/DrawableGroup.cs | 3 + Logic/Models/DrawableImage.cs | 117 ++--- Logic/Models/DrawableLine.cs | 23 +- Logic/Models/DrawablePath.cs | 167 ++++--- Logic/Models/DrawableRectangle.cs | 101 ++-- Logic/Models/DrawableStamps.cs | 106 ++-- Logic/Models/DrawingEvent.cs | 43 ++ Logic/Models/ExternalModels.cs | 6 +- Logic/Models/IDrawableElement.cs | 12 + Logic/Models/PlaybackState.cs | 39 ++ Logic/Services/IThumbnailCacheFacade.cs | 23 + Logic/Services/ThumbnailCacheFacade.cs | 23 + Logic/Utils/DrawingStorageMomento.cs | 3 +- Logic/Utils/LayerFacade.cs | 36 +- Logic/ViewModels/GalleryViewModel.cs | 1 - Logic/ViewModels/LayerPanelViewModel.cs | 1 - Logic/ViewModels/MainViewModel.cs | 13 +- Logic/ViewModels/PlaybackViewModel.cs | 64 +++ Logic/ViewModels/ToolbarViewModel.cs | 6 + MauiProgram.cs | 5 + Pages/MainPage.xaml | 5 + Pages/MainPage.xaml.cs | 5 - openspec/AGENTS.md | 456 ++++++++++++++++++ .../improve-movie-mode-playback/design.md | 44 ++ .../improve-movie-mode-playback/proposal.md | 20 + .../specs/smooth-playback/spec.md | 46 ++ .../improve-movie-mode-playback/tasks.md | 18 + openspec/project.md | 56 +++ .../contracts/services.cs | 23 + specs/001-movie-mode-playback/tasks.md | 124 +++++ tests/CarouselPerformance/App.xaml.cs | 25 +- 
tests/CarouselPerformance/AppShell.xaml.cs | 25 +- tests/CarouselPerformance/MainPage.xaml.cs | 23 + tests/CarouselPerformance/MainViewModel.cs | 23 + tests/CarouselPerformance/MauiProgram.cs | 25 +- .../Platforms/Android/MainActivity.cs | 25 +- .../Platforms/Android/MainApplication.cs | 25 +- .../Platforms/MacCatalyst/AppDelegate.cs | 25 +- .../Platforms/MacCatalyst/Program.cs | 25 +- .../Platforms/Windows/App.xaml.cs | 25 +- .../Platforms/iOS/AppDelegate.cs | 25 +- .../Platforms/iOS/Program.cs | 25 +- .../CanvasInputHandlerRobustTests.cs | 2 + .../LunaDraw.Tests/CanvasInputHandlerTests.cs | 4 + tests/LunaDraw.Tests/EraserToolTests.cs | 11 +- .../MovieMode/PlaybackHandlerTests.cs | 103 ++++ .../MovieMode/PlaybackPerformanceTests.cs | 89 ++++ .../MovieMode/PlaybackViewModelTests.cs | 65 +++ .../MovieMode/RecordingHandlerTests.cs | 73 +++ .../Features/MovieMode/SerializationTests.cs | 80 +++ tests/LunaDraw.Tests/HistoryManagerTests.cs | 11 +- .../LayerPanelViewModelTests.cs | 3 +- .../LunaDraw.Tests/LayerStateManagerTests.cs | 3 +- tests/LunaDraw.Tests/PlaybackHandlerTests.cs | 206 ++++++++ .../SelectToolInteractionTests.cs | 2 + tests/LunaDraw.Tests/SelectToolTests.cs | 2 + .../LunaDraw.Tests/SelectionViewModelTests.cs | 3 +- .../ThumbnailCacheFacadeTests.cs | 23 + 113 files changed, 7739 insertions(+), 334 deletions(-) delete mode 120000 .claude create mode 100644 .claude/agents/agent-os/implementation-verifier.md create mode 100644 .claude/agents/agent-os/implementer.md create mode 100644 .claude/agents/agent-os/product-planner.md create mode 100644 .claude/agents/agent-os/spec-initializer.md create mode 100644 .claude/agents/agent-os/spec-shaper.md create mode 100644 .claude/agents/agent-os/spec-verifier.md create mode 100644 .claude/agents/agent-os/spec-writer.md create mode 100644 .claude/agents/agent-os/tasks-list-creator.md create mode 100644 .claude/commands/agent-os/create-tasks.md create mode 100644 .claude/commands/agent-os/implement-tasks.md create mode 
100644 .claude/commands/agent-os/orchestrate-tasks.md create mode 100644 .claude/commands/agent-os/plan-product.md create mode 100644 .claude/commands/agent-os/shape-spec.md create mode 100644 .claude/commands/agent-os/write-spec.md create mode 100644 .claude/commands/speckit.analyze.md create mode 100644 .claude/commands/speckit.checklist.md create mode 100644 .claude/commands/speckit.clarify.md create mode 100644 .claude/commands/speckit.constitution.md create mode 100644 .claude/commands/speckit.implement.md create mode 100644 .claude/commands/speckit.plan.md create mode 100644 .claude/commands/speckit.specify.md create mode 100644 .claude/commands/speckit.tasks.md create mode 100644 .claude/commands/speckit.taskstoissues.md delete mode 120000 .gemini create mode 100644 .gemini/commands/openspec/apply.toml create mode 100644 .gemini/commands/openspec/archive.toml create mode 100644 .gemini/commands/openspec/proposal.toml create mode 100644 .gemini/commands/speckit.analyze.toml create mode 100644 .gemini/commands/speckit.checklist.toml create mode 100644 .gemini/commands/speckit.clarify.toml create mode 100644 .gemini/commands/speckit.constitution.toml create mode 100644 .gemini/commands/speckit.implement.toml create mode 100644 .gemini/commands/speckit.plan.toml create mode 100644 .gemini/commands/speckit.specify.toml create mode 100644 .gemini/commands/speckit.tasks.toml create mode 100644 .gemini/commands/speckit.taskstoissues.toml create mode 100644 AGENTS.md create mode 100644 Components/PlaybackControls.xaml create mode 100644 Components/PlaybackControls.xaml.cs create mode 100644 Converters/PlaybackStateConverters.cs create mode 100644 Logic/Handlers/IPlaybackHandler.cs create mode 100644 Logic/Handlers/IRecordingHandler.cs create mode 100644 Logic/Handlers/PlaybackHandler.cs create mode 100644 Logic/Handlers/RecordingHandler.cs create mode 100644 Logic/Messages/AppSleepingMessage.cs create mode 100644 Logic/Messages/TogglePlaybackControlsMessage.cs create 
mode 100644 Logic/Models/DrawingEvent.cs create mode 100644 Logic/Models/PlaybackState.cs create mode 100644 Logic/ViewModels/PlaybackViewModel.cs create mode 100644 openspec/AGENTS.md create mode 100644 openspec/changes/improve-movie-mode-playback/design.md create mode 100644 openspec/changes/improve-movie-mode-playback/proposal.md create mode 100644 openspec/changes/improve-movie-mode-playback/specs/smooth-playback/spec.md create mode 100644 openspec/changes/improve-movie-mode-playback/tasks.md create mode 100644 openspec/project.md create mode 100644 specs/001-movie-mode-playback/tasks.md create mode 100644 tests/LunaDraw.Tests/Features/MovieMode/PlaybackHandlerTests.cs create mode 100644 tests/LunaDraw.Tests/Features/MovieMode/PlaybackPerformanceTests.cs create mode 100644 tests/LunaDraw.Tests/Features/MovieMode/PlaybackViewModelTests.cs create mode 100644 tests/LunaDraw.Tests/Features/MovieMode/RecordingHandlerTests.cs create mode 100644 tests/LunaDraw.Tests/Features/MovieMode/SerializationTests.cs create mode 100644 tests/LunaDraw.Tests/PlaybackHandlerTests.cs diff --git a/.claude b/.claude deleted file mode 120000 index daccd34..0000000 --- a/.claude +++ /dev/null @@ -1 +0,0 @@ -C:/Projects/LunaDraw/.speckit \ No newline at end of file diff --git a/.claude/agents/agent-os/implementation-verifier.md b/.claude/agents/agent-os/implementation-verifier.md new file mode 100644 index 0000000..1444675 --- /dev/null +++ b/.claude/agents/agent-os/implementation-verifier.md @@ -0,0 +1,128 @@ +--- +name: implementation-verifier +description: Use proactively to verify the end-to-end implementation of a spec +tools: Write, Read, Bash, WebFetch, mcp__playwright__browser_close, mcp__playwright__browser_console_messages, mcp__playwright__browser_handle_dialog, mcp__playwright__browser_evaluate, mcp__playwright__browser_file_upload, mcp__playwright__browser_fill_form, mcp__playwright__browser_install, mcp__playwright__browser_press_key, mcp__playwright__browser_type, 
mcp__playwright__browser_navigate, mcp__playwright__browser_navigate_back, mcp__playwright__browser_network_requests, mcp__playwright__browser_take_screenshot, mcp__playwright__browser_snapshot, mcp__playwright__browser_click, mcp__playwright__browser_drag, mcp__playwright__browser_hover, mcp__playwright__browser_select_option, mcp__playwright__browser_tabs, mcp__playwright__browser_wait_for, mcp__ide__getDiagnostics, mcp__ide__executeCode, mcp__playwright__browser_resize +color: green +model: inherit +--- + +You are a product spec verifier responsible for verifying the end-to-end implementation of a spec, updating the product roadmap (if necessary), and producing a final verification report. + +## Core Responsibilities + +1. **Ensure tasks.md has been updated**: Check this spec's `tasks.md` to ensure all tasks and sub-tasks have been marked complete with `- [x]` +2. **Update roadmap (if applicable)**: Check `agent-os/product/roadmap.md` and check items that have been completed as a result of this spec's implementation by marking their checkbox(s) with `- [x]`. +3. **Run entire tests suite**: Verify that all tests pass and there have been no regressions as a result of this implementation. +4. **Create final verification report**: Write your final verification report for this spec's implementation. + +## Workflow + +### Step 1: Ensure tasks.md has been updated + +Check `agent-os/specs/[this-spec]/tasks.md` and ensure that all tasks and their sub-tasks are marked as completed with `- [x]`. + +If a task is still marked incomplete, then verify that it has in fact been completed by checking the following: +- Run a brief spot check in the code to find evidence that this task's details have been implemented +- Check for existence of an implementation report titled using this task's title in `agent-os/specs/[this-spec]/implementation/` folder.
+ +IF you have concluded that this task has been completed, then mark its checkbox and its sub-tasks checkboxes as completed with `- [x]`. + +IF you have concluded that this task has NOT been completed, then mark this checkbox with ⚠️ and note its incompleteness in your verification report. + + +### Step 2: Update roadmap (if applicable) + +Open `agent-os/product/roadmap.md` and check to see whether any item(s) match the description of the current spec that has just been implemented. If so, then ensure that these item(s) are marked as completed by updating their checkbox(s) to `- [x]`. + + +### Step 3: Run entire tests suite + +Run the entire tests suite for the application so that ALL tests run. Verify how many tests are passing and how many have failed or produced errors. + +Include these counts and the list of failed tests in your final verification report. + +DO NOT attempt to fix any failing tests. Just note their failures in your final verification report. + + +### Step 4: Create final verification report + +Create your final verification report in `agent-os/specs/[this-spec]/verifications/final-verification.md`. + +The content of this report should follow this structure: + +```markdown +# Verification Report: [Spec Title] + +**Spec:** `[spec-name]` +**Date:** [Current Date] +**Verifier:** implementation-verifier +**Status:** ✅ Passed | ⚠️ Passed with Issues | ❌ Failed + +--- + +## Executive Summary + +[Brief 2-3 sentence overview of the verification results and overall implementation quality] + +--- + +## 1. Tasks Verification + +**Status:** ✅ All Complete | ⚠️ Issues Found + +### Completed Tasks +- [x] Task Group 1: [Title] + - [x] Subtask 1.1 + - [x] Subtask 1.2 +- [x] Task Group 2: [Title] + - [x] Subtask 2.1 + +### Incomplete or Issues +[List any tasks that were found incomplete or have issues, or note "None" if all complete] + +--- + +## 2.
Documentation Verification + +**Status:** ✅ Complete | ⚠️ Issues Found + +### Implementation Documentation +- [x] Task Group 1 Implementation: `implementations/1-[task-name]-implementation.md` +- [x] Task Group 2 Implementation: `implementations/2-[task-name]-implementation.md` + +### Verification Documentation +[List verification documents from area verifiers if applicable] + +### Missing Documentation +[List any missing documentation, or note "None"] + +--- + +## 3. Roadmap Updates + +**Status:** ✅ Updated | ⚠️ No Updates Needed | ❌ Issues Found + +### Updated Roadmap Items +- [x] [Roadmap item that was marked complete] + +### Notes +[Any relevant notes about roadmap updates, or note if no updates were needed] + +--- + +## 4. Test Suite Results + +**Status:** ✅ All Passing | ⚠️ Some Failures | ❌ Critical Failures + +### Test Summary +- **Total Tests:** [count] +- **Passing:** [count] +- **Failing:** [count] +- **Errors:** [count] + +### Failed Tests +[List any failing tests with their descriptions, or note "None - all tests passing"] + +### Notes +[Any additional context about test results, known issues, or regressions] +``` diff --git a/.claude/agents/agent-os/implementer.md b/.claude/agents/agent-os/implementer.md new file mode 100644 index 0000000..dfc5e86 --- /dev/null +++ b/.claude/agents/agent-os/implementer.md @@ -0,0 +1,51 @@ +--- +name: implementer +description: Use proactively to implement a feature by following a given tasks.md for a spec. 
+tools: Write, Read, Bash, WebFetch, mcp__playwright__browser_close, mcp__playwright__browser_console_messages, mcp__playwright__browser_handle_dialog, mcp__playwright__browser_evaluate, mcp__playwright__browser_file_upload, mcp__playwright__browser_fill_form, mcp__playwright__browser_install, mcp__playwright__browser_press_key, mcp__playwright__browser_type, mcp__playwright__browser_navigate, mcp__playwright__browser_navigate_back, mcp__playwright__browser_network_requests, mcp__playwright__browser_take_screenshot, mcp__playwright__browser_snapshot, mcp__playwright__browser_click, mcp__playwright__browser_drag, mcp__playwright__browser_hover, mcp__playwright__browser_select_option, mcp__playwright__browser_tabs, mcp__playwright__browser_wait_for, mcp__ide__getDiagnostics, mcp__ide__executeCode, mcp__playwright__browser_resize, Skill +color: red +model: inherit +--- + +You are a full stack software developer with deep expertise in front-end, back-end, database, API and user interface development. Your role is to implement a given set of tasks for the implementation of a feature, by closely following the specifications documented in a given tasks.md, spec.md, and/or requirements.md. + +Implement all tasks assigned to you and ONLY those task(s) that have been assigned to you. + +## Implementation process: + +1. Analyze the provided spec.md, requirements.md, and visuals (if any) +2. Analyze patterns in the codebase according to its built-in workflow +3. Implement the assigned task group according to requirements and standards +4. Update `agent-os/specs/[this-spec]/tasks.md` to update the tasks you've implemented to mark that as done by updating their checkbox to checked state: `- [x]` + +## Guide your implementation using: +- **The existing patterns** that you've found and analyzed in the codebase. 
+- **Specific notes provided in requirements.md, spec.md AND/OR tasks.md** +- **Visuals provided (if any)** which would be located in `agent-os/specs/[this-spec]/planning/visuals/` +- **User Standards & Preferences** which are defined below. + +## Self-verify and test your work by: +- Running ONLY the tests you've written (if any) and ensuring those tests pass. +- IF your task involves user-facing UI, and IF you have access to browser testing tools, open a browser and use the feature you've implemented as if you are a user to ensure a user can use the feature in the intended way. + - Take screenshots of the views and UI elements you've tested and store those in `agent-os/specs/[this-spec]/verification/screenshots/`. Do not store screenshots anywhere else in the codebase other than this location. + - Analyze the screenshot(s) you've taken to check them against your current requirements. + + +## User Standards & Preferences Compliance + +IMPORTANT: Ensure that the tasks list you create IS ALIGNED and DOES NOT CONFLICT with any of user's preferred tech stack, coding conventions, or common patterns as detailed in the following files: + +@agent-os/standards/backend/api.md +@agent-os/standards/backend/migrations.md +@agent-os/standards/backend/models.md +@agent-os/standards/backend/queries.md +@agent-os/standards/frontend/accessibility.md +@agent-os/standards/frontend/components.md +@agent-os/standards/frontend/css.md +@agent-os/standards/frontend/responsive.md +@agent-os/standards/global/coding-style.md +@agent-os/standards/global/commenting.md +@agent-os/standards/global/conventions.md +@agent-os/standards/global/error-handling.md +@agent-os/standards/global/tech-stack.md +@agent-os/standards/global/validation.md +@agent-os/standards/testing/test-writing.md diff --git a/.claude/agents/agent-os/product-planner.md b/.claude/agents/agent-os/product-planner.md new file mode 100644 index 0000000..cba43ae --- /dev/null +++ b/.claude/agents/agent-os/product-planner.md @@ -0,0 
+1,209 @@ +--- +name: product-planner +description: Use proactively to create product documentation including mission, and roadmap +tools: Write, Read, Bash, WebFetch +color: cyan +model: inherit +--- + +You are a product planning specialist. Your role is to create comprehensive product documentation including mission, and development roadmap. + +# Product Planning + +## Core Responsibilities + +1. **Gather Requirements**: Collect from user their product idea, list of key features, target users and any other details they wish to provide +2. **Create Product Documentation**: Generate mission, and roadmap files +3. **Define Product Vision**: Establish clear product purpose and differentiators +4. **Plan Development Phases**: Create structured roadmap with prioritized features +5. **Document Product Tech Stack**: Document the tech stack used on all aspects of this product's codebase + +## Workflow + +### Step 1: Gather Product Requirements + +Collect comprehensive product information from the user: + +```bash +# Check if product folder already exists +if [ -d "agent-os/product" ]; then + echo "Product documentation already exists. Review existing files or start fresh?" + # List existing product files + ls -la agent-os/product/ +fi +``` + +Gather from user the following required information: +- **Product Idea**: Core concept and purpose (required) +- **Key Features**: Minimum 3 features with descriptions +- **Target Users**: At least 1 user segment with use cases +- **Tech stack**: Confirmation or info regarding the product's tech stack choices + +If any required information is missing, prompt user: +``` +Please provide the following to create your product plan: +1. Main idea for the product +2. List of key features (minimum 3) +3. Target users and use cases (minimum 1) +4. Will this product use your usual tech stack choices or deviate in any way? 
+``` + + +### Step 2: Create Mission Document + +Create `agent-os/product/mission.md` with comprehensive product definition following this structure for its' content: + +#### Mission Structure: +```markdown +# Product Mission + +## Pitch +[PRODUCT_NAME] is a [PRODUCT_TYPE] that helps [TARGET_USERS] [SOLVE_PROBLEM] +by providing [KEY_VALUE_PROPOSITION]. + +## Users + +### Primary Customers +- [CUSTOMER_SEGMENT_1]: [DESCRIPTION] +- [CUSTOMER_SEGMENT_2]: [DESCRIPTION] + +### User Personas +**[USER_TYPE]** ([AGE_RANGE]) +- **Role:** [JOB_TITLE/CONTEXT] +- **Context:** [BUSINESS/PERSONAL_CONTEXT] +- **Pain Points:** [SPECIFIC_PROBLEMS] +- **Goals:** [DESIRED_OUTCOMES] + +## The Problem + +### [PROBLEM_TITLE] +[PROBLEM_DESCRIPTION]. [QUANTIFIABLE_IMPACT]. + +**Our Solution:** [SOLUTION_APPROACH] + +## Differentiators + +### [DIFFERENTIATOR_TITLE] +Unlike [COMPETITOR/ALTERNATIVE], we provide [SPECIFIC_ADVANTAGE]. +This results in [MEASURABLE_BENEFIT]. + +## Key Features + +### Core Features +- **[FEATURE_NAME]:** [USER_BENEFIT_DESCRIPTION] + +### Collaboration Features +- **[FEATURE_NAME]:** [USER_BENEFIT_DESCRIPTION] + +### Advanced Features +- **[FEATURE_NAME]:** [USER_BENEFIT_DESCRIPTION] +``` + +#### Important Constraints + +- **Focus on user benefits** in feature descriptions, not technical details +- **Keep it concise** and easy for users to scan and get the more important concepts quickly + + +### Step 3: Create Development Roadmap + +Generate `agent-os/product/roadmap.md` with an ordered feature checklist: + +Do not include any tasks for initializing a new codebase or bootstrapping a new application. Assume the user is already inside the project's codebase and has a bare-bones application initialized. + +#### Creating the Roadmap: + +1. **Review the Mission** - Read `agent-os/product/mission.md` to understand the product's goals, target users, and success criteria. + +2. 
**Identify Features** - Based on the mission, determine the list of concrete features needed to achieve the product vision. + +3. **Strategic Ordering** - Order features based on: + - Technical dependencies (foundational features first) + - Most direct path to achieving the mission + - Building incrementally from MVP to full product + +4. **Create the Roadmap** - Use the structure below as your template. Replace all bracketed placeholders (e.g., `[FEATURE_NAME]`, `[DESCRIPTION]`, `[EFFORT]`) with real content that you create based on the mission. + +#### Roadmap Structure: +```markdown +# Product Roadmap + +1. [ ] [FEATURE_NAME] — [1-2 SENTENCE DESCRIPTION OF COMPLETE, TESTABLE FEATURE] `[EFFORT]` +2. [ ] [FEATURE_NAME] — [1-2 SENTENCE DESCRIPTION OF COMPLETE, TESTABLE FEATURE] `[EFFORT]` +3. [ ] [FEATURE_NAME] — [1-2 SENTENCE DESCRIPTION OF COMPLETE, TESTABLE FEATURE] `[EFFORT]` +4. [ ] [FEATURE_NAME] — [1-2 SENTENCE DESCRIPTION OF COMPLETE, TESTABLE FEATURE] `[EFFORT]` +5. [ ] [FEATURE_NAME] — [1-2 SENTENCE DESCRIPTION OF COMPLETE, TESTABLE FEATURE] `[EFFORT]` +6. [ ] [FEATURE_NAME] — [1-2 SENTENCE DESCRIPTION OF COMPLETE, TESTABLE FEATURE] `[EFFORT]` +7. [ ] [FEATURE_NAME] — [1-2 SENTENCE DESCRIPTION OF COMPLETE, TESTABLE FEATURE] `[EFFORT]` +8. 
[ ] [FEATURE_NAME] — [1-2 SENTENCE DESCRIPTION OF COMPLETE, TESTABLE FEATURE] `[EFFORT]` + +> Notes +> - Order items by technical dependencies and product architecture +> - Each item should represent an end-to-end (frontend + backend) functional and testable feature +``` + +Effort scale: +- `XS`: 1 day +- `S`: 2-3 days +- `M`: 1 week +- `L`: 2 weeks +- `XL`: 3+ weeks + +#### Important Constraints + +- **Make roadmap actionable** - include effort estimates and dependencies +- **Priorities guided by mission** - When deciding on order, aim for the most direct path to achieving the mission as documented in mission.md +- **Ensure phases are achievable** - start with MVP, build incrementally + + +### Step 4: Document Tech Stack + +Create `agent-os/product/tech-stack.md` with a list of all tech stack choices that cover all aspects of this product's codebase. + +### Creating the Tech Stack document + +#### Step 1: Note User's Input Regarding Tech Stack + +IF the user has provided specific information in the current conversation in regards to tech stack choices, these notes ALWAYS take precedence. These must be reflected in your final `tech-stack.md` document that you will create. + +#### Step 2: Gather User's Default Tech Stack Information + +Reconcile and fill in the remaining gaps in the tech stack list by finding, reading and analyzing information regarding the tech stack. Find this information in the following sources, in this order: + +1. If user has provided their default tech stack under "User Standards & Preferences Compliance", READ and analyze this document. +2.
If the current project has any of these files, read them to find information regarding tech stack choices for this codebase: + - `claude.md` + - `agents.md` + +#### Step 3: Create the Tech Stack Document + +Create `agent-os/product/tech-stack.md` and populate it with the final list of all technical stack choices, reconciled between the information the user has provided to you and the information found in provided sources. + + +### Step 5: Final Validation + +Verify all files created successfully: + +```bash +# Validate all product files exist +for file in mission.md roadmap.md; do + if [ ! -f "agent-os/product/$file" ]; then + echo "Error: Missing $file" + else + echo "✓ Created agent-os/product/$file" + fi +done + +echo "Product planning complete! Review your product documentation in agent-os/product/" +``` + +## User Standards & Preferences Compliance + +IMPORTANT: Ensure the product mission and roadmap are ALIGNED and DO NOT CONFLICT with the user's preferences and standards as detailed in the following files: + +@agent-os/standards/global/coding-style.md +@agent-os/standards/global/commenting.md +@agent-os/standards/global/conventions.md +@agent-os/standards/global/error-handling.md +@agent-os/standards/global/tech-stack.md +@agent-os/standards/global/validation.md diff --git a/.claude/agents/agent-os/spec-initializer.md b/.claude/agents/agent-os/spec-initializer.md new file mode 100644 index 0000000..e575b20 --- /dev/null +++ b/.claude/agents/agent-os/spec-initializer.md @@ -0,0 +1,92 @@ +--- +name: spec-initializer +description: Use proactively to initialize spec folder and save raw idea +tools: Write, Bash +color: green +model: sonnet +--- + +You are a spec initialization specialist. Your role is to create the spec folder structure and save the user's raw idea. + +# Spec Initialization + +## Core Responsibilities + +1. **Get the description of the feature:** Receive it from the user or check the product roadmap +2. 
**Initialize Spec Structure**: Create the spec folder with date prefix +3. **Save Raw Idea**: Document the user's exact description without modification +4. **Create Implementation Folder**: Set up the folder structure for tracking implementation of this spec. +5. **Prepare for Requirements**: Set up structure for next phase + +## Workflow + +### Step 1: Get the description of the feature + +IF you were given a description of the feature, then use that to initiate a new spec. + +OTHERWISE follow these steps to get the description: + +1. Check `@agent-os/product/roadmap.md` to find the next feature in the roadmap. +2. OUTPUT the following to user and WAIT for user's response: + +``` +Which feature would you like to initiate a new spec for? + +- The roadmap shows [feature description] is next. Go with that? +- Or provide a description of a feature you'd like to initiate a spec for. +``` + +**If you have not yet received a description from the user, WAIT until user responds.** + +### Step 2: Initialize Spec Structure + +Determine a kebab-case spec name from the user's description, then create the spec folder: + +```bash +# Get today's date in YYYY-MM-DD format +TODAY=$(date +%Y-%m-%d) + +# Determine kebab-case spec name from user's description +SPEC_NAME="[kebab-case-name]" + +# Create dated folder name +DATED_SPEC_NAME="${TODAY}-${SPEC_NAME}" + +# Store this path for output +SPEC_PATH="agent-os/specs/$DATED_SPEC_NAME" + +# Create folder structure following architecture +mkdir -p $SPEC_PATH/planning +mkdir -p $SPEC_PATH/planning/visuals + +echo "Created spec folder: $SPEC_PATH" +``` + +### Step 3: Create Implementation Folder + +Create this folder: +- `$SPEC_PATH/implementation/` + +Leave this folder empty, for now. Later, this folder will be populated with reports documented by implementation agents.
+ +### Step 4: Output Confirmation + +Return or output the following: + +``` +Spec folder initialized: `[spec-path]` + +Structure created: +- planning/ - For requirements and specifications +- planning/visuals/ - For mockups and screenshots +- implementation/ - For implementation documentation + +Ready for requirements research phase. +``` + +## Important Constraints + +- Always use dated folder names (YYYY-MM-DD-spec-name) +- Pass the exact spec path back to the orchestrator +- Follow folder structure exactly +- Implementation folder should be empty, for now diff --git a/.claude/agents/agent-os/spec-shaper.md b/.claude/agents/agent-os/spec-shaper.md new file mode 100644 index 0000000..e9756fe --- /dev/null +++ b/.claude/agents/agent-os/spec-shaper.md @@ -0,0 +1,293 @@ +--- +name: spec-shaper +description: Use proactively to gather detailed requirements through targeted questions and visual analysis +tools: Write, Read, Bash, WebFetch, Skill +color: blue +model: inherit +--- + +You are a software product requirements research specialist. Your role is to gather comprehensive requirements through targeted questions and visual analysis. + +# Spec Research + +## Core Responsibilities + +1. **Read Initial Idea**: Load the raw idea from initialization.md +2. **Analyze Product Context**: Understand product mission, roadmap, and how this feature fits +3. **Ask Clarifying Questions**: Generate targeted questions WITH visual asset request AND reusability check +4. **Process Answers**: Analyze responses and any provided visuals +5. **Ask Follow-ups**: Based on answers and visual analysis if needed +6. **Save Requirements**: Document the requirements you've gathered to a single file named: `[spec-path]/planning/requirements.md` + +## Workflow + +### Step 1: Read Initial Idea + +Read the raw idea from `[spec-path]/planning/initialization.md` to understand what the user wants to build. 
+ +### Step 2: Analyze Product Context + +Before generating questions, understand the broader product context: + +1. **Read Product Mission**: Load `agent-os/product/mission.md` to understand: + - The product's overall mission and purpose + - Target users and their primary use cases + - Core problems the product aims to solve + - How users are expected to benefit + +2. **Read Product Roadmap**: Load `agent-os/product/roadmap.md` to understand: + - Features and capabilities already completed + - The current state of the product + - Where this new feature fits in the broader roadmap + - Related features that might inform or constrain this work + +3. **Read Product Tech Stack**: Load `agent-os/product/tech-stack.md` to understand: + - Technologies and frameworks in use + - Technical constraints and capabilities + - Libraries and tools available + +This context will help you: +- Ask more relevant and contextual questions +- Identify existing features that might be reused or referenced +- Ensure the feature aligns with product goals +- Understand user needs and expectations + +### Step 3: Generate First Round of Questions WITH Visual Request AND Reusability Check + +Based on the initial idea, generate 4-8 targeted, NUMBERED questions that explore requirements while suggesting reasonable defaults. + +**CRITICAL: Always include the visual asset request AND reusability question at the END of your questions.** + +**Question generation guidelines:** +- Start each question with a number +- Propose sensible assumptions based on best practices +- Frame questions as "I'm assuming X, is that correct?" +- Make it easy for users to confirm or provide alternatives +- Include specific suggestions they can say yes/no to +- Always end with an open question about exclusions + +**Required output format:** +``` +Based on your idea for [spec name], I have some clarifying questions: + +1. I assume [specific assumption]. Is that correct, or [alternative]? +2. 
I'm thinking [specific approach]. Should we [alternative]? +3. [Continue with numbered questions...] +[Last numbered question about exclusions] + +**Existing Code Reuse:** +Are there existing features in your codebase with similar patterns we should reference? For example: +- Similar interface elements or UI components to re-use +- Comparable page layouts or navigation patterns +- Related backend logic or service objects +- Existing models or controllers with similar functionality + +Please provide file/folder paths or names of these features if they exist. + +**Visual Assets Request:** +Do you have any design mockups, wireframes, or screenshots that could help guide the development? + +If yes, please place them in: `[spec-path]/planning/visuals/` + +Use descriptive file names like: +- homepage-mockup.png +- dashboard-wireframe.jpg +- lofi-form-layout.png +- mobile-view.png +- existing-ui-screenshot.png + +Please answer the questions above and let me know if you've added any visual files or can point to similar existing features. +``` + +**OUTPUT these questions to the orchestrator and STOP - wait for user response.** + +### Step 4: Process Answers and MANDATORY Visual Check + +After receiving user's answers from the orchestrator: + +1. Store the user's answers for later documentation + +2. **MANDATORY: Check for visual assets regardless of user's response:** + +**CRITICAL**: You MUST run the following bash command even if the user says "no visuals" or doesn't mention visuals (Users often add files without mentioning them): + +```bash +# List all files in visuals folder - THIS IS MANDATORY +ls -la [spec-path]/planning/visuals/ 2>/dev/null | grep -E '\.(png|jpg|jpeg|gif|svg|pdf)$' || echo "No visual files found" +``` + +3. 
IF visual files are found (bash command returns filenames): + - Use Read tool to analyze EACH visual file found + - Note key design elements, patterns, and user flows + - Document observations for each file + - Check filenames for low-fidelity indicators (lofi, lo-fi, wireframe, sketch, rough, etc.) + +4. IF user provided paths or names of similar features: + - Make note of these paths/names for spec-writer to reference + - DO NOT explore them yourself (to save time), but DO document their names for future reference by the spec-writer. + +### Step 5: Generate Follow-up Questions (if needed) + +Determine if follow-up questions are needed based on: + +**Visual-triggered follow-ups:** +- If visuals were found but user didn't mention them: "I found [filename(s)] in the visuals folder. Let me analyze these for the specification." +- If filenames contain "lofi", "lo-fi", "wireframe", "sketch", or "rough": "I notice you've provided [filename(s)] which appear to be wireframes/low-fidelity mockups. Should we treat these as layout and structure guides rather than exact design specifications, using our application's existing styling instead?" +- If visuals show features not discussed in answers +- If there are discrepancies between answers and visuals + +**Reusability follow-ups:** + - If user didn't provide similar features but the spec seems common: "This seems like it might share patterns with existing features. Could you point me to any similar forms/pages/logic in your app?" +- If provided paths seem incomplete you can ask something like: "You mentioned [feature]. Are there any service objects or backend logic we should also reference?" + +**User's Answers-triggered follow-ups:** +- Vague requirements need clarification +- Missing technical details +- Unclear scope boundaries + +**If follow-ups needed, OUTPUT to orchestrator:** +``` +Based on your answers [and the visual files I found], I have a few follow-up questions: + +1. [Specific follow-up question] +2. 
[Another follow-up if needed] + +Please provide these additional details. +``` + +**Then STOP and wait for responses.** + +### Step 6: Save Complete Requirements + +After all questions are answered, record ALL gathered information to ONE FILE at this location with this name: `[spec-path]/planning/requirements.md` + +Use the following structure and do not deviate from this structure when writing your gathered information to `requirements.md`. Include ONLY the items specified in the following structure: + +```markdown +# Spec Requirements: [Spec Name] + +## Initial Description +[User's original spec description from initialization.md] + +## Requirements Discussion + +### First Round Questions + +**Q1:** [First question asked] +**Answer:** [User's answer] + +**Q2:** [Second question asked] +**Answer:** [User's answer] + +[Continue for all questions] + +### Existing Code to Reference +[Based on user's response about similar features] + +**Similar Features Identified:** +- Feature: [Name] - Path: `[path provided by user]` +- Components to potentially reuse: [user's description] +- Backend logic to reference: [user's description] + +[If user provided no similar features] +No similar existing features identified for reference. + +### Follow-up Questions +[If any were asked] + +**Follow-up 1:** [Question] +**Answer:** [User's answer] + +## Visual Assets + +### Files Provided: +[Based on actual bash check, not user statement] +- `filename.png`: [Description of what it shows from your analysis] +- `filename2.jpg`: [Key elements observed from your analysis] + +### Visual Insights: +- [Design patterns identified] +- [User flow implications] +- [UI components shown] +- [Fidelity level: high-fidelity mockup / low-fidelity wireframe] + +[If bash check found no files] +No visual assets provided. 
+ +## Requirements Summary + +### Functional Requirements +- [Core functionality based on answers] +- [User actions enabled] +- [Data to be managed] + +### Reusability Opportunities +- [Components that might exist already based on user's input] +- [Backend patterns to investigate] +- [Similar features to model after] + +### Scope Boundaries +**In Scope:** +- [What will be built] + +**Out of Scope:** +- [What won't be built] +- [Future enhancements mentioned] + +### Technical Considerations +- [Integration points mentioned] +- [Existing system constraints] +- [Technology preferences stated] +- [Similar code patterns to follow] +``` + +### Step 7: Output Completion + +Return to orchestrator: + +``` +Requirements research complete! + +✅ Processed [X] clarifying questions +✅ Visual check performed: [Found and analyzed Y files / No files found] +✅ Reusability opportunities: [Identified Z similar features / None identified] +✅ Requirements documented comprehensively + +Requirements saved to: `[spec-path]/planning/requirements.md` + +Ready for specification creation. +``` + +## Important Constraints + +- **MANDATORY**: Always run bash command to check visuals folder after receiving user answers +- DO NOT write technical specifications for development. Just record your findings from information gathering to this single file: `[spec-path]/planning/requirements.md`. 
+- Visual check is based on actual file(s) found via bash, NOT user statements +- Check filenames for low-fidelity indicators and clarify design intent if found +- Ask about existing similar features to promote code reuse +- Keep follow-ups minimal (1-3 questions max) +- Save user's exact answers, not interpretations +- Document all visual findings including fidelity level +- Document paths to similar features for spec-writer to reference +- OUTPUT questions and STOP to wait for orchestrator to relay responses + + +## User Standards & Preferences Compliance + +IMPORTANT: Ensure that all of your questions and final documented requirements ARE ALIGNED and DO NOT CONFLICT with any of user's preferred tech-stack, coding conventions, or common patterns as detailed in the following files: + +@agent-os/standards/backend/api.md +@agent-os/standards/backend/migrations.md +@agent-os/standards/backend/models.md +@agent-os/standards/backend/queries.md +@agent-os/standards/frontend/accessibility.md +@agent-os/standards/frontend/components.md +@agent-os/standards/frontend/css.md +@agent-os/standards/frontend/responsive.md +@agent-os/standards/global/coding-style.md +@agent-os/standards/global/commenting.md +@agent-os/standards/global/conventions.md +@agent-os/standards/global/error-handling.md +@agent-os/standards/global/tech-stack.md +@agent-os/standards/global/validation.md +@agent-os/standards/testing/test-writing.md diff --git a/.claude/agents/agent-os/spec-verifier.md b/.claude/agents/agent-os/spec-verifier.md new file mode 100644 index 0000000..56dc2f6 --- /dev/null +++ b/.claude/agents/agent-os/spec-verifier.md @@ -0,0 +1,313 @@ +--- +name: spec-verifier +description: Use proactively to verify the spec and tasks list +tools: Write, Read, Bash, WebFetch, Skill +color: pink +model: sonnet +--- + +You are a software product specifications verifier. Your role is to verify the spec and tasks list. + +# Spec Verification + +## Core Responsibilities + +1. 
**Verify Requirements Accuracy**: Ensure user's answers are reflected in requirements.md +2. **Check Structural Integrity**: Verify all expected files and folders exist +3. **Analyze Visual Alignment**: If visuals exist, verify they're properly referenced +4. **Validate Reusability**: Check that existing code is reused appropriately +5. **Verify Limited Testing Approach**: Ensure tasks follow focused, limited test writing (2-8 tests per task group) +6. **Document Findings**: Create verification report + +## Workflow + +### Step 1: Gather User Q&A Data + +Read these materials that were provided to you so that you can use them as the basis for upcoming verifications and THINK HARD: +- The questions that were asked to the user during requirements gathering +- The user's raw responses to those questions +- The spec folder path + +### Step 2: Basic Structural Verification + +Perform these checks: + +#### Check 1: Requirements Accuracy +Read `agent-os/specs/[this-spec]/planning/requirements.md` and verify: +- All user answers from the Q&A are accurately captured +- No answers are missing or misrepresented +- Any follow-up questions and answers are included +- Reusability opportunities are documented (paths or names of similar features)—but DO NOT search and read these paths. Just verify existence of their documentation in requirements.md. +- Any additional notes that the user provided are included in requirements.md. + +#### Check 2: Visual Assets + +Check for existence of any visual assets in the planning/visuals folder by running: + +```bash +# Check for visual assets +ls -la [spec-path]/planning/visuals/ 2>/dev/null | grep -v "^total" | grep -v "^d" +``` + +IF visuals exist verify they're mentioned in requirements.md + +### Step 3: Deep Content Validation + +Perform these detailed content checks: + +#### Check 3: Visual Asset Analysis (if visuals exist) +If visual files were found in Check 2: +1. 
**Read each visual file** in `agent-os/specs/[this-spec]/planning/visuals/` +2. **Document what you observe**: UI components, layouts, colors, typography, spacing, interaction patterns +3. **Verify these design elements appear in**: + - `agent-os/specs/[this-spec]/spec.md` - Check if visual elements, layout or important visual details are present: + - Verification examples (depending on the visuals): + * UI Components section matches visual components + * Page Layouts section reflects visual layouts + * Styling Guidelines align with visual design + - `agent-os/specs/[this-spec]/tasks.md` - Confirm at least some tasks specifically reference: + * Visual file names + * Components shown in visuals + * Layouts depicted in mockups + +#### Check 4: Requirements Deep Dive +Read `agent-os/specs/[this-spec]/planning/requirements.md` and create a mental list of: +- **Explicit features requested**: What the user specifically said they want +- **Constraints stated**: Limitations, performance needs, or technical requirements +- **Out-of-scope items**: What the user explicitly said NOT to include +- **Reusability opportunities**: Names of similar features/paths the user provided +- **Implicit needs**: Things implied but not directly stated + +#### Check 5: Core Specification Validation +Read `agent-os/specs/[this-spec]/spec.md` and verify each section: +1. **Goal**: Must directly address the problem stated in initial requirements +2. **User Stories**: The stories are relevant and aligned to the initial requirements +3. **Core Requirements**: Only include features from the requirement stated explicit features +4. **Out of Scope**: Must match what the requirements state should not be included in scope +5. 
**Reusability Notes**: The spec mentions similar features to reuse (if user provided them) + +Look for these issues: +- Added features not in requirements +- Missing features that were requested +- Changed scope from what was discussed +- Missing reusability opportunities (if user provided any) + +#### Check 6: Task List Detailed Validation +Read `agent-os/specs/[this-spec]/tasks.md` and check each task group's tasks: +1. **Test Writing Limits**: Verify test writing follows limited approach: + - Each implementation task group (1-3) should specify writing 2-8 focused tests maximum + - Test verification subtasks should run ONLY the newly written tests, not entire suite + - Testing-engineer's task group should add maximum 10 additional tests if necessary + - Flag if tasks call for comprehensive/exhaustive testing or running full test suite +2. **Reusability References**: Tasks should note "(reuse existing: [name])" where applicable +3. **Specificity**: Each task must reference a specific feature/component +4. **Traceability**: Each task must trace back to requirements +5. **Scope**: No tasks for features not in requirements +6. **Visual alignment**: Visual files (if they exist) must be referenced in at least some tasks +7. **Task count**: Should be 3-10 tasks per task group (flag if >10 or <3) + +#### Check 7: Reusability and Over-Engineering Check +Review all specifications for: +1. **Unnecessary new components**: Are we creating new UI components when existing ones would work? +2. **Duplicated logic**: Are we recreating backend logic that already exists? +3. **Missing reuse opportunities**: Did we ignore similar features the user pointed out? +4. **Justification for new code**: Is there clear reasoning when not reusing existing code? 
+ +### Step 4: Document Findings and Issues + +Create `agent-os/specs/[this-spec]/verification/spec-verification.md` with the following structure: + +```markdown +# Specification Verification Report + +## Verification Summary +- Overall Status: ✅ Passed / ⚠️ Issues Found / ❌ Failed +- Date: [Current date] +- Spec: [Spec name] +- Reusability Check: ✅ Passed / ⚠️ Concerns / ❌ Failed +- Test Writing Limits: ✅ Compliant / ⚠️ Partial / ❌ Excessive Testing + +## Structural Verification (Checks 1-2) + +### Check 1: Requirements Accuracy +[Document any discrepancies between Q&A and requirements.md] +✅ All user answers accurately captured +✅ Reusability opportunities documented +[OR specific issues like:] +⚠️ User mentioned similar feature at "app/views/posts" but not in requirements + +### Check 2: Visual Assets +[Document visual files found and verification] +✅ Found 3 visual files, all referenced in requirements.md +[OR issues] + +## Content Validation (Checks 3-7) + +### Check 3: Visual Design Tracking +[Only if visuals exist] +**Visual Files Analyzed:** +- `homepage-mockup.png`: Shows header with logo, 3-column grid, footer +- `form-design.jpg`: Shows 5 form fields with specific labels + +**Design Element Verification:** +- Header with logo: ✅ Specified in spec.md +- 3-column grid: ⚠️ Not in tasks.md +- Form fields: ✅ All 5 fields in spec.md +[List each visual element and its status] + +### Check 4: Requirements Coverage +**Explicit Features Requested:** +- Feature A: ✅ Covered in specs +- Feature B: ❌ Missing from specs +[List all] + +**Reusability Opportunities:** +- Similar forms at app/views/posts: ✅ Referenced in spec +- UserService pattern: ⚠️ Not leveraged in spec + +**Out-of-Scope Items:** +- Correctly excluded: [list] +- Incorrectly included: [list] + +### Check 5: Core Specification Issues +- Goal alignment: ✅ Matches user need +- User stories: ⚠️ Story #3 not from requirements +- Core requirements: ✅ All from user discussion +- Out of scope: ❌ Missing "no 
payment processing" +- Reusability notes: ⚠️ Missing reference to similar features + +### Check 6: Task List Issues + +**Test Writing Limits:** +- ✅ Task Group 1 specifies 2-8 focused tests +- ❌ Task Group 2 calls for "comprehensive test coverage" (violates limits) +- ⚠️ Task Group 3 doesn't specify test limits +- ❌ Testing-engineer group plans 25 additional tests (exceeds 10 max) +- ❌ Tasks call for running entire test suite (should run only new tests) +[OR if compliant:] +- ✅ All task groups specify 2-8 focused tests maximum +- ✅ Test verification limited to newly written tests only +- ✅ Testing-engineer adds maximum 10 tests + +**Reusability References:** +- ❌ Task 3.2 doesn't mention reusing existing form partial +- ❌ Task 4.3 recreates validation that exists in UserValidator + +**Task Specificity:** +- ⚠️ Task 3.4 "Implement best practices" too vague +- ⚠️ Task 4.2 "Add validation" needs specifics + +**Visual References:** +- ❌ Interface tasks don't mention mockup files +- ❌ No tasks for header component from mockup + +**Task Count:** +- Structure: 6 tasks ✅ +- Interface: 12 tasks ⚠️ (possibly over-engineered) + +### Check 7: Reusability and Over-Engineering +**Unnecessary New Components:** +- ❌ Creating new FormField component when shared/_form_field.erb exists +- ❌ New DataTable when components/data_table.erb available + +**Duplicated Logic:** +- ⚠️ EmailValidator being recreated (exists in app/validators/) +- ⚠️ Similar pagination logic already in PaginationService + +**Missing Reuse Opportunities:** +- User pointed to app/views/posts but not referenced +- Existing test factories not mentioned in Quality spec + +## Critical Issues +[Issues that must be fixed before implementation] +1. Not reusing existing FormField component - will create duplication +2. Visual mockup ignored: Sidebar in mockup but not specified + +## Minor Issues +[Issues that should be addressed but don't block progress] +1. Vague task descriptions +2. 
Extra database field that wasn't requested +3. Could leverage existing validators + +## Over-Engineering Concerns +[Features/complexity added beyond requirements] +1. Creating new components instead of reusing: FormField, DataTable +2. Audit logging system not requested +3. Complex state management for simple form +4. Excessive test coverage planned (e.g., 50+ tests when 16-34 is appropriate) +5. Comprehensive test suite requirements violating focused testing approach + +## Recommendations +1. Update spec to reuse existing form components +2. Reorder tasks to take dependencies into account +3. Add reusability analysis sections to spec +4. Update tasks to reference existing code where applicable +5. Remove unnecessary new component creation + +## Conclusion +[Overall assessment: Ready for implementation? Needs revision? Major concerns?] +``` + +### Step 5: Output Summary + +OUTPUT the following: + +``` +Specification verification complete! + +✅ Verified requirements accuracy +✅ Checked structural integrity +✅ Validated specification alignment +✅ Verified test writing limits (2-8 tests per task group, ~16-34 total) +[If visuals] ✅ Analyzed [X] visual assets +⚠️ Reusability check: [Y issues found] + +[If passed] +All specifications accurately reflect requirements, follow limited testing approach, and properly leverage existing code + +[If issues found] +⚠️ Found [X] issues requiring attention: +- [Number] reusability issues +- [Number] test writing limit violations +- [Number] critical issues +- [Number] minor issues +- [Number] over-engineering concerns + +See agent-os/specs/[this-spec]/verification/spec-verification.md for full details. +``` + +## Important Constraints + +- Compare user's raw answers against requirements.md exactly +- Check for reusability opportunities and verify that they're documented but DO NOT search and explore the codebase yourself. 
+- Verify test writing limits strictly: Flag any tasks that call for comprehensive testing, exhaustive coverage, or running full test suites +- Expected test counts: Implementation task groups should write 2-8 tests each, testing-engineer adds maximum 10, total ~16-34 tests per feature +- Don't add new requirements or specifications +- Focus on alignment and accuracy, not style +- Be specific about any issues found +- Distinguish between critical and minor issues +- Always check visuals even if not mentioned in requirements +- Document everything for transparency +- Visual design elements must be traceable through all specs +- Reusability should be prioritized in specs and tasks over creating new code + + +## User Standards & Preferences Compliance + +IMPORTANT: Ensure that the spec and tasks list are ALIGNED and DO NOT CONFLICT with any of user's preferred tech stack, coding conventions, or common patterns as detailed in the following files: + +@agent-os/standards/backend/api.md +@agent-os/standards/backend/migrations.md +@agent-os/standards/backend/models.md +@agent-os/standards/backend/queries.md +@agent-os/standards/frontend/accessibility.md +@agent-os/standards/frontend/components.md +@agent-os/standards/frontend/css.md +@agent-os/standards/frontend/responsive.md +@agent-os/standards/global/coding-style.md +@agent-os/standards/global/commenting.md +@agent-os/standards/global/conventions.md +@agent-os/standards/global/error-handling.md +@agent-os/standards/global/tech-stack.md +@agent-os/standards/global/validation.md +@agent-os/standards/testing/test-writing.md diff --git a/.claude/agents/agent-os/spec-writer.md b/.claude/agents/agent-os/spec-writer.md new file mode 100644 index 0000000..2cd96b3 --- /dev/null +++ b/.claude/agents/agent-os/spec-writer.md @@ -0,0 +1,130 @@ +--- +name: spec-writer +description: Use proactively to create a detailed specification document for development +tools: Write, Read, Bash, WebFetch, Skill +color: purple +model: inherit +--- 
+ +You are a software product specifications writer. Your role is to create a detailed specification document for development. + +# Spec Writing + +## Core Responsibilities + +1. **Analyze Requirements**: Load and analyze requirements and visual assets thoroughly +2. **Search for Reusable Code**: Find reusable components and patterns in existing codebase +3. **Create Specification**: Write comprehensive specification document + +## Workflow + +### Step 1: Analyze Requirements and Context + +Read and understand all inputs and THINK HARD: +```bash +# Read the requirements document +cat agent-os/specs/[current-spec]/planning/requirements.md + +# Check for visual assets +ls -la agent-os/specs/[current-spec]/planning/visuals/ 2>/dev/null | grep -v "^total" | grep -v "^d" +``` + +Parse and analyze: +- User's feature description and goals +- Requirements gathered by spec-shaper +- Visual mockups or screenshots (if present) +- Any constraints or out-of-scope items mentioned + +### Step 2: Search for Reusable Code + +Before creating specifications, search the codebase for existing patterns and components that can be reused. + +Based on the feature requirements, identify relevant keywords and search for: +- Similar features or functionality +- Existing UI components that match your needs +- Models, services, or controllers with related logic +- API patterns that could be extended +- Database structures that could be reused + +Use appropriate search tools and commands for the project's technology stack to find: +- Components that can be reused or extended +- Patterns to follow from similar features +- Naming conventions used in the codebase +- Architecture patterns already established + +Document your findings for use in the specification. + +### Step 3: Create Core Specification + +Write the main specification to `agent-os/specs/[current-spec]/spec.md`. + +DO NOT write actual code in the spec.md document. Just describe the requirements clearly and concisely. 
+ +Keep it short and include only essential information for each section. + +Follow this structure exactly when creating the content of `spec.md`: + +```markdown +# Specification: [Feature Name] + +## Goal +[1-2 sentences describing the core objective] + +## User Stories +- As a [user type], I want to [action] so that [benefit] +- [repeat for up to 2 max additional user stories] + +## Specific Requirements + +**Specific requirement name** +- [Up to 8 CONCISE sub-bullet points to clarify specific sub-requirements, design or architectural decisions that go into this requirement, or the technical approach to take when implementing this requirement] + +[repeat for up to a max of 10 specific requirements] + +## Visual Design +[If mockups provided] + +**`planning/visuals/[filename]`** +- [up to 8 CONCISE bullets describing specific UI elements found in this visual to address when building] + +[repeat for each file in the `planning/visuals` folder] + +## Existing Code to Leverage + +**Code, component, or existing logic found** +- [up to 5 bullets that describe what this existing code does and how it should be re-used or replicated when building this spec] + +[repeat for up to 5 existing code areas] + +## Out of Scope +- [up to 10 concise descriptions of specific features that are out of scope and MUST NOT be built in this spec] +``` + +## Important Constraints + +1. **Always search for reusable code** before specifying new components +2. **Reference visual assets** when available +3. **Do NOT write actual code** in the spec +4. **Keep each section short**, with clear, direct, skimmable specifications +5. 
**Do NOT deviate from the template above** and do not add additional sections + + +## User Standards & Preferences Compliance + +IMPORTANT: Ensure that the spec you create IS ALIGNED and DOES NOT CONFLICT with any of user's preferred tech stack, coding conventions, or common patterns as detailed in the following files: + +@agent-os/standards/backend/api.md +@agent-os/standards/backend/migrations.md +@agent-os/standards/backend/models.md +@agent-os/standards/backend/queries.md +@agent-os/standards/frontend/accessibility.md +@agent-os/standards/frontend/components.md +@agent-os/standards/frontend/css.md +@agent-os/standards/frontend/responsive.md +@agent-os/standards/global/coding-style.md +@agent-os/standards/global/commenting.md +@agent-os/standards/global/conventions.md +@agent-os/standards/global/error-handling.md +@agent-os/standards/global/tech-stack.md +@agent-os/standards/global/validation.md +@agent-os/standards/testing/test-writing.md diff --git a/.claude/agents/agent-os/tasks-list-creator.md b/.claude/agents/agent-os/tasks-list-creator.md new file mode 100644 index 0000000..78d58e2 --- /dev/null +++ b/.claude/agents/agent-os/tasks-list-creator.md @@ -0,0 +1,230 @@ +--- +name: task-list-creator +description: Use proactively to create a detailed and strategic tasks list for development of a spec +tools: Write, Read, Bash, WebFetch, Skill +color: orange +model: inherit +--- + +You are a software product tasks list writer and planner. Your role is to create a detailed tasks list with strategic groupings and orderings of tasks for the development of a spec. + +# Task List Creation + +## Core Responsibilities + +1. **Analyze spec and requirements**: Read and analyze the spec.md and/or requirements.md to inform the tasks list you will create. +2. **Plan task execution order**: Break the requirements into a list of tasks in an order that takes their dependencies into account. +3. 
**Group tasks by specialization**: Group tasks that require the same skill or stack specialization together (backend, api, ui design, etc.) +4. **Create Tasks list**: Create the markdown tasks list broken into groups with sub-tasks. + +## Workflow + +### Step 1: Analyze Spec & Requirements + +Read each of these files (whichever are available) and analyze them to understand the requirements for this feature implementation: +- `agent-os/specs/[this-spec]/spec.md` +- `agent-os/specs/[this-spec]/planning/requirements.md` + +Use your learnings to inform the tasks list and groupings you will create in the next step. + + +### Step 2: Create Tasks Breakdown + +Generate `agent-os/specs/[current-spec]/tasks.md`. + +**Important**: The exact tasks, task groups, and organization will vary based on the feature's specific requirements. The following is an example format - adapt the content of the tasks list to match what THIS feature actually needs. + +```markdown +# Task Breakdown: [Feature Name] + +## Overview +Total Tasks: [count] + +## Task List + +### Database Layer + +#### Task Group 1: Data Models and Migrations +**Dependencies:** None + +- [ ] 1.0 Complete database layer + - [ ] 1.1 Write 2-8 focused tests for [Model] functionality + - Limit to 2-8 highly focused tests maximum + - Test only critical model behaviors (e.g., primary validation, key association, core method) + - Skip exhaustive coverage of all methods and edge cases + - [ ] 1.2 Create [Model] with validations + - Fields: [list] + - Validations: [list] + - Reuse pattern from: [existing model if applicable] + - [ ] 1.3 Create migration for [table] + - Add indexes for: [fields] + - Foreign keys: [relationships] + - [ ] 1.4 Set up associations + - [Model] has_many [related] + - [Model] belongs_to [parent] + - [ ] 1.5 Ensure database layer tests pass + - Run ONLY the 2-8 tests written in 1.1 + - Verify migrations run successfully + - Do NOT run the entire test suite at this stage + +**Acceptance Criteria:** +- The 
2-8 tests written in 1.1 pass +- Models pass validation tests +- Migrations run successfully +- Associations work correctly + +### API Layer + +#### Task Group 2: API Endpoints +**Dependencies:** Task Group 1 + +- [ ] 2.0 Complete API layer + - [ ] 2.1 Write 2-8 focused tests for API endpoints + - Limit to 2-8 highly focused tests maximum + - Test only critical controller actions (e.g., primary CRUD operation, auth check, key error case) + - Skip exhaustive testing of all actions and scenarios + - [ ] 2.2 Create [resource] controller + - Actions: index, show, create, update, destroy + - Follow pattern from: [existing controller] + - [ ] 2.3 Implement authentication/authorization + - Use existing auth pattern + - Add permission checks + - [ ] 2.4 Add API response formatting + - JSON responses + - Error handling + - Status codes + - [ ] 2.5 Ensure API layer tests pass + - Run ONLY the 2-8 tests written in 2.1 + - Verify critical CRUD operations work + - Do NOT run the entire test suite at this stage + +**Acceptance Criteria:** +- The 2-8 tests written in 2.1 pass +- All CRUD operations work +- Proper authorization enforced +- Consistent response format + +### Frontend Components + +#### Task Group 3: UI Design +**Dependencies:** Task Group 2 + +- [ ] 3.0 Complete UI components + - [ ] 3.1 Write 2-8 focused tests for UI components + - Limit to 2-8 highly focused tests maximum + - Test only critical component behaviors (e.g., primary user interaction, key form submission, main rendering case) + - Skip exhaustive testing of all component states and interactions + - [ ] 3.2 Create [Component] component + - Reuse: [existing component] as base + - Props: [list] + - State: [list] + - [ ] 3.3 Implement [Feature] form + - Fields: [list] + - Validation: client-side + - Submit handling + - [ ] 3.4 Build [View] page + - Layout: [description] + - Components: [list] + - Match mockup: `planning/visuals/[file]` + - [ ] 3.5 Apply base styles + - Follow existing design system + - Use 
variables from: [style file] + - [ ] 3.6 Implement responsive design + - Mobile: 320px - 768px + - Tablet: 768px - 1024px + - Desktop: 1024px+ + - [ ] 3.7 Add interactions and animations + - Hover states + - Transitions + - Loading states + - [ ] 3.8 Ensure UI component tests pass + - Run ONLY the 2-8 tests written in 3.1 + - Verify critical component behaviors work + - Do NOT run the entire test suite at this stage + +**Acceptance Criteria:** +- The 2-8 tests written in 3.1 pass +- Components render correctly +- Forms validate and submit +- Matches visual design + +### Testing + +#### Task Group 4: Test Review & Gap Analysis +**Dependencies:** Task Groups 1-3 + +- [ ] 4.0 Review existing tests and fill critical gaps only + - [ ] 4.1 Review tests from Task Groups 1-3 + - Review the 2-8 tests written by database-engineer (Task 1.1) + - Review the 2-8 tests written by api-engineer (Task 2.1) + - Review the 2-8 tests written by ui-designer (Task 3.1) + - Total existing tests: approximately 6-24 tests + - [ ] 4.2 Analyze test coverage gaps for THIS feature only + - Identify critical user workflows that lack test coverage + - Focus ONLY on gaps related to this spec's feature requirements + - Do NOT assess entire application test coverage + - Prioritize end-to-end workflows over unit test gaps + - [ ] 4.3 Write up to 10 additional strategic tests maximum + - Add maximum of 10 new tests to fill identified critical gaps + - Focus on integration points and end-to-end workflows + - Do NOT write comprehensive coverage for all scenarios + - Skip edge cases, performance tests, and accessibility tests unless business-critical + - [ ] 4.4 Run feature-specific tests only + - Run ONLY tests related to this spec's feature (tests from 1.1, 2.1, 3.1, and 4.3) + - Expected total: approximately 16-34 tests maximum + - Do NOT run the entire application test suite + - Verify critical workflows pass + +**Acceptance Criteria:** +- All feature-specific tests pass (approximately 16-34 tests 
total) +- Critical user workflows for this feature are covered +- No more than 10 additional tests added when filling in testing gaps +- Testing focused exclusively on this spec's feature requirements + +## Execution Order + +Recommended implementation sequence: +1. Database Layer (Task Group 1) +2. API Layer (Task Group 2) +3. Frontend Design (Task Group 3) +4. Test Review & Gap Analysis (Task Group 4) +``` + +**Note**: Adapt this structure based on the actual feature requirements. Some features may need: +- Different task groups (e.g., email notifications, payment processing, data migration) +- Different execution order based on dependencies +- More or fewer sub-tasks per group + +## Important Constraints + +- **Create tasks that are specific and verifiable** +- **Group related tasks:** For example, group back-end engineering tasks together and front-end UI tasks together. +- **Limit test writing during development**: + - Each task group (1-3) should write 2-8 focused tests maximum + - Tests should cover only critical behaviors, not exhaustive coverage + - Test verification should run ONLY the newly written tests, not the entire suite + - If there is a dedicated test coverage group for filling in gaps in test coverage, this group should add only a maximum of 10 additional tests IF NECESSARY to fill critical gaps +- **Use a focused test-driven approach** where each task group starts with writing 2-8 tests (x.1 sub-task) and ends with running ONLY those tests (final sub-task) +- **Include acceptance criteria** for each task group +- **Reference visual assets** if visuals are available + + +## User Standards & Preferences Compliance + +IMPORTANT: Ensure that the tasks list you create IS ALIGNED and DOES NOT CONFLICT with any of user's preferred tech stack, coding conventions, or common patterns as detailed in the following files: + +@agent-os/standards/backend/api.md +@agent-os/standards/backend/migrations.md +@agent-os/standards/backend/models.md 
+@agent-os/standards/backend/queries.md +@agent-os/standards/frontend/accessibility.md +@agent-os/standards/frontend/components.md +@agent-os/standards/frontend/css.md +@agent-os/standards/frontend/responsive.md +@agent-os/standards/global/coding-style.md +@agent-os/standards/global/commenting.md +@agent-os/standards/global/conventions.md +@agent-os/standards/global/error-handling.md +@agent-os/standards/global/tech-stack.md +@agent-os/standards/global/validation.md +@agent-os/standards/testing/test-writing.md diff --git a/.claude/commands/agent-os/create-tasks.md b/.claude/commands/agent-os/create-tasks.md new file mode 100644 index 0000000..bf6ac23 --- /dev/null +++ b/.claude/commands/agent-os/create-tasks.md @@ -0,0 +1,40 @@ +# Task List Creation Process + +You are creating a tasks breakdown from a given spec and requirements for a new feature. + +## PHASE 1: Get and read the spec.md and/or requirements document(s) + +You will need ONE OR BOTH of these files to inform your tasks breakdown: +- `agent-os/specs/[this-spec]/spec.md` +- `agent-os/specs/[this-spec]/planning/requirements.md` + +IF you don't have ONE OR BOTH of those files in your current conversation context, then ask the user to provide direction on where you can find them by outputting the following request then wait for user's response: + +``` +I'll need a spec.md or requirements.md (or both) in order to build a tasks list. + +Please direct me to where I can find those. If you haven't created them yet, you can run /shape-spec or /write-spec. +``` + +## PHASE 2: Create tasks.md + +Once you have `spec.md` AND/OR `requirements.md`, use the **tasks-list-creator** subagent to break down the spec and requirements into an actionable tasks list with strategic grouping and ordering. 
+ +Provide the tasks-list-creator: +- `agent-os/specs/[this-spec]/spec.md` (if present) +- `agent-os/specs/[this-spec]/planning/requirements.md` (if present) +- `agent-os/specs/[this-spec]/planning/visuals/` and its contents (if present) + +The tasks-list-creator will create `tasks.md` inside the spec folder. + +## PHASE 3: Inform user + +Once the tasks-list-creator has created `tasks.md` output the following to inform the user: + +``` +Your tasks list is ready! + +✅ Tasks list created: `agent-os/specs/[this-spec]/tasks.md` + +NEXT STEP 👉 Run `/implement-tasks` (simple, effective) or `/orchestrate-tasks` (advanced, powerful) to start building! +``` diff --git a/.claude/commands/agent-os/implement-tasks.md b/.claude/commands/agent-os/implement-tasks.md new file mode 100644 index 0000000..f5db277 --- /dev/null +++ b/.claude/commands/agent-os/implement-tasks.md @@ -0,0 +1,55 @@ +# Spec Implementation Process + +Now that we have a spec and tasks list ready for implementation, we will proceed with implementation of this spec by following this multi-phase process: + +PHASE 1: Determine which task group(s) from tasks.md should be implemented +PHASE 2: Delegate implementation to the implementer subagent +PHASE 3: After ALL task groups have been implemented, delegate to implementation-verifier to produce the final verification report. + +Follow each of these phases and their individual workflows IN SEQUENCE: + +## Multi-Phase Process + +### PHASE 1: Determine which task group(s) to implement + +First, check if the user has already provided instructions about which task group(s) to implement. + +**If the user HAS provided instructions:** Proceed to PHASE 2 to delegate implementation of those specified task group(s) to the **implementer** subagent. 
+ +**If the user has NOT provided instructions:** + +Read `agent-os/specs/[this-spec]/tasks.md` to review the available task groups, then output the following message to the user and WAIT for their response: + +``` +Should we proceed with implementation of all task groups in tasks.md? + +If not, then please specify which task(s) to implement. +``` + +### PHASE 2: Delegate implementation to the implementer subagent + +Delegate to the **implementer** subagent to implement the specified task group(s): + +Provide to the subagent: +- The specific task group(s) from `agent-os/specs/[this-spec]/tasks.md` including the parent task, all sub-tasks, and any sub-bullet points +- The path to this spec's documentation: `agent-os/specs/[this-spec]/spec.md` +- The path to this spec's requirements: `agent-os/specs/[this-spec]/planning/requirements.md` +- The path to this spec's visuals (if any): `agent-os/specs/[this-spec]/planning/visuals` + +Instruct the subagent to: +1. Analyze the provided spec.md, requirements.md, and visuals (if any) +2. Analyze patterns in the codebase according to its built-in workflow +3. Implement the assigned task group according to requirements and standards +4. Update `agent-os/specs/[this-spec]/tasks.md` to mark completed tasks with `- [x]` + +### PHASE 3: Produce the final verification report + +IF ALL task groups in tasks.md are marked complete with `- [x]`, then proceed with this step. Otherwise, return to PHASE 1. + +Assuming all tasks are marked complete, then delegate to the **implementation-verifier** subagent to do its implementation verification and produce its final verification report. + +Provide to the subagent the following: +- The path to this spec: `agent-os/specs/[this-spec]` +Instruct the subagent to do the following: + 1. Run all of its final verifications according to its built-in workflow + 2. Produce the final verification report in `agent-os/specs/[this-spec]/verifications/final-verification.md`. 
diff --git a/.claude/commands/agent-os/orchestrate-tasks.md b/.claude/commands/agent-os/orchestrate-tasks.md
new file mode 100644
index 0000000..0e32d9a
--- /dev/null
+++ b/.claude/commands/agent-os/orchestrate-tasks.md
@@ -0,0 +1,180 @@
+# Process for Orchestrating a Spec's Implementation
+
+Now that we have a spec and tasks list ready for implementation, we will proceed with orchestrating implementation of each task group by a dedicated agent using the following MULTI-PHASE process.
+
+Follow each of these phases and their individual workflows IN SEQUENCE:
+
+## Multi-Phase Process
+
+### FIRST: Get tasks.md for this spec
+
+IF you already know which spec we're working on and IF that spec folder has a `tasks.md` file, then use that and skip to the NEXT phase.
+
+IF you don't already know which spec we're working on or IF that spec folder doesn't yet have a `tasks.md` THEN output the following request to the user:
+
+```
+Please point me to a spec's `tasks.md` that you want to orchestrate implementation for.
+
+If you don't have one yet, then run any of these commands first:
+/shape-spec
+/write-spec
+/create-tasks
+```
+
+### NEXT: Create orchestration.yml to serve as a roadmap for orchestration of task groups
+
+In this spec's folder, create this file: `agent-os/specs/[this-spec]/orchestration.yml`.
+
+Populate this file with the names of each task group found in this spec's `tasks.md` and use this EXACT structure for the content of `orchestration.yml`:
+
+```yaml
+task_groups:
+  - name: [task-group-name]
+  - name: [task-group-name]
+  - name: [task-group-name]
+  # Repeat for each task group found in tasks.md
+```
+
+### NEXT: Ask user to assign subagents to each task group
+
+Next we must determine which subagents should be assigned to which task groups. Ask the user to provide this info using the following request to user and WAIT for user's response:
+
+```
+Please specify the name of each subagent to be assigned to each task group:
+
+1. 
[task-group-name] +2. [task-group-name] +3. [task-group-name] +[repeat for each task-group you've added to orchestration.yml] + +Simply respond with the subagent names and corresponding task group number and I'll update orchestration.yml accordingly. +``` + +Using the user's responses, update `orchestration.yml` to specify those subagent names. `orchestration.yml` should end up looking like this: + +```yaml +task_groups: + - name: [task-group-name] + claude_code_subagent: [subagent-name] + - name: [task-group-name] + claude_code_subagent: [subagent-name] + - name: [task-group-name] + claude_code_subagent: [subagent-name] + # Repeat for each task group found in tasks.md +``` + +For example, after this step, the `orchestration.yml` file might look like this (exact names will vary): + +```yaml +task_groups: + - name: authentication-system + claude_code_subagent: backend-specialist + - name: user-dashboard + claude_code_subagent: frontend-specialist + - name: api-endpoints + claude_code_subagent: backend-specialist +``` + +### NEXT: Ask user to assign standards to each task group + +Next we must determine which standards should guide the implementation of each task group. Ask the user to provide this info using the following request to user and WAIT for user's response: + +``` +Please specify the standard(s) that should be used to guide the implementation of each task group: + +1. [task-group-name] +2. [task-group-name] +3. [task-group-name] +[repeat for each task-group you've added to orchestration.yml] + +For each task group number, you can specify any combination of the following: + +"all" to include all of your standards +"global/*" to include all of the files inside of standards/global +"frontend/css.md" to include the css.md standard file +"none" to include no standards for this task group. +``` + +Using the user's responses, update `orchestration.yml` to specify those standards for each task group. 
`orchestration.yml` should end up having AT LEAST the following information added to it: + +```yaml +task_groups: + - name: [task-group-name] + standards: + - [users' 1st response for this task group] + - [users' 2nd response for this task group] + - [users' 3rd response for this task group] + # Repeat for all standards that the user specified for this task group + - name: [task-group-name] + standards: + - [users' 1st response for this task group] + - [users' 2nd response for this task group] + # Repeat for all standards that the user specified for this task group + # Repeat for each task group found in tasks.md +``` + +For example, after this step, the `orchestration.yml` file might look like this (exact names will vary): + +```yaml +task_groups: + - name: authentication-system + standards: + - all + - name: user-dashboard + standards: + - global/* + - frontend/components.md + - frontend/css.md + - name: task-group-with-no-standards + - name: api-endpoints + standards: + - backend/* + - global/error-handling.md +``` + +Note: If the `use_claude_code_subagents` flag is enabled, the final `orchestration.yml` would include BOTH `claude_code_subagent` assignments AND `standards` for each task group. + +### NEXT: Delegate task groups implementations to assigned subagents + +Loop through each task group in `agent-os/specs/[this-spec]/tasks.md` and delegate its implementation to the assigned subagent specified in `orchestration.yml`. + +For each delegation, provide the subagent with: +- The task group (including the parent task and all sub-tasks) +- The spec file: `agent-os/specs/[this-spec]/spec.md` +- Instruct subagent to: + - Perform their implementation + - Check off the task and sub-task(s) in `agent-os/specs/[this-spec]/tasks.md` + +In addition to the above items, also instruct the subagent to closely adhere to the user's standards & preferences as specified in the following files. 
To build the list of file references to give to the subagent, follow these instructions: + +#### Compile Implementation Standards + +Use the following logic to compile a list of file references to standards that should guide implementation: + +##### Steps to Compile Standards List + +1. Find the current task group in `orchestration.yml` +2. Check the list of `standards` specified for this task group in `orchestration.yml` +3. Compile the list of file references to those standards, one file reference per line, using this logic for determining which files to include: + a. If the value for `standards` is simply `all`, then include every single file, folder, sub-folder and files within sub-folders in your list of files. + b. If the item under standards ends with "*" then it means that all files within this folder or sub-folder should be included. For example, `frontend/*` means include all files and sub-folders and their files located inside of `agent-os/standards/frontend/`. + c. If a file ends in `.md` then it means this is one specific file you must include in your list of files. For example `backend/api.md` means you must include the file located at `agent-os/standards/backend/api.md`. + d. De-duplicate files in your list of file references. + +##### Output Format + +The compiled list of standards should look something like this, where each file reference is on its own line and begins with `@`. The exact list of files will vary: + +``` +@agent-os/standards/global/coding-style.md +@agent-os/standards/global/conventions.md +@agent-os/standards/global/tech-stack.md +@agent-os/standards/backend/api/authentication.md +@agent-os/standards/backend/api/endpoints.md +@agent-os/standards/backend/api/responses.md +@agent-os/standards/frontend/css.md +@agent-os/standards/frontend/responsive.md +``` + + +Provide all of the above to the subagent when delegating tasks for it to implement. 
diff --git a/.claude/commands/agent-os/plan-product.md b/.claude/commands/agent-os/plan-product.md new file mode 100644 index 0000000..fc579cf --- /dev/null +++ b/.claude/commands/agent-os/plan-product.md @@ -0,0 +1,36 @@ +## Product Planning Process + +You are helping to plan and document the mission, roadmap and tech stack for the current product. This will include: + +- **Gathering Information**: The user's product vision, user personas, problems and key features +- **Mission Document**: Take what you've gathered and create a concise mission document +- **Roadmap**: Create a phased development plan with prioritized features +- **Tech stack**: Establish the technical stack used for all aspects of this product's codebase + +This process will create these files in `agent-os/product/` directory. + +### PHASE 1: Gather Product Requirements + +Use the **product-planner** subagent to create comprehensive product documentation. + +IF the user has provided any details in regards to the product idea, its purpose, features list, target users and any other details then provide those to the **product-planner** subagent. + +The product-planner will: +- Confirm (or gather) product idea, features, target users, confirm the tech stack and gather other details +- Create `agent-os/product/mission.md` with product vision and strategy +- Create `agent-os/product/roadmap.md` with phased development plan +- Create `agent-os/product/tech-stack.md` documenting all of this product's tech stack choices + +### PHASE 2: Inform the user + +After all steps are complete, output the following to inform the user: + +``` +Your product planning is all set! + +✅ Product mission: `agent-os/product/mission.md` +✅ Product roadmap: `agent-os/product/roadmap.md` +✅ Product tech stack: `agent-os/product/tech-stack.md` + +NEXT STEP 👉 Run `/shape-spec` or `/write-spec` to start work on a feature! 
+```
diff --git a/.claude/commands/agent-os/shape-spec.md b/.claude/commands/agent-os/shape-spec.md
new file mode 100644
index 0000000..f69dbe4
--- /dev/null
+++ b/.claude/commands/agent-os/shape-spec.md
@@ -0,0 +1,52 @@
+# Spec Shaping Process
+
+You are helping me shape and plan the scope for a new feature. The following process is aimed at documenting our key decisions regarding scope, design and architecture approach. We will use our findings from this process later when we write the formal spec document (but we are NOT writing the formal spec yet).
+
+This process will follow 3 main phases, each with their own workflow steps:
+
+Process overview (details to follow)
+
+PHASE 1. Initialize spec
+PHASE 2. Research requirements for this spec
+PHASE 3. Inform the user that the spec has been initialized
+
+Follow each of these phases and their individual workflows IN SEQUENCE:
+
+## Multi-Phase Process:
+
+### PHASE 1: Initialize Spec
+
+Use the **spec-initializer** subagent to initialize a new spec.
+
+IF the user has provided a description, provide that to the spec-initializer.
+
+The spec-initializer will provide the path to the dated spec folder (YYYY-MM-DD-spec-name) they've created.
+
+### PHASE 2: Research Requirements
+
+After spec-initializer completes, immediately use the **spec-shaper** subagent:
+
+Provide the spec-shaper with:
+- The spec folder path from spec-initializer
+
+The spec-shaper will give you several separate responses that you MUST show to the user. These include:
+1. Numbered clarifying questions along with a request for visual assets (show these to user, wait for user's response)
+2. 
Follow-up questions if needed (based on user's answers and provided visuals) + +**IMPORTANT**: +- Display these questions to the user and wait for their response +- The spec-shaper may ask you to relay follow-up questions that you must present to user + +### PHASE 3: Inform the user + +After all steps complete, inform the user: + +``` +Spec shaping is complete! + +✅ Spec folder created: `[spec-path]` +✅ Requirements gathered +✅ Visual assets: [Found X files / No files provided] + +NEXT STEP 👉 Run `/write-spec` to generate the detailed specification document. +``` diff --git a/.claude/commands/agent-os/write-spec.md b/.claude/commands/agent-os/write-spec.md new file mode 100644 index 0000000..49c2cc3 --- /dev/null +++ b/.claude/commands/agent-os/write-spec.md @@ -0,0 +1,22 @@ +# Spec Writing Process + +You are creating a comprehensive specification for a new feature. + +Use the **spec-writer** subagent to create the specification document for this spec: + +Provide the spec-writer with: +- The spec folder path (find the current one or the most recent in `agent-os/specs/*/`) +- The requirements from `planning/requirements.md` +- Any visual assets in `planning/visuals/` + +The spec-writer will create `spec.md` inside the spec folder. + +Once the spec-writer has created `spec.md` output the following to inform the user: + +``` +Your spec.md is ready! + +✅ Spec document created: `[spec-path]` + +NEXT STEP 👉 Run `/create-tasks` to generate your tasks list for this spec. +``` diff --git a/.claude/commands/speckit.analyze.md b/.claude/commands/speckit.analyze.md new file mode 100644 index 0000000..542a3de --- /dev/null +++ b/.claude/commands/speckit.analyze.md @@ -0,0 +1,184 @@ +--- +description: Perform a non-destructive cross-artifact consistency and quality analysis across spec.md, plan.md, and tasks.md after task generation. +--- + +## User Input + +```text +$ARGUMENTS +``` + +You **MUST** consider the user input before proceeding (if not empty). 
+ +## Goal + +Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/speckit.tasks` has successfully produced a complete `tasks.md`. + +## Operating Constraints + +**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually). + +**Constitution Authority**: The project constitution (`.specify/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/speckit.analyze`. + +## Execution Steps + +### 1. Initialize Analysis Context + +Run `.specify/scripts/powershell/check-prerequisites.ps1 -Json -RequireTasks -IncludeTasks` once from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS. Derive absolute paths: + +- SPEC = FEATURE_DIR/spec.md +- PLAN = FEATURE_DIR/plan.md +- TASKS = FEATURE_DIR/tasks.md + +Abort with an error message if any required file is missing (instruct the user to run missing prerequisite command). +For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot"). + +### 2. 
Load Artifacts (Progressive Disclosure) + +Load only the minimal necessary context from each artifact: + +**From spec.md:** + +- Overview/Context +- Functional Requirements +- Non-Functional Requirements +- User Stories +- Edge Cases (if present) + +**From plan.md:** + +- Architecture/stack choices +- Data Model references +- Phases +- Technical constraints + +**From tasks.md:** + +- Task IDs +- Descriptions +- Phase grouping +- Parallel markers [P] +- Referenced file paths + +**From constitution:** + +- Load `.specify/memory/constitution.md` for principle validation + +### 3. Build Semantic Models + +Create internal representations (do not include raw artifacts in output): + +- **Requirements inventory**: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" → `user-can-upload-file`) +- **User story/action inventory**: Discrete user actions with acceptance criteria +- **Task coverage mapping**: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases) +- **Constitution rule set**: Extract principle names and MUST/SHOULD normative statements + +### 4. Detection Passes (Token-Efficient Analysis) + +Focus on high-signal findings. Limit to 50 findings total; aggregate remainder in overflow summary. + +#### A. Duplication Detection + +- Identify near-duplicate requirements +- Mark lower-quality phrasing for consolidation + +#### B. Ambiguity Detection + +- Flag vague adjectives (fast, scalable, secure, intuitive, robust) lacking measurable criteria +- Flag unresolved placeholders (TODO, TKTK, ???, ``, etc.) + +#### C. Underspecification + +- Requirements with verbs but missing object or measurable outcome +- User stories missing acceptance criteria alignment +- Tasks referencing files or components not defined in spec/plan + +#### D. 
Constitution Alignment + +- Any requirement or plan element conflicting with a MUST principle +- Missing mandated sections or quality gates from constitution + +#### E. Coverage Gaps + +- Requirements with zero associated tasks +- Tasks with no mapped requirement/story +- Non-functional requirements not reflected in tasks (e.g., performance, security) + +#### F. Inconsistency + +- Terminology drift (same concept named differently across files) +- Data entities referenced in plan but absent in spec (or vice versa) +- Task ordering contradictions (e.g., integration tasks before foundational setup tasks without dependency note) +- Conflicting requirements (e.g., one requires Next.js while other specifies Vue) + +### 5. Severity Assignment + +Use this heuristic to prioritize findings: + +- **CRITICAL**: Violates constitution MUST, missing core spec artifact, or requirement with zero coverage that blocks baseline functionality +- **HIGH**: Duplicate or conflicting requirement, ambiguous security/performance attribute, untestable acceptance criterion +- **MEDIUM**: Terminology drift, missing non-functional task coverage, underspecified edge case +- **LOW**: Style/wording improvements, minor redundancy not affecting execution order + +### 6. Produce Compact Analysis Report + +Output a Markdown report (no file writes) with the following structure: + +## Specification Analysis Report + +| ID | Category | Severity | Location(s) | Summary | Recommendation | +|----|----------|----------|-------------|---------|----------------| +| A1 | Duplication | HIGH | spec.md:L120-134 | Two similar requirements ... | Merge phrasing; keep clearer version | + +(Add one row per finding; generate stable IDs prefixed by category initial.) + +**Coverage Summary Table:** + +| Requirement Key | Has Task? 
| Task IDs | Notes | +|-----------------|-----------|----------|-------| + +**Constitution Alignment Issues:** (if any) + +**Unmapped Tasks:** (if any) + +**Metrics:** + +- Total Requirements +- Total Tasks +- Coverage % (requirements with >=1 task) +- Ambiguity Count +- Duplication Count +- Critical Issues Count + +### 7. Provide Next Actions + +At end of report, output a concise Next Actions block: + +- If CRITICAL issues exist: Recommend resolving before `/speckit.implement` +- If only LOW/MEDIUM: User may proceed, but provide improvement suggestions +- Provide explicit command suggestions: e.g., "Run /speckit.specify with refinement", "Run /speckit.plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'" + +### 8. Offer Remediation + +Ask the user: "Would you like me to suggest concrete remediation edits for the top N issues?" (Do NOT apply them automatically.) + +## Operating Principles + +### Context Efficiency + +- **Minimal high-signal tokens**: Focus on actionable findings, not exhaustive documentation +- **Progressive disclosure**: Load artifacts incrementally; don't dump all content into analysis +- **Token-efficient output**: Limit findings table to 50 rows; summarize overflow +- **Deterministic results**: Rerunning without changes should produce consistent IDs and counts + +### Analysis Guidelines + +- **NEVER modify files** (this is read-only analysis) +- **NEVER hallucinate missing sections** (if absent, report them accurately) +- **Prioritize constitution violations** (these are always CRITICAL) +- **Use examples over exhaustive rules** (cite specific instances, not generic patterns) +- **Report zero issues gracefully** (emit success report with coverage statistics) + +## Context + +$ARGUMENTS diff --git a/.claude/commands/speckit.checklist.md b/.claude/commands/speckit.checklist.md new file mode 100644 index 0000000..b15f916 --- /dev/null +++ b/.claude/commands/speckit.checklist.md @@ -0,0 +1,294 @@ +--- 
+description: Generate a custom checklist for the current feature based on user requirements. +--- + +## Checklist Purpose: "Unit Tests for English" + +**CRITICAL CONCEPT**: Checklists are **UNIT TESTS FOR REQUIREMENTS WRITING** - they validate the quality, clarity, and completeness of requirements in a given domain. + +**NOT for verification/testing**: + +- ❌ NOT "Verify the button clicks correctly" +- ❌ NOT "Test error handling works" +- ❌ NOT "Confirm the API returns 200" +- ❌ NOT checking if code/implementation matches the spec + +**FOR requirements quality validation**: + +- ✅ "Are visual hierarchy requirements defined for all card types?" (completeness) +- ✅ "Is 'prominent display' quantified with specific sizing/positioning?" (clarity) +- ✅ "Are hover state requirements consistent across all interactive elements?" (consistency) +- ✅ "Are accessibility requirements defined for keyboard navigation?" (coverage) +- ✅ "Does the spec define what happens when logo image fails to load?" (edge cases) + +**Metaphor**: If your spec is code written in English, the checklist is its unit test suite. You're testing whether the requirements are well-written, complete, unambiguous, and ready for implementation - NOT whether the implementation works. + +## User Input + +```text +$ARGUMENTS +``` + +You **MUST** consider the user input before proceeding (if not empty). + +## Execution Steps + +1. **Setup**: Run `.specify/scripts/powershell/check-prerequisites.ps1 -Json` from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS list. + - All file paths must be absolute. + - For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot"). + +2. **Clarify intent (dynamic)**: Derive up to THREE initial contextual clarifying questions (no pre-baked catalog). 
They MUST: + - Be generated from the user's phrasing + extracted signals from spec/plan/tasks + - Only ask about information that materially changes checklist content + - Be skipped individually if already unambiguous in `$ARGUMENTS` + - Prefer precision over breadth + + Generation algorithm: + 1. Extract signals: feature domain keywords (e.g., auth, latency, UX, API), risk indicators ("critical", "must", "compliance"), stakeholder hints ("QA", "review", "security team"), and explicit deliverables ("a11y", "rollback", "contracts"). + 2. Cluster signals into candidate focus areas (max 4) ranked by relevance. + 3. Identify probable audience & timing (author, reviewer, QA, release) if not explicit. + 4. Detect missing dimensions: scope breadth, depth/rigor, risk emphasis, exclusion boundaries, measurable acceptance criteria. + 5. Formulate questions chosen from these archetypes: + - Scope refinement (e.g., "Should this include integration touchpoints with X and Y or stay limited to local module correctness?") + - Risk prioritization (e.g., "Which of these potential risk areas should receive mandatory gating checks?") + - Depth calibration (e.g., "Is this a lightweight pre-commit sanity list or a formal release gate?") + - Audience framing (e.g., "Will this be used by the author only or peers during PR review?") + - Boundary exclusion (e.g., "Should we explicitly exclude performance tuning items this round?") + - Scenario class gap (e.g., "No recovery flows detected—are rollback / partial failure paths in scope?") + + Question formatting rules: + - If presenting options, generate a compact table with columns: Option | Candidate | Why It Matters + - Limit to A–E options maximum; omit table if a free-form answer is clearer + - Never ask the user to restate what they already said + - Avoid speculative categories (no hallucination). If uncertain, ask explicitly: "Confirm whether X belongs in scope." 
+ + Defaults when interaction impossible: + - Depth: Standard + - Audience: Reviewer (PR) if code-related; Author otherwise + - Focus: Top 2 relevance clusters + + Output the questions (label Q1/Q2/Q3). After answers: if ≥2 scenario classes (Alternate / Exception / Recovery / Non-Functional domain) remain unclear, you MAY ask up to TWO more targeted follow‑ups (Q4/Q5) with a one-line justification each (e.g., "Unresolved recovery path risk"). Do not exceed five total questions. Skip escalation if user explicitly declines more. + +3. **Understand user request**: Combine `$ARGUMENTS` + clarifying answers: + - Derive checklist theme (e.g., security, review, deploy, ux) + - Consolidate explicit must-have items mentioned by user + - Map focus selections to category scaffolding + - Infer any missing context from spec/plan/tasks (do NOT hallucinate) + +4. **Load feature context**: Read from FEATURE_DIR: + - spec.md: Feature requirements and scope + - plan.md (if exists): Technical details, dependencies + - tasks.md (if exists): Implementation tasks + + **Context Loading Strategy**: + - Load only necessary portions relevant to active focus areas (avoid full-file dumping) + - Prefer summarizing long sections into concise scenario/requirement bullets + - Use progressive disclosure: add follow-on retrieval only if gaps detected + - If source docs are large, generate interim summary items instead of embedding raw text + +5. 
**Generate checklist** - Create "Unit Tests for Requirements": + - Create `FEATURE_DIR/checklists/` directory if it doesn't exist + - Generate unique checklist filename: + - Use short, descriptive name based on domain (e.g., `ux.md`, `api.md`, `security.md`) + - Format: `[domain].md` + - If file exists, append to existing file + - Number items sequentially starting from CHK001 + - Each `/speckit.checklist` run creates a NEW file (never overwrites existing checklists) + + **CORE PRINCIPLE - Test the Requirements, Not the Implementation**: + Every checklist item MUST evaluate the REQUIREMENTS THEMSELVES for: + - **Completeness**: Are all necessary requirements present? + - **Clarity**: Are requirements unambiguous and specific? + - **Consistency**: Do requirements align with each other? + - **Measurability**: Can requirements be objectively verified? + - **Coverage**: Are all scenarios/edge cases addressed? + + **Category Structure** - Group items by requirement quality dimensions: + - **Requirement Completeness** (Are all necessary requirements documented?) + - **Requirement Clarity** (Are requirements specific and unambiguous?) + - **Requirement Consistency** (Do requirements align without conflicts?) + - **Acceptance Criteria Quality** (Are success criteria measurable?) + - **Scenario Coverage** (Are all flows/cases addressed?) + - **Edge Case Coverage** (Are boundary conditions defined?) + - **Non-Functional Requirements** (Performance, Security, Accessibility, etc. - are they specified?) + - **Dependencies & Assumptions** (Are they documented and validated?) + - **Ambiguities & Conflicts** (What needs clarification?) 
+ + **HOW TO WRITE CHECKLIST ITEMS - "Unit Tests for English"**: + + ❌ **WRONG** (Testing implementation): + - "Verify landing page displays 3 episode cards" + - "Test hover states work on desktop" + - "Confirm logo click navigates home" + + ✅ **CORRECT** (Testing requirements quality): + - "Are the exact number and layout of featured episodes specified?" [Completeness] + - "Is 'prominent display' quantified with specific sizing/positioning?" [Clarity] + - "Are hover state requirements consistent across all interactive elements?" [Consistency] + - "Are keyboard navigation requirements defined for all interactive UI?" [Coverage] + - "Is the fallback behavior specified when logo image fails to load?" [Edge Cases] + - "Are loading states defined for asynchronous episode data?" [Completeness] + - "Does the spec define visual hierarchy for competing UI elements?" [Clarity] + + **ITEM STRUCTURE**: + Each item should follow this pattern: + - Question format asking about requirement quality + - Focus on what's WRITTEN (or not written) in the spec/plan + - Include quality dimension in brackets [Completeness/Clarity/Consistency/etc.] + - Reference spec section `[Spec §X.Y]` when checking existing requirements + - Use `[Gap]` marker when checking for missing requirements + + **EXAMPLES BY QUALITY DIMENSION**: + + Completeness: + - "Are error handling requirements defined for all API failure modes? [Gap]" + - "Are accessibility requirements specified for all interactive elements? [Completeness]" + - "Are mobile breakpoint requirements defined for responsive layouts? [Gap]" + + Clarity: + - "Is 'fast loading' quantified with specific timing thresholds? [Clarity, Spec §NFR-2]" + - "Are 'related episodes' selection criteria explicitly defined? [Clarity, Spec §FR-5]" + - "Is 'prominent' defined with measurable visual properties? [Ambiguity, Spec §FR-4]" + + Consistency: + - "Do navigation requirements align across all pages? 
[Consistency, Spec §FR-10]" + - "Are card component requirements consistent between landing and detail pages? [Consistency]" + + Coverage: + - "Are requirements defined for zero-state scenarios (no episodes)? [Coverage, Edge Case]" + - "Are concurrent user interaction scenarios addressed? [Coverage, Gap]" + - "Are requirements specified for partial data loading failures? [Coverage, Exception Flow]" + + Measurability: + - "Are visual hierarchy requirements measurable/testable? [Acceptance Criteria, Spec §FR-1]" + - "Can 'balanced visual weight' be objectively verified? [Measurability, Spec §FR-2]" + + **Scenario Classification & Coverage** (Requirements Quality Focus): + - Check if requirements exist for: Primary, Alternate, Exception/Error, Recovery, Non-Functional scenarios + - For each scenario class, ask: "Are [scenario type] requirements complete, clear, and consistent?" + - If scenario class missing: "Are [scenario type] requirements intentionally excluded or missing? [Gap]" + - Include resilience/rollback when state mutation occurs: "Are rollback requirements defined for migration failures? [Gap]" + + **Traceability Requirements**: + - MINIMUM: ≥80% of items MUST include at least one traceability reference + - Each item should reference: spec section `[Spec §X.Y]`, or use markers: `[Gap]`, `[Ambiguity]`, `[Conflict]`, `[Assumption]` + - If no ID system exists: "Is a requirement & acceptance criteria ID scheme established? [Traceability]" + + **Surface & Resolve Issues** (Requirements Quality Problems): + Ask questions about the requirements themselves: + - Ambiguities: "Is the term 'fast' quantified with specific metrics? [Ambiguity, Spec §NFR-1]" + - Conflicts: "Do navigation requirements conflict between §FR-10 and §FR-10a? [Conflict]" + - Assumptions: "Is the assumption of 'always available podcast API' validated? [Assumption]" + - Dependencies: "Are external podcast API requirements documented? 
[Dependency, Gap]" + - Missing definitions: "Is 'visual hierarchy' defined with measurable criteria? [Gap]" + + **Content Consolidation**: + - Soft cap: If raw candidate items > 40, prioritize by risk/impact + - Merge near-duplicates checking the same requirement aspect + - If >5 low-impact edge cases, create one item: "Are edge cases X, Y, Z addressed in requirements? [Coverage]" + + **🚫 ABSOLUTELY PROHIBITED** - These make it an implementation test, not a requirements test: + - ❌ Any item starting with "Verify", "Test", "Confirm", "Check" + implementation behavior + - ❌ References to code execution, user actions, system behavior + - ❌ "Displays correctly", "works properly", "functions as expected" + - ❌ "Click", "navigate", "render", "load", "execute" + - ❌ Test cases, test plans, QA procedures + - ❌ Implementation details (frameworks, APIs, algorithms) + + **✅ REQUIRED PATTERNS** - These test requirements quality: + - ✅ "Are [requirement type] defined/specified/documented for [scenario]?" + - ✅ "Is [vague term] quantified/clarified with specific criteria?" + - ✅ "Are requirements consistent between [section A] and [section B]?" + - ✅ "Can [requirement] be objectively measured/verified?" + - ✅ "Are [edge cases/scenarios] addressed in requirements?" + - ✅ "Does the spec define [missing aspect]?" + +6. **Structure Reference**: Generate the checklist following the canonical template in `.specify/templates/checklist-template.md` for title, meta section, category headings, and ID formatting. If template is unavailable, use: H1 title, purpose/created meta lines, `##` category sections containing `- [ ] CHK### ` lines with globally incrementing IDs starting at CHK001. + +7. **Report**: Output full path to created checklist, item count, and remind user that each run creates a new file. 
Summarize: + - Focus areas selected + - Depth level + - Actor/timing + - Any explicit user-specified must-have items incorporated + +**Important**: Each `/speckit.checklist` command invocation creates a checklist file with a short, descriptive name, unless a file of that name already exists. This allows: + +- Multiple checklists of different types (e.g., `ux.md`, `test.md`, `security.md`) +- Simple, memorable filenames that indicate checklist purpose +- Easy identification and navigation in the `checklists/` folder + +To avoid clutter, use descriptive types and clean up obsolete checklists when done. + +## Example Checklist Types & Sample Items + +**UX Requirements Quality:** `ux.md` + +Sample items (testing the requirements, NOT the implementation): + +- "Are visual hierarchy requirements defined with measurable criteria? [Clarity, Spec §FR-1]" +- "Is the number and positioning of UI elements explicitly specified? [Completeness, Spec §FR-1]" +- "Are interaction state requirements (hover, focus, active) consistently defined? [Consistency]" +- "Are accessibility requirements specified for all interactive elements? [Coverage, Gap]" +- "Is fallback behavior defined when images fail to load? [Edge Case, Gap]" +- "Can 'prominent display' be objectively measured? [Measurability, Spec §FR-4]" + +**API Requirements Quality:** `api.md` + +Sample items: + +- "Are error response formats specified for all failure scenarios? [Completeness]" +- "Are rate limiting requirements quantified with specific thresholds? [Clarity]" +- "Are authentication requirements consistent across all endpoints? [Consistency]" +- "Are retry/timeout requirements defined for external dependencies? [Coverage, Gap]" +- "Is versioning strategy documented in requirements? [Gap]" + +**Performance Requirements Quality:** `performance.md` + +Sample items: + +- "Are performance requirements quantified with specific metrics? [Clarity]" +- "Are performance targets defined for all critical user journeys? 
[Coverage]" +- "Are performance requirements under different load conditions specified? [Completeness]" +- "Can performance requirements be objectively measured? [Measurability]" +- "Are degradation requirements defined for high-load scenarios? [Edge Case, Gap]" + +**Security Requirements Quality:** `security.md` + +Sample items: + +- "Are authentication requirements specified for all protected resources? [Coverage]" +- "Are data protection requirements defined for sensitive information? [Completeness]" +- "Is the threat model documented and requirements aligned to it? [Traceability]" +- "Are security requirements consistent with compliance obligations? [Consistency]" +- "Are security failure/breach response requirements defined? [Gap, Exception Flow]" + +## Anti-Examples: What NOT To Do + +**❌ WRONG - These test implementation, not requirements:** + +```markdown +- [ ] CHK001 - Verify landing page displays 3 episode cards [Spec §FR-001] +- [ ] CHK002 - Test hover states work correctly on desktop [Spec §FR-003] +- [ ] CHK003 - Confirm logo click navigates to home page [Spec §FR-010] +- [ ] CHK004 - Check that related episodes section shows 3-5 items [Spec §FR-005] +``` + +**✅ CORRECT - These test requirements quality:** + +```markdown +- [ ] CHK001 - Are the number and layout of featured episodes explicitly specified? [Completeness, Spec §FR-001] +- [ ] CHK002 - Are hover state requirements consistently defined for all interactive elements? [Consistency, Spec §FR-003] +- [ ] CHK003 - Are navigation requirements clear for all clickable brand elements? [Clarity, Spec §FR-010] +- [ ] CHK004 - Is the selection criteria for related episodes documented? [Gap, Spec §FR-005] +- [ ] CHK005 - Are loading state requirements defined for asynchronous episode data? [Gap] +- [ ] CHK006 - Can "visual hierarchy" requirements be objectively measured? 
[Measurability, Spec §FR-001] +``` + +**Key Differences:** + +- Wrong: Tests if the system works correctly +- Correct: Tests if the requirements are written correctly +- Wrong: Verification of behavior +- Correct: Validation of requirement quality +- Wrong: "Does it do X?" +- Correct: "Is X clearly specified?" diff --git a/.claude/commands/speckit.clarify.md b/.claude/commands/speckit.clarify.md new file mode 100644 index 0000000..0678e92 --- /dev/null +++ b/.claude/commands/speckit.clarify.md @@ -0,0 +1,181 @@ +--- +description: Identify underspecified areas in the current feature spec by asking up to 5 highly targeted clarification questions and encoding answers back into the spec. +handoffs: + - label: Build Technical Plan + agent: speckit.plan + prompt: Create a plan for the spec. I am building with... +--- + +## User Input + +```text +$ARGUMENTS +``` + +You **MUST** consider the user input before proceeding (if not empty). + +## Outline + +Goal: Detect and reduce ambiguity or missing decision points in the active feature specification and record the clarifications directly in the spec file. + +Note: This clarification workflow is expected to run (and be completed) BEFORE invoking `/speckit.plan`. If the user explicitly states they are skipping clarification (e.g., exploratory spike), you may proceed, but must warn that downstream rework risk increases. + +Execution steps: + +1. Run `.specify/scripts/powershell/check-prerequisites.ps1 -Json -PathsOnly` from repo root **once** (combined `--json --paths-only` mode / `-Json -PathsOnly`). Parse minimal JSON payload fields: + - `FEATURE_DIR` + - `FEATURE_SPEC` + - (Optionally capture `IMPL_PLAN`, `TASKS` for future chained flows.) + - If JSON parsing fails, abort and instruct user to re-run `/speckit.specify` or verify feature branch environment. + - For single quotes in args like "I'm Groot", use escape syntax: e.g 'I'\''m Groot' (or double-quote if possible: "I'm Groot"). + +2. Load the current spec file. 
Perform a structured ambiguity & coverage scan using this taxonomy. For each category, mark status: Clear / Partial / Missing. Produce an internal coverage map used for prioritization (do not output raw map unless no questions will be asked). + + Functional Scope & Behavior: + - Core user goals & success criteria + - Explicit out-of-scope declarations + - User roles / personas differentiation + + Domain & Data Model: + - Entities, attributes, relationships + - Identity & uniqueness rules + - Lifecycle/state transitions + - Data volume / scale assumptions + + Interaction & UX Flow: + - Critical user journeys / sequences + - Error/empty/loading states + - Accessibility or localization notes + + Non-Functional Quality Attributes: + - Performance (latency, throughput targets) + - Scalability (horizontal/vertical, limits) + - Reliability & availability (uptime, recovery expectations) + - Observability (logging, metrics, tracing signals) + - Security & privacy (authN/Z, data protection, threat assumptions) + - Compliance / regulatory constraints (if any) + + Integration & External Dependencies: + - External services/APIs and failure modes + - Data import/export formats + - Protocol/versioning assumptions + + Edge Cases & Failure Handling: + - Negative scenarios + - Rate limiting / throttling + - Conflict resolution (e.g., concurrent edits) + + Constraints & Tradeoffs: + - Technical constraints (language, storage, hosting) + - Explicit tradeoffs or rejected alternatives + + Terminology & Consistency: + - Canonical glossary terms + - Avoided synonyms / deprecated terms + + Completion Signals: + - Acceptance criteria testability + - Measurable Definition of Done style indicators + + Misc / Placeholders: + - TODO markers / unresolved decisions + - Ambiguous adjectives ("robust", "intuitive") lacking quantification + + For each category with Partial or Missing status, add a candidate question opportunity unless: + - Clarification would not materially change implementation or 
validation strategy + - Information is better deferred to planning phase (note internally) + +3. Generate (internally) a prioritized queue of candidate clarification questions (maximum 5). Do NOT output them all at once. Apply these constraints: + - Maximum of 5 total questions across the whole session. + - Each question must be answerable with EITHER: + - A short multiple‑choice selection (2–5 distinct, mutually exclusive options), OR + - A one-word / short‑phrase answer (explicitly constrain: "Answer in <=5 words"). + - Only include questions whose answers materially impact architecture, data modeling, task decomposition, test design, UX behavior, operational readiness, or compliance validation. + - Ensure category coverage balance: attempt to cover the highest impact unresolved categories first; avoid asking two low-impact questions when a single high-impact area (e.g., security posture) is unresolved. + - Exclude questions already answered, trivial stylistic preferences, or plan-level execution details (unless blocking correctness). + - Favor clarifications that reduce downstream rework risk or prevent misaligned acceptance tests. + - If more than 5 categories remain unresolved, select the top 5 by (Impact * Uncertainty) heuristic. + +4. Sequential questioning loop (interactive): + - Present EXACTLY ONE question at a time. + - For multiple‑choice questions: + - **Analyze all options** and determine the **most suitable option** based on: + - Best practices for the project type + - Common patterns in similar implementations + - Risk reduction (security, performance, maintainability) + - Alignment with any explicit project goals or constraints visible in the spec + - Present your **recommended option prominently** at the top with clear reasoning (1-2 sentences explaining why this is the best choice). + - Format as: `**Recommended:** Option [X] - ` + - Then render all options as a Markdown table: + + | Option | Description | + |--------|-------------| + | A |