diff --git a/AGENTS.md b/AGENTS.md index 7c8eb4a..6d74f64 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -15,7 +15,7 @@ The CLI is the core of the product. The extension is a thin UI layer that calls ## Core Concepts - **Perimeter**: the top-level policy boundary for a repository -- **File Rule**: a file, folder, or glob pattern protected by an access policy. Standard rules (any member) or guardian rules (guardian/admin only) +- **File Rule**: a file, folder, or glob pattern protected by an access policy. Standard rules (any member) or elevated rules (elevated/admin only) - **Pass**: a temporary access grant allowing an agent to write to a protected file. Configured with a duration - **Demarcation**: a registered declaration of what an agent is currently working on, visible to the team via CodeLens and the demarcations panel diff --git a/CLAUDE.md b/CLAUDE.md index d59ede8..c8264ce 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -15,7 +15,7 @@ The CLI is the core of the product. The extension is a thin UI layer that calls ## Core Concepts - **Perimeter**: the top-level policy boundary for a repository -- **File Rule**: a file, folder, or glob pattern protected by an access policy. Standard rules (any member) or guardian rules (guardian/admin only) +- **File Rule**: a file, folder, or glob pattern protected by an access policy. Standard rules (any member) or elevated rules (elevated/admin only) - **Pass**: a temporary access grant allowing an agent to write to a protected file. 
Configured with a duration - **Demarcation**: a registered declaration of what an agent is currently working on, visible to the team via CodeLens and the demarcations panel diff --git a/cli/cmd/command/add.go b/cli/cmd/command/add.go index 6883b02..079d250 100644 --- a/cli/cmd/command/add.go +++ b/cli/cmd/command/add.go @@ -97,8 +97,8 @@ func runCommandAdd(cmd *cobra.Command, args []string) error { if r.RuleType == "allow" { ruleLabel = "allow command rule" } - if r.RuleAuthority == "guardian" { - ruleLabel += " (guardian)" + if r.RuleAuthority == "elevated" { + ruleLabel += " (elevated)" } fmt.Printf("added %s: %s\n", ruleLabel, r.Pattern) return nil diff --git a/cli/cmd/file/add.go b/cli/cmd/file/add.go index 2c9e1f0..a54f6e7 100644 --- a/cli/cmd/file/add.go +++ b/cli/cmd/file/add.go @@ -14,7 +14,7 @@ import ( ) var ( - guardian bool + elevated bool preventRead bool allow bool ) @@ -30,6 +30,7 @@ var addCmd = &cobra.Command{ func init() { addCmd.Flags().BoolVar(&preventRead, "prevent-read", false, "Also block agent read access (e.g. 
for credential files)") addCmd.Flags().BoolVar(&allow, "allow", false, "Create an allow file rule (permits access, overrides deny rules)") + addCmd.Flags().BoolVar(&elevated, "elevated", false, "Create an elevated-authority rule (requires elevated/admin permissions to remove)") } type fileAddResult struct { @@ -71,8 +72,8 @@ func runFileAdd(cmd *cobra.Command, args []string) error { fileAccess = "allow" } fileAuthority := "standard" - if guardian { - fileAuthority = "guardian" + if elevated { + fileAuthority = "elevated" } user := store.CurrentOSUser() @@ -127,8 +128,8 @@ func runFileAdd(cmd *cobra.Command, args []string) error { if f.FileType == "allow" { ruleLabel = "allow rule" } - if f.FileAuthority == "guardian" { - ruleLabel += " (guardian)" + if f.FileAuthority == "elevated" { + ruleLabel += " (elevated)" } readLabel := "" if f.PreventRead { diff --git a/cli/cmd/hook.go b/cli/cmd/hook.go index 23c1d7b..bfd90c5 100644 --- a/cli/cmd/hook.go +++ b/cli/cmd/hook.go @@ -57,57 +57,59 @@ var hookCmd = &cobra.Command{ // fails open — it returns (true, "") so the hook allows the write and logs the // failure. This matches Cordon's fail-open design principle. 
func buildPolicyChecker() hook.PolicyChecker { - return func(filePath, cwd string) (allowed bool, passID string) { + return func(filePath, cwd string) (allowed bool, passID string, notify bool) { absRoot, err := resolveRepoRoot(cwd) if err != nil { fmt.Fprintf(os.Stderr, "cordon: policy check: resolve repo root: %v\n", err) - return true, "" // fail-open + return true, "", false // fail-open } policyDB, err := store.OpenPolicyDB(absRoot) if err != nil { fmt.Fprintf(os.Stderr, "cordon: policy check: open policy db: %v\n", err) - return true, "" // fail-open + return true, "", false // fail-open } defer policyDB.Close() if err := store.MigratePolicyDB(policyDB); err != nil { fmt.Fprintf(os.Stderr, "cordon: policy check: migrate policy db: %v\n", err) - return true, "" // fail-open + return true, "", false // fail-open } rule, err := store.FileRuleForPath(policyDB, filePath, absRoot) if err != nil { fmt.Fprintf(os.Stderr, "cordon: policy check: file rule lookup: %v\n", err) - return true, "" // fail-open + return true, "", false // fail-open } if rule == nil { // File is not covered by any file rule — allow. - return true, "" + return true, "", false } + notify = rule.Notify + // File is covered by a file rule. Check for an active pass in the data database. 
dataDB, err := store.OpenDataDB(absRoot) if err != nil { fmt.Fprintf(os.Stderr, "cordon: policy check: open data db: %v\n", err) - return false, "" // has file rule, data DB unavailable — deny + return false, "", notify // has file rule, data DB unavailable — deny } defer dataDB.Close() if err := store.MigrateDataDB(dataDB); err != nil { fmt.Fprintf(os.Stderr, "cordon: policy check: migrate data db: %v\n", err) - return false, "" // has file rule, data DB unavailable — deny + return false, "", notify // has file rule, data DB unavailable — deny } pass, err := store.ActivePassForPath(dataDB, filePath, absRoot) if err != nil { fmt.Fprintf(os.Stderr, "cordon: policy check: pass lookup: %v\n", err) - return false, "" // has file rule, pass lookup failed — deny + return false, "", notify // has file rule, pass lookup failed — deny } if pass == nil { - return false, "" // has file rule, no active pass — deny + return false, "", notify // has file rule, no active pass — deny } - return true, pass.ID // has file rule, active pass — allow + return true, pass.ID, notify // has file rule, active pass — allow } } @@ -116,43 +118,45 @@ func buildPolicyChecker() hook.PolicyChecker { // // Fails open on any infrastructure error. 
func buildReadChecker() hook.ReadChecker { - return func(filePath, cwd string) (allowed bool, passID string) { + return func(filePath, cwd string) (allowed bool, passID string, notify bool) { absRoot, err := resolveRepoRoot(cwd) if err != nil { - return true, "" // fail-open + return true, "", false // fail-open } policyDB, err := store.OpenPolicyDB(absRoot) if err != nil { - return true, "" // fail-open + return true, "", false // fail-open } defer policyDB.Close() if err := store.MigratePolicyDB(policyDB); err != nil { - return true, "" // fail-open + return true, "", false // fail-open } rule, err := store.FileRuleForPath(policyDB, filePath, absRoot) if err != nil || rule == nil || !rule.PreventRead { - return true, "" // fail-open or not in a prevent-read file rule + return true, "", false // fail-open or not in a prevent-read file rule } + notify = rule.Notify + // File is in a prevent-read file rule. Check for an active pass. dataDB, err := store.OpenDataDB(absRoot) if err != nil { - return false, "" // has file rule, data DB unavailable — deny + return false, "", notify // has file rule, data DB unavailable — deny } defer dataDB.Close() if err := store.MigrateDataDB(dataDB); err != nil { - return false, "" // has file rule, data DB unavailable — deny + return false, "", notify // has file rule, data DB unavailable — deny } pass, err := store.ActivePassForPath(dataDB, filePath, absRoot) if err != nil || pass == nil { - return false, "" // has file rule, no active pass — deny + return false, "", notify // has file rule, no active pass — deny } - return true, pass.ID + return true, pass.ID, notify } } @@ -162,32 +166,34 @@ func buildReadChecker() hook.ReadChecker { // // Fails open on any infrastructure error. 
func buildCommandChecker() hook.CommandChecker { - return func(command, cwd string) (allowed bool, matched *hook.MatchedRule) { + return func(command, cwd string) (allowed bool, matched *hook.MatchedRule, notify bool) { absRoot, err := resolveRepoRoot(cwd) if err != nil { - return true, nil // fail-open + return true, nil, false // fail-open } policyDB, err := store.OpenPolicyDB(absRoot) if err != nil { - return true, nil // fail-open + return true, nil, false // fail-open } defer policyDB.Close() if err := store.MigratePolicyDB(policyDB); err != nil { - return true, nil // fail-open + return true, nil, false // fail-open } rule, err := store.MatchCommandRule(policyDB, command) if err != nil || rule == nil { - return true, nil // fail-open or no match + return true, nil, false // fail-open or no match } + notify = rule.Notify + return false, &hook.MatchedRule{ Pattern: rule.Pattern, RuleType: rule.RuleType, RuleAuthority: rule.RuleAuthority, - } + }, notify } } @@ -221,6 +227,7 @@ func logHookEvent(event *hook.Event) { OSUser: store.CurrentOSUser(), Agent: hookAgent, PassID: event.PassID, + Notify: event.Notify, } if err := store.InsertHookLog(db, entry); err != nil { diff --git a/cli/internal/codexpolicy/codexpolicy.go b/cli/internal/codexpolicy/codexpolicy.go index 0c65690..6750a23 100644 --- a/cli/internal/codexpolicy/codexpolicy.go +++ b/cli/internal/codexpolicy/codexpolicy.go @@ -70,8 +70,8 @@ func buildContent(rules []store.FileRule) string { continue // allow rules permit access; omit from deny list } label := "" - if f.FileAuthority == "guardian" { - label = " *(guardian rule — requires guardian/admin pass)*" + if f.FileAuthority == "elevated" { + label = " *(elevated rule — requires elevated/admin pass)*" } fmt.Fprintf(&b, "- `%s`%s\n", f.Pattern, label) } diff --git a/cli/internal/hook/commandrule.go b/cli/internal/hook/commandrule.go index c2f470d..b8882df 100644 --- a/cli/internal/hook/commandrule.go +++ b/cli/internal/hook/commandrule.go @@ -9,7 +9,7 
@@ import ( type MatchedRule struct { Pattern string RuleType string // "deny" or "allow" - RuleAuthority string // "standard" or "guardian" + RuleAuthority string // "standard" or "elevated" } // CommandChecker checks whether a bash command segment is allowed by command rules. @@ -18,11 +18,11 @@ type MatchedRule struct { // cwd is the agent working directory used to locate the policy database. // // Return values: -// - true, nil — command is allowed -// - false, rule — command is blocked; rule describes the matching rule +// - true, nil, false — command is allowed +// - false, rule, notify — command is blocked; rule describes the matching rule // // A nil CommandChecker allows all commands (fail-open). -type CommandChecker func(command, cwd string) (allowed bool, matched *MatchedRule) +type CommandChecker func(command, cwd string) (allowed bool, matched *MatchedRule, notify bool) // builtinRule is a command rule compiled into the binary. type builtinRule struct { @@ -59,7 +59,7 @@ func CheckBuiltinRules(command string) *MatchedRule { firstDeny = &MatchedRule{ Pattern: r.Pattern, RuleType: "deny", - RuleAuthority: "guardian", + RuleAuthority: "elevated", } } } diff --git a/cli/internal/hook/hook.go b/cli/internal/hook/hook.go index 4838ea1..d14698e 100644 --- a/cli/internal/hook/hook.go +++ b/cli/internal/hook/hook.go @@ -32,12 +32,13 @@ const ( // - allowed=true, passID="" — file is not covered by any file rule (allow) // - allowed=true, passID="…" — file is covered by a file rule and has an active pass (allow) // - allowed=false, passID="" — file is covered by a file rule with no active pass (deny) +// - notify=true — the matched rule has notification flags set // // On infrastructure errors (DB unreadable, etc.) the checker should return -// (true, "") to fail-open per Cordon's fail-open policy. +// (true, "", false) to fail-open per Cordon's fail-open policy. // // A nil PolicyChecker causes all writes to be allowed (fail-open). 
-type PolicyChecker func(filePath, cwd string) (allowed bool, passID string) +type PolicyChecker func(filePath, cwd string) (allowed bool, passID string, notify bool) // Event is returned by Evaluate for every tool invocation (writing or not). // It carries all fields needed for audit logging. @@ -48,6 +49,7 @@ type Event struct { Decision Decision PassID string // non-empty if write was allowed via an active pass Cwd string // cwd from the hook payload; used by the logger for DB path discovery + Notify bool // rule had notification flags — triggers immediate background sync } // ReadChecker checks whether a read of filePath from a prevent-read file rule @@ -56,9 +58,10 @@ type Event struct { // Return values: // - allowed=true — file is not in a prevent-read file rule, or a pass is active // - allowed=false — file is in a prevent-read file rule with no active pass +// - notify=true — the matched rule has notification flags set // // A nil ReadChecker allows all reads (fail-open). -type ReadChecker func(filePath, cwd string) (allowed bool, passID string) +type ReadChecker func(filePath, cwd string) (allowed bool, passID string, notify bool) // writingTools is the set of tool names that constitute write operations and // are subject to file rule enforcement. Non-writing tools are always allowed @@ -212,7 +215,7 @@ func Evaluate(r io.Reader, w io.Writer, errW io.Writer, checker PolicyChecker, r // Reading tools: check against prevent-read file rules. 
if readingTools[payload.ToolName] { - allowed, readPassID := checkRead(rdChecker, filePath, payload.Cwd) + allowed, readPassID, notify := checkRead(rdChecker, filePath, payload.Cwd) if !allowed { event := &Event{ ToolName: payload.ToolName, @@ -220,6 +223,7 @@ func Evaluate(r io.Reader, w io.Writer, errW io.Writer, checker PolicyChecker, r ToolInput: payload.ToolInput, Decision: DecisionDeny, Cwd: payload.Cwd, + Notify: notify, } if err := writeDeny(w, errW, payload.ToolName, filePath); err != nil { return nil, err @@ -233,6 +237,7 @@ func Evaluate(r io.Reader, w io.Writer, errW io.Writer, checker PolicyChecker, r Decision: DecisionAllow, PassID: readPassID, Cwd: payload.Cwd, + Notify: notify, }, nil } @@ -248,7 +253,7 @@ func Evaluate(r io.Reader, w io.Writer, errW io.Writer, checker PolicyChecker, r } // Check the file against the policy database (file rules + passes). - allowed, passID := checkPolicy(checker, filePath, payload.Cwd) + allowed, passID, notify := checkPolicy(checker, filePath, payload.Cwd) if allowed { return &Event{ @@ -258,6 +263,7 @@ func Evaluate(r io.Reader, w io.Writer, errW io.Writer, checker PolicyChecker, r Decision: DecisionAllow, PassID: passID, Cwd: payload.Cwd, + Notify: notify, }, nil } @@ -267,6 +273,7 @@ func Evaluate(r io.Reader, w io.Writer, errW io.Writer, checker PolicyChecker, r ToolInput: payload.ToolInput, Decision: DecisionDeny, Cwd: payload.Cwd, + Notify: notify, } if err := writeDeny(w, errW, payload.ToolName, filePath); err != nil { return nil, err @@ -301,13 +308,14 @@ func evaluateBash(payload hookPayload, w io.Writer, errW io.Writer, checker Poli // Custom rules from the policy database. 
if cmdChecker != nil { - if allowed, matched := cmdChecker(seg, payload.Cwd); !allowed && matched != nil { + if allowed, matched, cmdNotify := cmdChecker(seg, payload.Cwd); !allowed && matched != nil { reason := commandRuleDenyReason(matched) event := &Event{ ToolName: payload.ToolName, ToolInput: payload.ToolInput, Decision: DecisionDeny, Cwd: payload.Cwd, + Notify: cmdNotify, } if err := encodeClaudeDeny(w, reason); err != nil { return nil, err @@ -321,7 +329,7 @@ func evaluateBash(payload hookPayload, w io.Writer, errW io.Writer, checker Poli // Check read targets against prevent-read file rules. readTargets := bashReadTargets(command) for _, target := range readTargets { - allowed, _ := checkRead(rdChecker, target, payload.Cwd) + allowed, _, rdNotify := checkRead(rdChecker, target, payload.Cwd) if !allowed { event := &Event{ ToolName: payload.ToolName, @@ -329,6 +337,7 @@ func evaluateBash(payload hookPayload, w io.Writer, errW io.Writer, checker Poli ToolInput: payload.ToolInput, Decision: DecisionDeny, Cwd: payload.Cwd, + Notify: rdNotify, } reason := readDenyReason(target) if err := encodeClaudeDeny(w, reason); err != nil { @@ -355,7 +364,7 @@ func evaluateBash(payload hookPayload, w io.Writer, errW io.Writer, checker Poli // Check each target against the policy database. Deny if any target is // covered by a file rule without an active pass. We deny on the first violation found. 
for _, target := range targets { - allowed, _ := checkPolicy(checker, target, payload.Cwd) + allowed, _, pNotify := checkPolicy(checker, target, payload.Cwd) if !allowed { primaryTarget := targets[0] event := &Event{ @@ -364,6 +373,7 @@ func evaluateBash(payload hookPayload, w io.Writer, errW io.Writer, checker Poli ToolInput: payload.ToolInput, Decision: DecisionDeny, Cwd: payload.Cwd, + Notify: pNotify, } if err := writeBashDeny(w, errW, primaryTarget, targets); err != nil { return nil, err @@ -399,7 +409,7 @@ func evaluateApplyPatch(payload hookPayload, w io.Writer, errW io.Writer, checke } for _, target := range targets { - allowed, _ := checkPolicy(checker, target, payload.Cwd) + allowed, _, pNotify := checkPolicy(checker, target, payload.Cwd) if !allowed { event := &Event{ ToolName: payload.ToolName, @@ -407,6 +417,7 @@ func evaluateApplyPatch(payload hookPayload, w io.Writer, errW io.Writer, checke ToolInput: payload.ToolInput, Decision: DecisionDeny, Cwd: payload.Cwd, + Notify: pNotify, } if err := writeDeny(w, errW, payload.ToolName, target); err != nil { return nil, err @@ -459,20 +470,20 @@ func patchFileTargets(toolInput json.RawMessage) []string { return targets } -// checkPolicy calls the checker if non-nil, returning (true, "") as the +// checkPolicy calls the checker if non-nil, returning (true, "", false) as the // fail-open default when checker is nil. -func checkPolicy(checker PolicyChecker, filePath, cwd string) (allowed bool, passID string) { +func checkPolicy(checker PolicyChecker, filePath, cwd string) (allowed bool, passID string, notify bool) { if checker == nil { - return true, "" + return true, "", false } return checker(filePath, cwd) } -// checkRead calls the ReadChecker if non-nil, returning (true, "") as the +// checkRead calls the ReadChecker if non-nil, returning (true, "", false) as the // fail-open default when rdChecker is nil. 
-func checkRead(rdChecker ReadChecker, filePath, cwd string) (allowed bool, passID string) { +func checkRead(rdChecker ReadChecker, filePath, cwd string) (allowed bool, passID string, notify bool) { if rdChecker == nil { - return true, "" + return true, "", false } return rdChecker(filePath, cwd) } diff --git a/cli/internal/store/audit.go b/cli/internal/store/audit.go index 4e51fee..f56c4c2 100644 --- a/cli/internal/store/audit.go +++ b/cli/internal/store/audit.go @@ -18,20 +18,37 @@ type AuditEntry struct { Agent string // agent platform identifier for hook events Detail string // additional context (deny reason, etc.) Timestamp string // ISO 8601; auto-set to now if empty + ParentHash string // hash of previous audit_log entry + Hash string // SHA-256 hash for tamper evidence } // InsertAudit appends a structured event to the audit_log table. // If e.Timestamp is empty, the current UTC time is used. +// The hash chain is computed automatically from the previous entry. func InsertAudit(db *sql.DB, e AuditEntry) error { if e.Timestamp == "" { e.Timestamp = time.Now().UTC().Format(time.RFC3339) } - _, err := db.Exec( + + // Read the hash of the most recent entry for chain linkage. 
+ var parentHash string + err := db.QueryRow("SELECT hash FROM audit_log ORDER BY id DESC LIMIT 1").Scan(&parentHash) + if err != nil && err != sql.ErrNoRows { + return fmt.Errorf("store: read audit_log parent hash: %w", err) + } + + e.ParentHash = parentHash + e.Hash = computeDataHash( + e.EventType, e.FilePath, e.User, e.Agent, + e.Detail, e.Timestamp, parentHash, + ) + + _, err = db.Exec( `INSERT INTO audit_log - (event_type, tool_name, file_path, file_rule_id, pass_id, user, agent, detail, timestamp) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`, + (event_type, tool_name, file_path, file_rule_id, pass_id, user, agent, detail, timestamp, parent_hash, hash) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, e.EventType, e.ToolName, e.FilePath, e.FileRuleID, e.PassID, - e.User, e.Agent, e.Detail, e.Timestamp, + e.User, e.Agent, e.Detail, e.Timestamp, e.ParentHash, e.Hash, ) if err != nil { return fmt.Errorf("store: insert audit: %w", err) diff --git a/cli/internal/store/events.go b/cli/internal/store/events.go new file mode 100644 index 0000000..4c00c38 --- /dev/null +++ b/cli/internal/store/events.go @@ -0,0 +1,648 @@ +package store + +import ( + "crypto/sha256" + "database/sql" + "encoding/json" + "fmt" + "sort" + "time" +) + +// PolicyEvent is an immutable record of a policy mutation. +type PolicyEvent struct { + Seq int64 // local auto-increment + EventID string // UUID v4 + EventType string // "file_rule.added", "file_rule.removed", etc. + Payload string // JSON blob + Actor string // GitHub username or OS username + Timestamp string // ISO 8601 + ParentHash string // hash of previous event + Hash string // SHA-256 of this event's fields + ServerSeq *int64 // nil until server acknowledges +} + +// computeHash computes the SHA-256 hash for an event given its fields and parent hash. 
+func computeHash(eventID, eventType, payload, actor, timestamp, parentHash string) string { + data := eventID + "|" + eventType + "|" + payload + "|" + actor + "|" + timestamp + "|" + parentHash + h := sha256.Sum256([]byte(data)) + return fmt.Sprintf("%x", h[:]) +} + +// AppendEvent writes a policy event and applies it to the projection tables +// in a single transaction. Returns the written event with seq assigned. +func AppendEvent(db *sql.DB, eventType, payload, actor string) (*PolicyEvent, error) { + tx, err := db.Begin() + if err != nil { + return nil, fmt.Errorf("store: begin event tx: %w", err) + } + defer tx.Rollback() + + ev, err := appendEventTx(tx, eventType, payload, actor, true) + if err != nil { + return nil, err + } + + if err := tx.Commit(); err != nil { + return nil, fmt.Errorf("store: commit event tx: %w", err) + } + return ev, nil +} + +// appendEventTx is the internal version that works within an existing transaction. +// If applyProjection is true, it also applies the event to the projection tables. +func appendEventTx(tx *sql.Tx, eventType, payload, actor string, applyProjection bool) (*PolicyEvent, error) { + // Read latest hash for parent_hash. 
+ var parentHash string + err := tx.QueryRow("SELECT hash FROM policy_events ORDER BY seq DESC LIMIT 1").Scan(&parentHash) + if err != nil && err != sql.ErrNoRows { + return nil, fmt.Errorf("store: read latest hash: %w", err) + } + + eventID, err := newUUID() + if err != nil { + return nil, fmt.Errorf("store: generate event id: %w", err) + } + + timestamp := time.Now().UTC().Format(time.RFC3339) + hash := computeHash(eventID, eventType, payload, actor, timestamp, parentHash) + + ev := &PolicyEvent{ + EventID: eventID, + EventType: eventType, + Payload: payload, + Actor: actor, + Timestamp: timestamp, + ParentHash: parentHash, + Hash: hash, + } + + res, err := tx.Exec( + `INSERT INTO policy_events (event_id, event_type, payload, actor, timestamp, parent_hash, hash) + VALUES (?, ?, ?, ?, ?, ?, ?)`, + ev.EventID, ev.EventType, ev.Payload, ev.Actor, ev.Timestamp, ev.ParentHash, ev.Hash, + ) + if err != nil { + return nil, fmt.Errorf("store: insert event: %w", err) + } + + seq, err := res.LastInsertId() + if err != nil { + return nil, fmt.Errorf("store: get event seq: %w", err) + } + ev.Seq = seq + + if applyProjection { + if err := applyEventToProjection(tx, ev); err != nil { + return nil, err + } + } + + return ev, nil +} + +// applyEventToProjection applies a single event to the projection tables within a transaction. +func applyEventToProjection(tx *sql.Tx, ev *PolicyEvent) error { + switch ev.EventType { + case "file_rule.added": + return applyFileRuleAdded(tx, ev.Payload) + case "file_rule.removed": + return applyFileRuleRemoved(tx, ev.Payload) + case "file_rule.updated": + return applyFileRuleUpdated(tx, ev.Payload) + case "command_rule.added": + return applyCommandRuleAdded(tx, ev.Payload) + case "command_rule.removed": + return applyCommandRuleRemoved(tx, ev.Payload) + case "command_rule.updated": + return applyCommandRuleUpdated(tx, ev.Payload) + default: + // Unknown event types are silently ignored for forward compatibility. 
+ return nil + } +} + +func applyFileRuleAdded(tx *sql.Tx, payload string) error { + var p struct { + ID string `json:"id"` + Pattern string `json:"pattern"` + FileAccess string `json:"file_access"` + FileAuthority string `json:"file_authority"` + PreventWrite bool `json:"prevent_write"` + PreventRead bool `json:"prevent_read"` + CreatedBy string `json:"created_by"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Notify bool `json:"notify"` + } + if err := json.Unmarshal([]byte(payload), &p); err != nil { + return fmt.Errorf("store: unmarshal file_rule.added: %w", err) + } + now := time.Now().UTC().Format(time.RFC3339) + if p.CreatedAt == "" { + p.CreatedAt = now + } + if p.UpdatedAt == "" { + p.UpdatedAt = now + } + _, err := tx.Exec( + `INSERT INTO file_rules (id, pattern, file_access, file_authority, prevent_write, prevent_read, created_by, created_at, updated_at, notify) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + p.ID, p.Pattern, p.FileAccess, p.FileAuthority, p.PreventWrite, p.PreventRead, p.CreatedBy, p.CreatedAt, p.UpdatedAt, p.Notify, + ) + return err +} + +func applyFileRuleRemoved(tx *sql.Tx, payload string) error { + var p struct { + ID string `json:"id"` + } + if err := json.Unmarshal([]byte(payload), &p); err != nil { + return fmt.Errorf("store: unmarshal file_rule.removed: %w", err) + } + _, err := tx.Exec(`DELETE FROM file_rules WHERE id = ?`, p.ID) + return err +} + +func applyFileRuleUpdated(tx *sql.Tx, payload string) error { + var p map[string]interface{} + if err := json.Unmarshal([]byte(payload), &p); err != nil { + return fmt.Errorf("store: unmarshal file_rule.updated: %w", err) + } + id, ok := p["id"].(string) + if !ok { + return fmt.Errorf("store: file_rule.updated missing id") + } + now := time.Now().UTC().Format(time.RFC3339) + for k, v := range p { + if k == "id" || k == "pattern" { + continue + } + col := k + _, err := tx.Exec(fmt.Sprintf(`UPDATE file_rules SET %s = ?, updated_at = ? 
WHERE id = ?`, col), v, now, id) + if err != nil { + return fmt.Errorf("store: update file_rules.%s: %w", col, err) + } + } + return nil +} + +func applyCommandRuleAdded(tx *sql.Tx, payload string) error { + var p struct { + ID string `json:"id"` + Pattern string `json:"pattern"` + RuleAccess string `json:"rule_access"` + RuleAuthority string `json:"rule_authority"` + CreatedBy string `json:"created_by"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Notify bool `json:"notify"` + } + if err := json.Unmarshal([]byte(payload), &p); err != nil { + return fmt.Errorf("store: unmarshal command_rule.added: %w", err) + } + now := time.Now().UTC().Format(time.RFC3339) + if p.CreatedAt == "" { + p.CreatedAt = now + } + if p.UpdatedAt == "" { + p.UpdatedAt = now + } + _, err := tx.Exec( + `INSERT INTO command_rules (id, pattern, rule_access, rule_authority, created_by, created_at, updated_at, notify) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, + p.ID, p.Pattern, p.RuleAccess, p.RuleAuthority, p.CreatedBy, p.CreatedAt, p.UpdatedAt, p.Notify, + ) + return err +} + +func applyCommandRuleRemoved(tx *sql.Tx, payload string) error { + var p struct { + ID string `json:"id"` + } + if err := json.Unmarshal([]byte(payload), &p); err != nil { + return fmt.Errorf("store: unmarshal command_rule.removed: %w", err) + } + _, err := tx.Exec(`DELETE FROM command_rules WHERE id = ?`, p.ID) + return err +} + +func applyCommandRuleUpdated(tx *sql.Tx, payload string) error { + var p map[string]interface{} + if err := json.Unmarshal([]byte(payload), &p); err != nil { + return fmt.Errorf("store: unmarshal command_rule.updated: %w", err) + } + id, ok := p["id"].(string) + if !ok { + return fmt.Errorf("store: command_rule.updated missing id") + } + now := time.Now().UTC().Format(time.RFC3339) + for k, v := range p { + if k == "id" || k == "pattern" { + continue + } + col := k + _, err := tx.Exec(fmt.Sprintf(`UPDATE command_rules SET %s = ?, updated_at = ? 
WHERE id = ?`, col), v, now, id) + if err != nil { + return fmt.Errorf("store: update command_rules.%s: %w", col, err) + } + } + return nil +} + +// ReplayEvents rebuilds file_rules and command_rules from the full event log. +// Called after sync pull or during migration. Runs in a single transaction. +func ReplayEvents(db *sql.DB) error { + tx, err := db.Begin() + if err != nil { + return fmt.Errorf("store: begin replay tx: %w", err) + } + defer tx.Rollback() + + if _, err := tx.Exec(`DELETE FROM file_rules`); err != nil { + return fmt.Errorf("store: clear file_rules: %w", err) + } + if _, err := tx.Exec(`DELETE FROM command_rules`); err != nil { + return fmt.Errorf("store: clear command_rules: %w", err) + } + + rows, err := tx.Query(`SELECT seq, event_id, event_type, payload, actor, timestamp, parent_hash, hash, server_seq + FROM policy_events ORDER BY seq ASC`) + if err != nil { + return fmt.Errorf("store: query events for replay: %w", err) + } + defer rows.Close() + + for rows.Next() { + var ev PolicyEvent + if err := rows.Scan(&ev.Seq, &ev.EventID, &ev.EventType, &ev.Payload, &ev.Actor, + &ev.Timestamp, &ev.ParentHash, &ev.Hash, &ev.ServerSeq); err != nil { + return fmt.Errorf("store: scan event: %w", err) + } + if err := applyEventToProjectionReplay(tx, &ev); err != nil { + return fmt.Errorf("store: apply event seq=%d: %w", ev.Seq, err) + } + } + if err := rows.Err(); err != nil { + return fmt.Errorf("store: iterate events: %w", err) + } + + return tx.Commit() +} + +// applyEventToProjectionReplay applies an event during replay, using INSERT OR REPLACE +// to handle duplicate patterns that can arise from concurrent remote additions. 
+func applyEventToProjectionReplay(tx *sql.Tx, ev *PolicyEvent) error { + switch ev.EventType { + case "file_rule.added": + return applyFileRuleAddedReplay(tx, ev.Payload) + case "file_rule.removed": + return applyFileRuleRemoved(tx, ev.Payload) + case "file_rule.updated": + return applyFileRuleUpdated(tx, ev.Payload) + case "command_rule.added": + return applyCommandRuleAddedReplay(tx, ev.Payload) + case "command_rule.removed": + return applyCommandRuleRemoved(tx, ev.Payload) + case "command_rule.updated": + return applyCommandRuleUpdated(tx, ev.Payload) + default: + return nil + } +} + +func applyFileRuleAddedReplay(tx *sql.Tx, payload string) error { + var p struct { + ID string `json:"id"` + Pattern string `json:"pattern"` + FileAccess string `json:"file_access"` + FileAuthority string `json:"file_authority"` + PreventWrite bool `json:"prevent_write"` + PreventRead bool `json:"prevent_read"` + CreatedBy string `json:"created_by"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Notify bool `json:"notify"` + } + if err := json.Unmarshal([]byte(payload), &p); err != nil { + return fmt.Errorf("store: unmarshal file_rule.added replay: %w", err) + } + now := time.Now().UTC().Format(time.RFC3339) + if p.CreatedAt == "" { + p.CreatedAt = now + } + if p.UpdatedAt == "" { + p.UpdatedAt = now + } + _, err := tx.Exec( + `INSERT OR REPLACE INTO file_rules (id, pattern, file_access, file_authority, prevent_write, prevent_read, created_by, created_at, updated_at, notify) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + p.ID, p.Pattern, p.FileAccess, p.FileAuthority, p.PreventWrite, p.PreventRead, p.CreatedBy, p.CreatedAt, p.UpdatedAt, p.Notify, + ) + return err +} + +func applyCommandRuleAddedReplay(tx *sql.Tx, payload string) error { + var p struct { + ID string `json:"id"` + Pattern string `json:"pattern"` + RuleAccess string `json:"rule_access"` + RuleAuthority string `json:"rule_authority"` + CreatedBy string `json:"created_by"` + CreatedAt 
string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Notify bool `json:"notify"` + } + if err := json.Unmarshal([]byte(payload), &p); err != nil { + return fmt.Errorf("store: unmarshal command_rule.added replay: %w", err) + } + now := time.Now().UTC().Format(time.RFC3339) + if p.CreatedAt == "" { + p.CreatedAt = now + } + if p.UpdatedAt == "" { + p.UpdatedAt = now + } + _, err := tx.Exec( + `INSERT OR REPLACE INTO command_rules (id, pattern, rule_access, rule_authority, created_by, created_at, updated_at, notify) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, + p.ID, p.Pattern, p.RuleAccess, p.RuleAuthority, p.CreatedBy, p.CreatedAt, p.UpdatedAt, p.Notify, + ) + return err +} + +// ListUnpushedEvents returns all events where server_seq IS NULL, ordered by seq ASC. +func ListUnpushedEvents(db *sql.DB) ([]PolicyEvent, error) { + rows, err := db.Query( + `SELECT seq, event_id, event_type, payload, actor, timestamp, parent_hash, hash, server_seq + FROM policy_events WHERE server_seq IS NULL ORDER BY seq ASC`, + ) + if err != nil { + return nil, fmt.Errorf("store: list unpushed events: %w", err) + } + defer rows.Close() + return scanEvents(rows) +} + +// MarkEventsPushed updates server_seq for events that have been acknowledged by the server. +// assignments maps event_id -> server_seq. +func MarkEventsPushed(db *sql.DB, assignments map[string]int64) error { + tx, err := db.Begin() + if err != nil { + return fmt.Errorf("store: begin mark pushed tx: %w", err) + } + defer tx.Rollback() + + stmt, err := tx.Prepare(`UPDATE policy_events SET server_seq = ? 
WHERE event_id = ?`) + if err != nil { + return fmt.Errorf("store: prepare mark pushed: %w", err) + } + defer stmt.Close() + + for eventID, serverSeq := range assignments { + if _, err := stmt.Exec(serverSeq, eventID); err != nil { + return fmt.Errorf("store: mark event %s pushed: %w", eventID, err) + } + } + + return tx.Commit() +} + +// AppendRemoteEvents inserts events received from the server and rebuilds projections. +// Runs in a single transaction. +func AppendRemoteEvents(db *sql.DB, events []PolicyEvent) error { + tx, err := db.Begin() + if err != nil { + return fmt.Errorf("store: begin remote events tx: %w", err) + } + defer tx.Rollback() + + for _, ev := range events { + _, err := tx.Exec( + `INSERT INTO policy_events (event_id, event_type, payload, actor, timestamp, parent_hash, hash, server_seq) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, + ev.EventID, ev.EventType, ev.Payload, ev.Actor, ev.Timestamp, ev.ParentHash, ev.Hash, ev.ServerSeq, + ) + if err != nil { + return fmt.Errorf("store: insert remote event %s: %w", ev.EventID, err) + } + } + + // Rebuild projections from the full event log. 
+ if _, err := tx.Exec(`DELETE FROM file_rules`); err != nil { + return fmt.Errorf("store: clear file_rules for rebuild: %w", err) + } + if _, err := tx.Exec(`DELETE FROM command_rules`); err != nil { + return fmt.Errorf("store: clear command_rules for rebuild: %w", err) + } + + rows, err := tx.Query(`SELECT seq, event_id, event_type, payload, actor, timestamp, parent_hash, hash, server_seq + FROM policy_events ORDER BY seq ASC`) + if err != nil { + return fmt.Errorf("store: query events for rebuild: %w", err) + } + defer rows.Close() + + for rows.Next() { + var ev PolicyEvent + if err := rows.Scan(&ev.Seq, &ev.EventID, &ev.EventType, &ev.Payload, &ev.Actor, + &ev.Timestamp, &ev.ParentHash, &ev.Hash, &ev.ServerSeq); err != nil { + return fmt.Errorf("store: scan event for rebuild: %w", err) + } + if err := applyEventToProjectionReplay(tx, &ev); err != nil { + return fmt.Errorf("store: apply remote event seq=%d: %w", ev.Seq, err) + } + } + if err := rows.Err(); err != nil { + return fmt.Errorf("store: iterate events for rebuild: %w", err) + } + + return tx.Commit() +} + +// LatestHash returns the hash of the most recent event, or "" if no events exist. +func LatestHash(db *sql.DB) (string, error) { + var hash string + err := db.QueryRow("SELECT hash FROM policy_events ORDER BY seq DESC LIMIT 1").Scan(&hash) + if err == sql.ErrNoRows { + return "", nil + } + if err != nil { + return "", fmt.Errorf("store: latest hash: %w", err) + } + return hash, nil +} + +// VerifyChain walks the full event log and verifies that every event's parent_hash +// matches the previous event's hash, and that each hash is correctly computed. +// Returns the seq of the first broken link, or 0 if the chain is valid. 
+func VerifyChain(db *sql.DB) (int64, error) { + rows, err := db.Query( + `SELECT seq, event_id, event_type, payload, actor, timestamp, parent_hash, hash + FROM policy_events ORDER BY seq ASC`, + ) + if err != nil { + return 0, fmt.Errorf("store: verify chain query: %w", err) + } + defer rows.Close() + + var prevHash string + for rows.Next() { + var ev PolicyEvent + if err := rows.Scan(&ev.Seq, &ev.EventID, &ev.EventType, &ev.Payload, &ev.Actor, + &ev.Timestamp, &ev.ParentHash, &ev.Hash); err != nil { + return 0, fmt.Errorf("store: verify chain scan: %w", err) + } + + // Check parent_hash linkage. + if ev.ParentHash != prevHash { + return ev.Seq, nil + } + + // Check hash computation. + expected := computeHash(ev.EventID, ev.EventType, ev.Payload, ev.Actor, ev.Timestamp, ev.ParentHash) + if ev.Hash != expected { + return ev.Seq, nil + } + + prevHash = ev.Hash + } + return 0, rows.Err() +} + +// scanEvents reads all rows from a policy_events query into a slice. +func scanEvents(rows *sql.Rows) ([]PolicyEvent, error) { + var events []PolicyEvent + for rows.Next() { + var ev PolicyEvent + if err := rows.Scan(&ev.Seq, &ev.EventID, &ev.EventType, &ev.Payload, &ev.Actor, + &ev.Timestamp, &ev.ParentHash, &ev.Hash, &ev.ServerSeq); err != nil { + return nil, fmt.Errorf("store: scan event: %w", err) + } + events = append(events, ev) + } + return events, rows.Err() +} + +// migrateExistingRulesToEvents generates synthetic genesis events for any +// pre-existing rules that have no corresponding events. This is called during +// MigratePolicyDB to handle the transition from state-based to event-sourced policy. +func migrateExistingRulesToEvents(db *sql.DB) error { + // Check if there are already events — if so, migration is not needed. 
+ var eventCount int + if err := db.QueryRow("SELECT COUNT(*) FROM policy_events").Scan(&eventCount); err != nil { + return fmt.Errorf("store: count events for migration: %w", err) + } + if eventCount > 0 { + return nil + } + + // Check if there are any rules to migrate. + var ruleCount int + if err := db.QueryRow("SELECT (SELECT COUNT(*) FROM file_rules) + (SELECT COUNT(*) FROM command_rules)").Scan(&ruleCount); err != nil { + return fmt.Errorf("store: count rules for migration: %w", err) + } + if ruleCount == 0 { + return nil + } + + // Collect all rules with timestamps for ordering. + type migrationEntry struct { + eventType string + payload string + timestamp string + } + + var entries []migrationEntry + + // Read file rules. + fileRows, err := db.Query( + `SELECT id, pattern, file_access, file_authority, prevent_write, prevent_read, created_by, created_at + FROM file_rules ORDER BY created_at ASC`, + ) + if err != nil { + return fmt.Errorf("store: read file rules for migration: %w", err) + } + defer fileRows.Close() + + for fileRows.Next() { + var id, pattern, fileAccess, fileAuthority, createdBy, createdAt string + var preventWrite, preventRead int + if err := fileRows.Scan(&id, &pattern, &fileAccess, &fileAuthority, &preventWrite, &preventRead, &createdBy, &createdAt); err != nil { + return fmt.Errorf("store: scan file rule for migration: %w", err) + } + payload, _ := json.Marshal(map[string]interface{}{ + "id": id, + "pattern": pattern, + "file_access": fileAccess, + "file_authority": fileAuthority, + "prevent_write": preventWrite != 0, + "prevent_read": preventRead != 0, + "created_by": createdBy, + "created_at": createdAt, + "updated_at": createdAt, + }) + entries = append(entries, migrationEntry{ + eventType: "file_rule.added", + payload: string(payload), + timestamp: createdAt, + }) + } + if err := fileRows.Err(); err != nil { + return fmt.Errorf("store: iterate file rules for migration: %w", err) + } + + // Read command rules. 
+ cmdRows, err := db.Query( + `SELECT id, pattern, rule_access, rule_authority, created_by, created_at + FROM command_rules ORDER BY created_at ASC`, + ) + if err != nil { + return fmt.Errorf("store: read command rules for migration: %w", err) + } + defer cmdRows.Close() + + for cmdRows.Next() { + var id, pattern, ruleAccess, ruleAuthority, createdBy, createdAt string + if err := cmdRows.Scan(&id, &pattern, &ruleAccess, &ruleAuthority, &createdBy, &createdAt); err != nil { + return fmt.Errorf("store: scan command rule for migration: %w", err) + } + payload, _ := json.Marshal(map[string]interface{}{ + "id": id, + "pattern": pattern, + "rule_access": ruleAccess, + "rule_authority": ruleAuthority, + "created_by": createdBy, + "created_at": createdAt, + "updated_at": createdAt, + }) + entries = append(entries, migrationEntry{ + eventType: "command_rule.added", + payload: string(payload), + timestamp: createdAt, + }) + } + if err := cmdRows.Err(); err != nil { + return fmt.Errorf("store: iterate command rules for migration: %w", err) + } + + // Sort by timestamp across both rule types. + sort.Slice(entries, func(i, j int) bool { + return entries[i].timestamp < entries[j].timestamp + }) + + // Write synthetic events (skip projection writes since projections already exist). 
+ tx, err := db.Begin() + if err != nil { + return fmt.Errorf("store: begin migration tx: %w", err) + } + defer tx.Rollback() + + for _, entry := range entries { + if _, err := appendEventTx(tx, entry.eventType, entry.payload, "system", false); err != nil { + return fmt.Errorf("store: append migration event: %w", err) + } + } + + return tx.Commit() +} diff --git a/cli/internal/store/events_test.go b/cli/internal/store/events_test.go new file mode 100644 index 0000000..80f1937 --- /dev/null +++ b/cli/internal/store/events_test.go @@ -0,0 +1,476 @@ +package store + +import ( + "database/sql" + "encoding/json" + "testing" +) + +func TestComputeHash_Deterministic(t *testing.T) { + h1 := computeHash("id1", "file_rule.added", `{"pattern":".env"}`, "alice", "2024-01-01T00:00:00Z", "") + h2 := computeHash("id1", "file_rule.added", `{"pattern":".env"}`, "alice", "2024-01-01T00:00:00Z", "") + if h1 != h2 { + t.Errorf("same inputs produced different hashes: %s vs %s", h1, h2) + } + if len(h1) != 64 { + t.Errorf("hash length = %d, want 64", len(h1)) + } +} + +func TestComputeHash_DifferentInputs(t *testing.T) { + h1 := computeHash("id1", "file_rule.added", `{"pattern":".env"}`, "alice", "2024-01-01T00:00:00Z", "") + h2 := computeHash("id2", "file_rule.added", `{"pattern":".env"}`, "alice", "2024-01-01T00:00:00Z", "") + if h1 == h2 { + t.Error("different event_ids should produce different hashes") + } +} + +func TestAppendEvent(t *testing.T) { + db := newTestPolicyDB(t) + + payload, _ := json.Marshal(map[string]interface{}{ + "id": "rule-1", + "pattern": ".env", + "file_access": "deny", + "file_authority": "standard", + "prevent_write": true, + "prevent_read": false, + "created_by": "test", + }) + + ev, err := AppendEvent(db, "file_rule.added", string(payload), "test") + if err != nil { + t.Fatal(err) + } + + if ev.Seq == 0 { + t.Error("expected seq > 0") + } + if ev.EventID == "" { + t.Error("expected non-empty event_id") + } + if ev.ParentHash != "" { + t.Errorf("first event 
should have empty parent_hash, got %q", ev.ParentHash) + } + if ev.Hash == "" { + t.Error("expected non-empty hash") + } + + // Verify the projection was updated. + rules, err := ListFileRules(db) + if err != nil { + t.Fatal(err) + } + if len(rules) != 1 { + t.Fatalf("expected 1 file rule, got %d", len(rules)) + } + if rules[0].Pattern != ".env" { + t.Errorf("pattern = %q, want .env", rules[0].Pattern) + } +} + +func TestAppendMultipleEvents_HashChain(t *testing.T) { + db := newTestPolicyDB(t) + + p1, _ := json.Marshal(map[string]interface{}{ + "id": "r1", "pattern": ".env", "file_access": "deny", + "file_authority": "standard", "prevent_write": true, + "prevent_read": false, "created_by": "test", + }) + ev1, err := AppendEvent(db, "file_rule.added", string(p1), "test") + if err != nil { + t.Fatal(err) + } + + p2, _ := json.Marshal(map[string]interface{}{ + "id": "r2", "pattern": "*.pem", "file_access": "deny", + "file_authority": "standard", "prevent_write": true, + "prevent_read": true, "created_by": "test", + }) + ev2, err := AppendEvent(db, "file_rule.added", string(p2), "test") + if err != nil { + t.Fatal(err) + } + + if ev2.ParentHash != ev1.Hash { + t.Errorf("ev2.ParentHash = %q, want %q (ev1.Hash)", ev2.ParentHash, ev1.Hash) + } + + // Verify chain is valid. + broken, err := VerifyChain(db) + if err != nil { + t.Fatal(err) + } + if broken != 0 { + t.Errorf("chain broken at seq %d, expected valid", broken) + } +} + +func TestReplayEvents(t *testing.T) { + db := newTestPolicyDB(t) + + // Add rules via events. 
+ p1, _ := json.Marshal(map[string]interface{}{ + "id": "r1", "pattern": ".env", "file_access": "deny", + "file_authority": "standard", "prevent_write": true, + "prevent_read": false, "created_by": "test", + }) + AppendEvent(db, "file_rule.added", string(p1), "test") + + p2, _ := json.Marshal(map[string]interface{}{ + "id": "c1", "pattern": "rm -rf /*", "rule_access": "deny", + "rule_authority": "standard", "created_by": "test", + }) + AppendEvent(db, "command_rule.added", string(p2), "test") + + // Clear projections manually. + db.Exec("DELETE FROM file_rules") + db.Exec("DELETE FROM command_rules") + + // Replay should restore them. + if err := ReplayEvents(db); err != nil { + t.Fatal(err) + } + + rules, _ := ListFileRules(db) + if len(rules) != 1 || rules[0].Pattern != ".env" { + t.Errorf("expected 1 file rule (.env), got %d", len(rules)) + } + + cmdRules, _ := ListRules(db) + if len(cmdRules) != 1 || cmdRules[0].Pattern != "rm -rf /*" { + t.Errorf("expected 1 command rule, got %d", len(cmdRules)) + } +} + +func TestReplayEvents_Idempotent(t *testing.T) { + db := newTestPolicyDB(t) + + p1, _ := json.Marshal(map[string]interface{}{ + "id": "r1", "pattern": ".env", "file_access": "deny", + "file_authority": "standard", "prevent_write": true, + "prevent_read": false, "created_by": "test", + }) + AppendEvent(db, "file_rule.added", string(p1), "test") + + // Replay twice. 
+ if err := ReplayEvents(db); err != nil { + t.Fatal(err) + } + if err := ReplayEvents(db); err != nil { + t.Fatal(err) + } + + rules, _ := ListFileRules(db) + if len(rules) != 1 { + t.Errorf("expected 1 file rule after double replay, got %d", len(rules)) + } +} + +func TestVerifyChain_TamperedEvent(t *testing.T) { + db := newTestPolicyDB(t) + + p1, _ := json.Marshal(map[string]interface{}{ + "id": "r1", "pattern": ".env", "file_access": "deny", + "file_authority": "standard", "prevent_write": true, + "prevent_read": false, "created_by": "test", + }) + AppendEvent(db, "file_rule.added", string(p1), "test") + + p2, _ := json.Marshal(map[string]interface{}{ + "id": "r2", "pattern": "*.pem", "file_access": "deny", + "file_authority": "standard", "prevent_write": true, + "prevent_read": false, "created_by": "test", + }) + AppendEvent(db, "file_rule.added", string(p2), "test") + + // Tamper with the first event's hash. + db.Exec("UPDATE policy_events SET hash = 'tampered' WHERE seq = 1") + + broken, err := VerifyChain(db) + if err != nil { + t.Fatal(err) + } + if broken == 0 { + t.Error("expected chain to be broken after tampering") + } +} + +func TestListUnpushedEvents(t *testing.T) { + db := newTestPolicyDB(t) + + p1, _ := json.Marshal(map[string]interface{}{ + "id": "r1", "pattern": ".env", "file_access": "deny", + "file_authority": "standard", "prevent_write": true, + "prevent_read": false, "created_by": "test", + }) + AppendEvent(db, "file_rule.added", string(p1), "test") + + events, err := ListUnpushedEvents(db) + if err != nil { + t.Fatal(err) + } + if len(events) != 1 { + t.Fatalf("expected 1 unpushed event, got %d", len(events)) + } + if events[0].ServerSeq != nil { + t.Error("expected nil server_seq for unpushed event") + } +} + +func TestMarkEventsPushed(t *testing.T) { + db := newTestPolicyDB(t) + + p1, _ := json.Marshal(map[string]interface{}{ + "id": "r1", "pattern": ".env", "file_access": "deny", + "file_authority": "standard", "prevent_write": true, + 
"prevent_read": false, "created_by": "test", + }) + ev, _ := AppendEvent(db, "file_rule.added", string(p1), "test") + + err := MarkEventsPushed(db, map[string]int64{ev.EventID: 42}) + if err != nil { + t.Fatal(err) + } + + events, _ := ListUnpushedEvents(db) + if len(events) != 0 { + t.Errorf("expected 0 unpushed events after marking, got %d", len(events)) + } +} + +func TestAppendRemoteEvents(t *testing.T) { + db := newTestPolicyDB(t) + + serverSeq := int64(1) + remoteEv := PolicyEvent{ + EventID: "remote-id-1", + EventType: "file_rule.added", + Payload: `{"id":"rr1","pattern":"secrets.json","file_access":"deny","file_authority":"standard","prevent_write":true,"prevent_read":true,"created_by":"admin"}`, + Actor: "admin", + Timestamp: "2024-06-01T00:00:00Z", + ParentHash: "", + Hash: computeHash("remote-id-1", "file_rule.added", `{"id":"rr1","pattern":"secrets.json","file_access":"deny","file_authority":"standard","prevent_write":true,"prevent_read":true,"created_by":"admin"}`, "admin", "2024-06-01T00:00:00Z", ""), + ServerSeq: &serverSeq, + } + + if err := AppendRemoteEvents(db, []PolicyEvent{remoteEv}); err != nil { + t.Fatal(err) + } + + rules, _ := ListFileRules(db) + if len(rules) != 1 || rules[0].Pattern != "secrets.json" { + t.Errorf("expected 1 file rule (secrets.json), got %v", rules) + } +} + +func TestLatestHash_Empty(t *testing.T) { + db := newTestPolicyDB(t) + + hash, err := LatestHash(db) + if err != nil { + t.Fatal(err) + } + if hash != "" { + t.Errorf("expected empty hash for empty event log, got %q", hash) + } +} + +func TestLatestHash_AfterEvent(t *testing.T) { + db := newTestPolicyDB(t) + + p1, _ := json.Marshal(map[string]interface{}{ + "id": "r1", "pattern": ".env", "file_access": "deny", + "file_authority": "standard", "prevent_write": true, + "prevent_read": false, "created_by": "test", + }) + ev, _ := AppendEvent(db, "file_rule.added", string(p1), "test") + + hash, err := LatestHash(db) + if err != nil { + t.Fatal(err) + } + if hash != 
ev.Hash { + t.Errorf("latest hash = %q, want %q", hash, ev.Hash) + } +} + +func TestAddFileRuleCreatesEvent(t *testing.T) { + db := newTestPolicyDB(t) + + _, err := AddFileRule(db, ".env", "deny", "standard", "alice", false) + if err != nil { + t.Fatal(err) + } + + // Verify event was created. + var count int + db.QueryRow("SELECT COUNT(*) FROM policy_events WHERE event_type = 'file_rule.added'").Scan(&count) + if count != 1 { + t.Errorf("expected 1 file_rule.added event, got %d", count) + } + + // Verify projection is correct. + rules, _ := ListFileRules(db) + if len(rules) != 1 || rules[0].Pattern != ".env" { + t.Errorf("expected 1 file rule (.env), got %v", rules) + } +} + +func TestRemoveFileRuleCreatesEvent(t *testing.T) { + db := newTestPolicyDB(t) + + AddFileRule(db, ".env", "deny", "standard", "alice", false) + + removed, err := RemoveFileRule(db, ".env") + if err != nil { + t.Fatal(err) + } + if !removed { + t.Error("expected removed=true") + } + + // Verify event was created. + var count int + db.QueryRow("SELECT COUNT(*) FROM policy_events WHERE event_type = 'file_rule.removed'").Scan(&count) + if count != 1 { + t.Errorf("expected 1 file_rule.removed event, got %d", count) + } + + // Verify projection is updated. 
+ rules, _ := ListFileRules(db) + if len(rules) != 0 { + t.Errorf("expected 0 file rules after removal, got %d", len(rules)) + } +} + +func TestAddCommandRuleCreatesEvent(t *testing.T) { + db := newTestPolicyDB(t) + + _, err := AddRule(db, "rm -rf /*", "deny", "standard", "alice") + if err != nil { + t.Fatal(err) + } + + var count int + db.QueryRow("SELECT COUNT(*) FROM policy_events WHERE event_type = 'command_rule.added'").Scan(&count) + if count != 1 { + t.Errorf("expected 1 command_rule.added event, got %d", count) + } +} + +func TestRemoveCommandRuleCreatesEvent(t *testing.T) { + db := newTestPolicyDB(t) + + AddRule(db, "rm -rf /*", "deny", "standard", "alice") + + removed, err := RemoveRule(db, "rm -rf /*") + if err != nil { + t.Fatal(err) + } + if !removed { + t.Error("expected removed=true") + } + + var count int + db.QueryRow("SELECT COUNT(*) FROM policy_events WHERE event_type = 'command_rule.removed'").Scan(&count) + if count != 1 { + t.Errorf("expected 1 command_rule.removed event, got %d", count) + } +} + +func TestMigrationFromExistingState(t *testing.T) { + // Create a database with rules but no events (simulating pre-event-sourcing state). + db, err := sql.Open("sqlite", ":memory:") + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { db.Close() }) + + // Create tables WITHOUT policy_events (old schema). 
+ stmts := []string{ + `CREATE TABLE file_rules ( + id TEXT PRIMARY KEY, pattern TEXT NOT NULL, + file_access TEXT NOT NULL DEFAULT 'deny', + file_authority TEXT NOT NULL DEFAULT 'standard', + prevent_write INTEGER NOT NULL DEFAULT 1, + prevent_read INTEGER NOT NULL DEFAULT 0, + created_by TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL, updated_at TEXT NOT NULL + )`, + `CREATE UNIQUE INDEX idx_file_rules_pattern ON file_rules(pattern)`, + `CREATE TABLE command_rules ( + id TEXT PRIMARY KEY, pattern TEXT NOT NULL, + rule_access TEXT NOT NULL DEFAULT 'deny', + rule_authority TEXT NOT NULL DEFAULT 'standard', + created_by TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL, updated_at TEXT NOT NULL + )`, + `CREATE UNIQUE INDEX idx_command_rules_pattern ON command_rules(pattern)`, + `CREATE TABLE perimeter_meta (key TEXT PRIMARY KEY, value TEXT NOT NULL)`, + } + for _, s := range stmts { + if _, err := db.Exec(s); err != nil { + t.Fatal(err) + } + } + + // Insert rules directly (pre-event-sourcing). + db.Exec(`INSERT INTO file_rules (id, pattern, file_access, file_authority, prevent_write, prevent_read, created_by, created_at, updated_at) + VALUES ('fr1', '.env', 'deny', 'standard', 1, 0, 'seed', '2024-01-01T00:00:00Z', '2024-01-01T00:00:00Z')`) + db.Exec(`INSERT INTO command_rules (id, pattern, rule_access, rule_authority, created_by, created_at, updated_at) + VALUES ('cr1', 'rm -rf /*', 'deny', 'standard', 'seed', '2024-01-02T00:00:00Z', '2024-01-02T00:00:00Z')`) + + // Run migration — this should create the policy_events table and generate synthetic events. + if err := MigratePolicyDB(db); err != nil { + t.Fatal(err) + } + + // Verify events were generated. + var eventCount int + db.QueryRow("SELECT COUNT(*) FROM policy_events").Scan(&eventCount) + if eventCount != 2 { + t.Errorf("expected 2 migration events, got %d", eventCount) + } + + // Verify chain is valid. 
+ broken, err := VerifyChain(db) + if err != nil { + t.Fatal(err) + } + if broken != 0 { + t.Errorf("chain broken at seq %d after migration", broken) + } + + // Verify projections still have the original rules. + rules, _ := ListFileRules(db) + if len(rules) != 1 || rules[0].Pattern != ".env" { + t.Errorf("expected file rule .env, got %v", rules) + } + + cmdRules, _ := ListRules(db) + if len(cmdRules) != 1 || cmdRules[0].Pattern != "rm -rf /*" { + t.Errorf("expected command rule rm -rf /*, got %v", cmdRules) + } +} + +func TestMigrationSkipsWhenEventsExist(t *testing.T) { + db := newTestPolicyDB(t) + + // Add a rule (creates an event). + AddFileRule(db, ".env", "deny", "standard", "test", false) + + var countBefore int + db.QueryRow("SELECT COUNT(*) FROM policy_events").Scan(&countBefore) + + // Run migration again — should be a no-op. + if err := migrateExistingRulesToEvents(db); err != nil { + t.Fatal(err) + } + + var countAfter int + db.QueryRow("SELECT COUNT(*) FROM policy_events").Scan(&countAfter) + if countAfter != countBefore { + t.Errorf("migration should be no-op when events exist: before=%d, after=%d", countBefore, countAfter) + } +} diff --git a/cli/internal/store/hash.go b/cli/internal/store/hash.go new file mode 100644 index 0000000..9ff789e --- /dev/null +++ b/cli/internal/store/hash.go @@ -0,0 +1,15 @@ +package store + +import ( + "crypto/sha256" + "fmt" + "strings" +) + +// computeDataHash computes a SHA-256 hash over the given fields joined by "|". +// Used by InsertHookLog and InsertAudit to build per-table hash chains in data.db. 
+func computeDataHash(fields ...string) string { + data := strings.Join(fields, "|") + h := sha256.Sum256([]byte(data)) + return fmt.Sprintf("%x", h[:]) +} diff --git a/cli/internal/store/hash_test.go b/cli/internal/store/hash_test.go new file mode 100644 index 0000000..6b26d92 --- /dev/null +++ b/cli/internal/store/hash_test.go @@ -0,0 +1,148 @@ +package store + +import ( + "testing" +) + +func TestComputeDataHash_Deterministic(t *testing.T) { + h1 := computeDataHash("field1", "field2", "field3") + h2 := computeDataHash("field1", "field2", "field3") + if h1 != h2 { + t.Errorf("same inputs produced different hashes: %s vs %s", h1, h2) + } + if len(h1) != 64 { + t.Errorf("hash length = %d, want 64", len(h1)) + } +} + +func TestComputeDataHash_DifferentInputs(t *testing.T) { + h1 := computeDataHash("a", "b", "c") + h2 := computeDataHash("a", "b", "d") + if h1 == h2 { + t.Error("different inputs should produce different hashes") + } +} + +func TestInsertHookLog_HashChain(t *testing.T) { + db := newTestDataDB(t) + + e1 := HookLogEntry{ + Ts: 1000000, + ToolName: "Write", + FilePath: "/test/file.go", + Decision: "allow", + OSUser: "testuser", + Agent: "claude-code", + } + if err := InsertHookLog(db, e1); err != nil { + t.Fatal(err) + } + + // Read back the first entry. + var hash1, parentHash1 string + err := db.QueryRow("SELECT parent_hash, hash FROM hook_log WHERE id = 1").Scan(&parentHash1, &hash1) + if err != nil { + t.Fatal(err) + } + if parentHash1 != "" { + t.Errorf("first entry parent_hash = %q, want empty", parentHash1) + } + if hash1 == "" { + t.Error("first entry hash should not be empty") + } + + // Insert second entry. 
+ e2 := HookLogEntry{ + Ts: 2000000, + ToolName: "Edit", + FilePath: "/test/other.go", + Decision: "deny", + OSUser: "testuser", + Agent: "claude-code", + } + if err := InsertHookLog(db, e2); err != nil { + t.Fatal(err) + } + + var hash2, parentHash2 string + err = db.QueryRow("SELECT parent_hash, hash FROM hook_log WHERE id = 2").Scan(&parentHash2, &hash2) + if err != nil { + t.Fatal(err) + } + if parentHash2 != hash1 { + t.Errorf("second entry parent_hash = %q, want %q", parentHash2, hash1) + } + if hash2 == "" || hash2 == hash1 { + t.Error("second entry hash should be non-empty and different from first") + } +} + +func TestInsertHookLog_NotifyFlag(t *testing.T) { + db := newTestDataDB(t) + + e := HookLogEntry{ + Ts: 1000000, + ToolName: "Write", + FilePath: "/test/file.go", + Decision: "deny", + OSUser: "testuser", + Notify: true, + } + if err := InsertHookLog(db, e); err != nil { + t.Fatal(err) + } + + var notify int + err := db.QueryRow("SELECT notify FROM hook_log WHERE id = 1").Scan(¬ify) + if err != nil { + t.Fatal(err) + } + if notify != 1 { + t.Errorf("notify = %d, want 1", notify) + } +} + +func TestInsertAudit_HashChain(t *testing.T) { + db := newTestDataDB(t) + + e1 := AuditEntry{ + EventType: "file_add", + FilePath: ".env", + User: "alice", + Detail: "added file rule", + } + if err := InsertAudit(db, e1); err != nil { + t.Fatal(err) + } + + var hash1, parentHash1 string + err := db.QueryRow("SELECT parent_hash, hash FROM audit_log WHERE id = 1").Scan(&parentHash1, &hash1) + if err != nil { + t.Fatal(err) + } + if parentHash1 != "" { + t.Errorf("first audit entry parent_hash = %q, want empty", parentHash1) + } + if hash1 == "" { + t.Error("first audit entry hash should not be empty") + } + + e2 := AuditEntry{ + EventType: "file_remove", + FilePath: ".env", + User: "alice", + Detail: "removed file rule", + } + if err := InsertAudit(db, e2); err != nil { + t.Fatal(err) + } + + var hash2, parentHash2 string + err = db.QueryRow("SELECT parent_hash, hash 
FROM audit_log WHERE id = 2").Scan(&parentHash2, &hash2) + if err != nil { + t.Fatal(err) + } + if parentHash2 != hash1 { + t.Errorf("second audit entry parent_hash = %q, want %q", parentHash2, hash1) + } +} diff --git a/cli/internal/store/log.go b/cli/internal/store/log.go index a92752d..1502af1 100644 --- a/cli/internal/store/log.go +++ b/cli/internal/store/log.go @@ -2,27 +2,52 @@ package store import ( "database/sql" + "fmt" "os/user" ) // HookLogEntry is a single row written to the hook_log table. type HookLogEntry struct { - Ts int64 // Unix microseconds - ToolName string - FilePath string - ToolInput string // raw JSON of the tool_input field - Decision string // "allow" or "deny" - OSUser string - Agent string - PassID string + Ts int64 // Unix microseconds + ToolName string + FilePath string + ToolInput string // raw JSON of the tool_input field + Decision string // "allow" or "deny" + OSUser string + Agent string + PassID string + Notify bool // rule had notification flags + ParentHash string // hash of previous hook_log entry + Hash string // SHA-256 hash for tamper evidence } // InsertHookLog appends a hook invocation to the audit log. +// It computes the hash chain automatically from the previous entry. +// Note: tool_input is excluded from the hash computation (see spec §14.4). func InsertHookLog(db *sql.DB, e HookLogEntry) error { - _, err := db.Exec( - `INSERT INTO hook_log (ts, tool_name, file_path, tool_input, decision, os_user, agent, pass_id) - VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, + // Read the hash of the most recent entry for chain linkage. 
+ var parentHash string + err := db.QueryRow("SELECT hash FROM hook_log ORDER BY id DESC LIMIT 1").Scan(&parentHash) + if err != nil && err != sql.ErrNoRows { + return fmt.Errorf("store: read hook_log parent hash: %w", err) + } + + e.ParentHash = parentHash + e.Hash = computeDataHash( + fmt.Sprintf("%d", e.Ts), e.ToolName, e.FilePath, + e.Decision, e.OSUser, e.Agent, parentHash, + ) + + var notify int + if e.Notify { + notify = 1 + } + + _, err = db.Exec( + `INSERT INTO hook_log (ts, tool_name, file_path, tool_input, decision, os_user, agent, pass_id, notify, parent_hash, hash) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, e.Ts, e.ToolName, e.FilePath, e.ToolInput, e.Decision, e.OSUser, e.Agent, e.PassID, + notify, e.ParentHash, e.Hash, ) return err } diff --git a/cli/internal/store/policy.go b/cli/internal/store/policy.go index b4a1230..f3da959 100644 --- a/cli/internal/store/policy.go +++ b/cli/internal/store/policy.go @@ -3,6 +3,7 @@ package store import ( "crypto/rand" "database/sql" + "encoding/json" "errors" "fmt" "path/filepath" @@ -29,16 +30,17 @@ type FileRule struct { ID string Pattern string FileType string // "deny" (blocks access) or "allow" (permits access, overrides deny) - FileAuthority string // "standard" (any member) or "guardian" (guardian/admin only) + FileAuthority string // "standard" (any member) or "elevated" (elevated/admin only) PreventWrite bool // always true for now PreventRead bool // opt-in via --prevent-read CreatedBy string CreatedAt string // ISO 8601 UpdatedAt string // ISO 8601 + Notify bool // triggers immediate sync when rule is matched } // AddFileRule inserts a new file rule into the policy database. -// fileAccess is "deny" (default) or "allow". fileAuthority is "standard" or "guardian". +// fileAccess is "deny" (default) or "allow". fileAuthority is "standard" or "elevated". // preventRead enables read enforcement in addition to the always-on write enforcement. 
// Returns an error if the pattern already exists (UNIQUE constraint violation), // or if fileAccess is "allow" and preventRead is true (nonsensical combination). @@ -47,13 +49,33 @@ func AddFileRule(db *sql.DB, pattern, fileAccess, fileAuthority, createdBy strin return nil, fmt.Errorf("store: allow file rules cannot have prevent-read enabled") } - now := time.Now().UTC().Format(time.RFC3339) id, err := newUUID() if err != nil { return nil, fmt.Errorf("store: generate file rule id: %w", err) } + now := time.Now().UTC().Format(time.RFC3339) + + payload, _ := json.Marshal(map[string]interface{}{ + "id": id, + "pattern": pattern, + "file_access": fileAccess, + "file_authority": fileAuthority, + "prevent_write": true, + "prevent_read": preventRead, + "created_by": createdBy, + "created_at": now, + "updated_at": now, + }) + + _, err = AppendEvent(db, "file_rule.added", string(payload), createdBy) + if err != nil { + if isDuplicatePatternError(err) { + return nil, fmt.Errorf("store: add file rule: %w: %s", ErrDuplicatePattern, pattern) + } + return nil, fmt.Errorf("store: add file rule: %w", err) + } - f := FileRule{ + return &FileRule{ ID: id, Pattern: pattern, FileType: fileAccess, @@ -63,26 +85,13 @@ func AddFileRule(db *sql.DB, pattern, fileAccess, fileAuthority, createdBy strin CreatedBy: createdBy, CreatedAt: now, UpdatedAt: now, - } - - _, err = db.Exec( - `INSERT INTO file_rules (id, pattern, file_access, file_authority, prevent_write, prevent_read, created_by, created_at, updated_at) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`, - f.ID, f.Pattern, f.FileType, f.FileAuthority, f.PreventWrite, f.PreventRead, f.CreatedBy, f.CreatedAt, f.UpdatedAt, - ) - if err != nil { - if isDuplicatePatternError(err) { - return nil, fmt.Errorf("store: add file rule: %w: %s", ErrDuplicatePattern, pattern) - } - return nil, fmt.Errorf("store: add file rule: %w", err) - } - return &f, nil + }, nil } // ListFileRules returns all file rules ordered by creation time. 
func ListFileRules(db *sql.DB) ([]FileRule, error) { rows, err := db.Query( - `SELECT id, pattern, file_access, file_authority, prevent_write, prevent_read, created_by, created_at, updated_at + `SELECT id, pattern, file_access, file_authority, prevent_write, prevent_read, created_by, created_at, updated_at, notify FROM file_rules ORDER BY created_at ASC`, ) if err != nil { @@ -93,12 +102,13 @@ func ListFileRules(db *sql.DB) ([]FileRule, error) { var rules []FileRule for rows.Next() { var f FileRule - var pw, pr int - if err := rows.Scan(&f.ID, &f.Pattern, &f.FileType, &f.FileAuthority, &pw, &pr, &f.CreatedBy, &f.CreatedAt, &f.UpdatedAt); err != nil { + var pw, pr, nfy int + if err := rows.Scan(&f.ID, &f.Pattern, &f.FileType, &f.FileAuthority, &pw, &pr, &f.CreatedBy, &f.CreatedAt, &f.UpdatedAt, &nfy); err != nil { return nil, fmt.Errorf("store: scan file rule: %w", err) } f.PreventWrite = pw != 0 f.PreventRead = pr != 0 + f.Notify = nfy != 0 rules = append(rules, f) } return rules, rows.Err() @@ -107,15 +117,26 @@ func ListFileRules(db *sql.DB) ([]FileRule, error) { // RemoveFileRule deletes the file rule with the given pattern. // Returns (true, nil) if a rule was removed, (false, nil) if no matching rule exists. func RemoveFileRule(db *sql.DB, pattern string) (bool, error) { - res, err := db.Exec(`DELETE FROM file_rules WHERE pattern = ?`, pattern) + // Look up the rule ID needed for the event payload. 
+ var id string + err := db.QueryRow(`SELECT id FROM file_rules WHERE pattern = ?`, pattern).Scan(&id) + if err == sql.ErrNoRows { + return false, nil + } if err != nil { - return false, fmt.Errorf("store: remove file rule: %w", err) + return false, fmt.Errorf("store: remove file rule lookup: %w", err) } - n, err := res.RowsAffected() + + payload, _ := json.Marshal(map[string]string{ + "id": id, + "pattern": pattern, + }) + + _, err = AppendEvent(db, "file_rule.removed", string(payload), CurrentOSUser()) if err != nil { - return false, fmt.Errorf("store: remove file rule rows affected: %w", err) + return false, fmt.Errorf("store: remove file rule: %w", err) } - return n > 0, nil + return true, nil } // FileRuleForPath returns the effective deny file rule whose pattern covers diff --git a/cli/internal/store/rules.go b/cli/internal/store/rules.go index 8824509..30f7c83 100644 --- a/cli/internal/store/rules.go +++ b/cli/internal/store/rules.go @@ -2,6 +2,7 @@ package store import ( "database/sql" + "encoding/json" "fmt" "path/filepath" "strings" @@ -33,50 +34,56 @@ type CommandRule struct { ID string Pattern string RuleType string // "deny" (blocks command) or "allow" (permits command, overrides deny) - RuleAuthority string // "standard" (any member) or "guardian" (guardian/admin only) + RuleAuthority string // "standard" (any member) or "elevated" (elevated/admin only) CreatedBy string CreatedAt string UpdatedAt string + Notify bool // triggers immediate sync when rule is matched } // AddRule inserts a command rule into the policy database. -// ruleAccess is "deny" (default) or "allow". ruleAuthority is "standard" or "guardian". +// ruleAccess is "deny" (default) or "allow". ruleAuthority is "standard" or "elevated". // Returns an error if the pattern already exists. 
func AddRule(db *sql.DB, pattern, ruleAccess, ruleAuthority, createdBy string) (*CommandRule, error) { - now := time.Now().UTC().Format(time.RFC3339) id, err := newUUID() if err != nil { return nil, fmt.Errorf("store: generate rule id: %w", err) } + now := time.Now().UTC().Format(time.RFC3339) - r := CommandRule{ - ID: id, - Pattern: pattern, - RuleType: ruleAccess, - RuleAuthority: ruleAuthority, - CreatedBy: createdBy, - CreatedAt: now, - UpdatedAt: now, - } + payload, _ := json.Marshal(map[string]interface{}{ + "id": id, + "pattern": pattern, + "rule_access": ruleAccess, + "rule_authority": ruleAuthority, + "created_by": createdBy, + "created_at": now, + "updated_at": now, + }) - _, err = db.Exec( - `INSERT INTO command_rules (id, pattern, rule_access, rule_authority, created_by, created_at, updated_at) - VALUES (?, ?, ?, ?, ?, ?, ?)`, - r.ID, r.Pattern, r.RuleType, r.RuleAuthority, r.CreatedBy, r.CreatedAt, r.UpdatedAt, - ) + _, err = AppendEvent(db, "command_rule.added", string(payload), createdBy) if err != nil { if isDuplicatePatternError(err) { return nil, fmt.Errorf("store: add rule: %w: %s", ErrDuplicatePattern, pattern) } return nil, fmt.Errorf("store: add rule: %w", err) } - return &r, nil + + return &CommandRule{ + ID: id, + Pattern: pattern, + RuleType: ruleAccess, + RuleAuthority: ruleAuthority, + CreatedBy: createdBy, + CreatedAt: now, + UpdatedAt: now, + }, nil } // ListRules returns all command rules ordered by creation time. 
func ListRules(db *sql.DB) ([]CommandRule, error) { rows, err := db.Query( - `SELECT id, pattern, rule_access, rule_authority, created_by, created_at, updated_at + `SELECT id, pattern, rule_access, rule_authority, created_by, created_at, updated_at, notify FROM command_rules ORDER BY created_at ASC`, ) if err != nil { @@ -87,10 +94,12 @@ func ListRules(db *sql.DB) ([]CommandRule, error) { var rules []CommandRule for rows.Next() { var r CommandRule + var nfy int if err := rows.Scan(&r.ID, &r.Pattern, &r.RuleType, &r.RuleAuthority, - &r.CreatedBy, &r.CreatedAt, &r.UpdatedAt); err != nil { + &r.CreatedBy, &r.CreatedAt, &r.UpdatedAt, &nfy); err != nil { return nil, fmt.Errorf("store: scan rule: %w", err) } + r.Notify = nfy != 0 rules = append(rules, r) } return rules, rows.Err() @@ -98,19 +107,30 @@ func ListRules(db *sql.DB) ([]CommandRule, error) { // RemoveRule deletes a standard-authority command rule with the given pattern. // Returns (true, nil) if removed, (false, nil) if not found. -// Guardian-authority rules cannot be removed by non-guardians. +// Elevated-authority rules are never removed by this function, regardless of caller. func RemoveRule(db *sql.DB, pattern string) (bool, error) { - res, err := db.Exec( - `DELETE FROM command_rules WHERE pattern = ? AND rule_authority = 'standard'`, pattern, - ) + // Look up the rule ID, enforcing standard-authority restriction. + var id string + err := db.QueryRow( + `SELECT id FROM command_rules WHERE pattern = ? 
AND rule_authority = 'standard'`, pattern, + ).Scan(&id) + if err == sql.ErrNoRows { + return false, nil + } if err != nil { - return false, fmt.Errorf("store: remove rule: %w", err) + return false, fmt.Errorf("store: remove rule lookup: %w", err) } - n, err := res.RowsAffected() + + payload, _ := json.Marshal(map[string]string{ + "id": id, + "pattern": pattern, + }) + + _, err = AppendEvent(db, "command_rule.removed", string(payload), CurrentOSUser()) if err != nil { - return false, fmt.Errorf("store: remove rule rows affected: %w", err) + return false, fmt.Errorf("store: remove rule: %w", err) } - return n > 0, nil + return true, nil } // MatchCommandRule checks whether command matches any rule in the database. diff --git a/cli/internal/store/schema.go b/cli/internal/store/schema.go index 02f1147..476cddf 100644 --- a/cli/internal/store/schema.go +++ b/cli/internal/store/schema.go @@ -14,7 +14,7 @@ func MigratePolicyDB(db *sql.DB) error { // id: UUID v4 (hex string). // pattern: file path, directory path, or glob pattern. // file_access: 'deny' (blocks access) or 'allow' (permits access, overrides deny rules). - // file_authority: 'standard' (any member) or 'guardian' (guardian/admin only). + // file_authority: 'standard' (any member) or 'elevated' (elevated/admin only). // prevent_write: 1 = block agent write tools (always true for now). // prevent_read: 1 = also block agent read tools (opt-in via --prevent-read). // created_by: user identifier (github username or OS username for local users). 
@@ -24,7 +24,7 @@ func MigratePolicyDB(db *sql.DB) error { id TEXT PRIMARY KEY, pattern TEXT NOT NULL, file_access TEXT NOT NULL DEFAULT 'deny' CHECK(file_access IN ('allow','deny')), - file_authority TEXT NOT NULL DEFAULT 'standard' CHECK(file_authority IN ('standard','guardian')), + file_authority TEXT NOT NULL DEFAULT 'standard' CHECK(file_authority IN ('standard','elevated')), prevent_write INTEGER NOT NULL DEFAULT 1, prevent_read INTEGER NOT NULL DEFAULT 0, created_by TEXT NOT NULL DEFAULT '', @@ -37,14 +37,14 @@ func MigratePolicyDB(db *sql.DB) error { // // pattern: glob-style pattern matched against the full bash command string. // rule_access: 'deny' (blocks command) or 'allow' (permits command, overrides deny rules). - // rule_authority: 'standard' (any member) or 'guardian' (guardian/admin only). + // rule_authority: 'standard' (any member) or 'elevated' (elevated/admin only). // created_by: user identifier. // created_at / updated_at: ISO 8601 timestamps. `CREATE TABLE IF NOT EXISTS command_rules ( id TEXT PRIMARY KEY, pattern TEXT NOT NULL, rule_access TEXT NOT NULL DEFAULT 'deny' CHECK(rule_access IN ('allow','deny')), - rule_authority TEXT NOT NULL DEFAULT 'standard' CHECK(rule_authority IN ('standard','guardian')), + rule_authority TEXT NOT NULL DEFAULT 'standard' CHECK(rule_authority IN ('standard','elevated')), created_by TEXT NOT NULL DEFAULT '', created_at TEXT NOT NULL, updated_at TEXT NOT NULL @@ -60,6 +60,23 @@ func MigratePolicyDB(db *sql.DB) error { key TEXT PRIMARY KEY, value TEXT NOT NULL )`, + + // policy_events — immutable, append-only log of every policy mutation. + // The existing file_rules and command_rules tables are projections rebuilt + // from this event log. The hash chain provides tamper detection and + // deterministic replay for sync. 
+ `CREATE TABLE IF NOT EXISTS policy_events ( + seq INTEGER PRIMARY KEY AUTOINCREMENT, + event_id TEXT NOT NULL UNIQUE, + event_type TEXT NOT NULL, + payload TEXT NOT NULL, + actor TEXT NOT NULL, + timestamp TEXT NOT NULL, + parent_hash TEXT NOT NULL DEFAULT '', + hash TEXT NOT NULL, + server_seq INTEGER + )`, + `CREATE INDEX IF NOT EXISTS idx_policy_events_server_seq ON policy_events(server_seq)`, } for _, stmt := range stmts { @@ -68,6 +85,23 @@ func MigratePolicyDB(db *sql.DB) error { } } + // Migrate existing rules to policy events if the event log is empty but + // rules already exist (pre-event-sourcing databases). + if err := migrateExistingRulesToEvents(db); err != nil { + return err + } + + // Additive column migrations for notification flag on policy tables. + policyAlterStmts := []string{ + `ALTER TABLE file_rules ADD COLUMN notify INTEGER NOT NULL DEFAULT 0`, + `ALTER TABLE command_rules ADD COLUMN notify INTEGER NOT NULL DEFAULT 0`, + } + for _, stmt := range policyAlterStmts { + if _, err := db.Exec(stmt); err != nil && !isDuplicateColumn(err) { + return err + } + } + return nil } @@ -108,7 +142,7 @@ func MigrateDataDB(db *sql.DB) error { // pattern: the file rule pattern at time of issuance (denormalized for audit). // file_path: specific file if pass is file-scoped; empty string if rule-wide. // issued_to: user identifier of pass recipient. - // issued_by: user identifier of pass issuer (self or guardian). + // issued_by: user identifier of pass issuer (self or elevated). // status: 'active', 'expired', or 'revoked'. // duration_minutes: NULL for indefinite passes. // issued_at: ISO 8601 timestamp. @@ -172,6 +206,12 @@ func MigrateDataDB(db *sql.DB) error { // we ignore that specific error ("duplicate column name"). alterStmts := []string{ `ALTER TABLE hook_log ADD COLUMN pass_id TEXT NOT NULL DEFAULT ''`, + // Hash chain columns for tamper evidence. 
+ `ALTER TABLE hook_log ADD COLUMN notify INTEGER NOT NULL DEFAULT 0`, + `ALTER TABLE hook_log ADD COLUMN parent_hash TEXT NOT NULL DEFAULT ''`, + `ALTER TABLE hook_log ADD COLUMN hash TEXT NOT NULL DEFAULT ''`, + `ALTER TABLE audit_log ADD COLUMN parent_hash TEXT NOT NULL DEFAULT ''`, + `ALTER TABLE audit_log ADD COLUMN hash TEXT NOT NULL DEFAULT ''`, } for _, stmt := range alterStmts { if _, err := db.Exec(stmt); err != nil && !isDuplicateColumn(err) {