From a65d25630970eb9d42048debcc6c5f419fc9309a Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Fri, 18 Apr 2025 14:26:23 -0600 Subject: [PATCH 01/40] experimental sqlite cache --- apps/flowlord/cache/schema.sql | 60 +++++ apps/flowlord/cache/sqlite.go | 406 +++++++++++++++++++++++++++++++ apps/flowlord/handler.go | 50 ++++ apps/flowlord/handler/alert.tmpl | 54 ++++ apps/flowlord/handler/handler.go | 6 + apps/flowlord/taskmaster.go | 8 +- apps/go.mod | 15 +- apps/go.sum | 47 +++- go.work.sum | 7 - 9 files changed, 632 insertions(+), 21 deletions(-) create mode 100644 apps/flowlord/cache/schema.sql create mode 100644 apps/flowlord/cache/sqlite.go create mode 100644 apps/flowlord/handler/alert.tmpl create mode 100644 apps/flowlord/handler/handler.go diff --git a/apps/flowlord/cache/schema.sql b/apps/flowlord/cache/schema.sql new file mode 100644 index 0000000..cbbf1a9 --- /dev/null +++ b/apps/flowlord/cache/schema.sql @@ -0,0 +1,60 @@ +-- SQL schema for the task cache +CREATE TABLE IF NOT EXISTS events ( + id TEXT PRIMARY KEY, + completed BOOLEAN, + last_update TIMESTAMP +); + +CREATE TABLE IF NOT EXISTS task_log ( + id TEXT, + type TEXT, + job TEXT, + info TEXT, + result TEXT, + meta TEXT, + msg TEXT, + created TIMESTAMP, + started TIMESTAMP, + ended TIMESTAMP, + event_id TEXT, + FOREIGN KEY (event_id) REFERENCES events(id) +); + +CREATE INDEX IF NOT EXISTS task_log_created ON task_log (created); +CREATE INDEX IF NOT EXISTS task_log_started ON task_log (started); +CREATE INDEX IF NOT EXISTS task_log_type ON task_log (type); +CREATE INDEX IF NOT EXISTS task_log_job ON task_log (job); +CREATE INDEX IF NOT EXISTS task_log_event_id ON task_log (event_id); + +-- Create a view that calculates task and queue times +CREATE VIEW IF NOT EXISTS tasks AS +SELECT + task_log.id, + task_log.type, + task_log.job, + task_log.info, + -- SQLite doesn't have parse_url function, we'll need to handle this in Go + task_log.meta, + -- SQLite doesn't have parse_param function, we'll need to 
handle this in Go + task_log.msg, + task_log.result, + -- Calculate task duration in seconds + CAST((julianday(task_log.ended) - julianday(task_log.started)) * 24 * 60 * 60 AS INTEGER) as task_seconds, + -- Format task duration as HH:MM:SS + strftime('%H:%M:%S', + CAST((julianday(task_log.ended) - julianday(task_log.started)) * 24 * 60 * 60 AS INTEGER) / 3600 || ':' || + CAST((julianday(task_log.ended) - julianday(task_log.started)) * 24 * 60 * 60 AS INTEGER) % 3600 / 60 || ':' || + CAST((julianday(task_log.ended) - julianday(task_log.started)) * 24 * 60 * 60 AS INTEGER) % 60 + ) as task_time, + -- Calculate queue time in seconds + CAST((julianday(task_log.started) - julianday(task_log.created)) * 24 * 60 * 60 AS INTEGER) as queue_seconds, + -- Format queue duration as HH:MM:SS + strftime('%H:%M:%S', + CAST((julianday(task_log.started) - julianday(task_log.created)) * 24 * 60 * 60 AS INTEGER) / 3600 || ':' || + CAST((julianday(task_log.started) - julianday(task_log.created)) * 24 * 60 * 60 AS INTEGER) % 3600 / 60 || ':' || + CAST((julianday(task_log.started) - julianday(task_log.created)) * 24 * 60 * 60 AS INTEGER) % 60 + ) as queue_time, + task_log.created, + task_log.started, + task_log.ended +FROM task_log; \ No newline at end of file diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go new file mode 100644 index 0000000..a4342c4 --- /dev/null +++ b/apps/flowlord/cache/sqlite.go @@ -0,0 +1,406 @@ +package cache + +import ( + "database/sql" + _ "embed" + "net/url" + "strings" + "time" + + "github.com/pcelvng/task" + "github.com/pcelvng/task/bus" + "modernc.org/sqlite" +) + +//go:embed schema.sql +var schema string + +type SQLite struct { + db *sql.DB + ttl time.Duration +} + +func NewSQLite(ttl time.Duration, dbPath string) (*SQLite, error) { + if ttl < time.Hour { + ttl = time.Hour + } + + // Register the SQLite driver + sql.Register("sqlite", &sqlite.Driver{}) + + // Open the database + db, err := sql.Open("sqlite", dbPath) + if err != nil 
{ + return nil, err + } + + // Execute the schema + _, err = db.Exec(schema) + if err != nil { + return nil, err + } + + return &SQLite{ + db: db, + ttl: ttl, + }, nil +} + +func (s *SQLite) Add(t task.Task) { + if t.ID == "" { + return + } + + // Start a transaction + tx, err := s.db.Begin() + if err != nil { + return + } + defer tx.Rollback() + + // Check if event exists + var eventExists bool + err = tx.QueryRow("SELECT EXISTS(SELECT 1 FROM events WHERE id = ?)", t.ID).Scan(&eventExists) + if err != nil { + return + } + + // Determine completion status and last update time + completed := t.Result != "" + var lastUpdate time.Time + if completed { + lastUpdate, _ = time.Parse(time.RFC3339, t.Ended) + } else { + lastUpdate, _ = time.Parse(time.RFC3339, t.Created) + } + + // Insert or update event + if !eventExists { + _, err = tx.Exec(` + INSERT INTO events (id, completed, last_update) + VALUES (?, ?, ?) + `, t.ID, completed, lastUpdate) + } else { + _, err = tx.Exec(` + UPDATE events + SET completed = ?, last_update = ? + WHERE id = ? + `, completed, lastUpdate, t.ID) + } + if err != nil { + return + } + + // Insert task log + _, err = tx.Exec(` + INSERT INTO task_log ( + id, type, job, info, result, meta, msg, + created, started, ended, event_id + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + `, + t.ID, t.Type, t.Job, t.Info, t.Result, t.Meta, t.Msg, + t.Created, t.Started, t.Ended, t.ID, + ) + if err != nil { + return + } + + // Commit the transaction + tx.Commit() +} + +func (s *SQLite) Get(id string) TaskJob { + var tj TaskJob + var completed bool + var lastUpdate time.Time + + // Get event info + err := s.db.QueryRow(` + SELECT completed, last_update + FROM events + WHERE id = ? + `, id).Scan(&completed, &lastUpdate) + + if err != nil { + return tj + } + + // Get all task logs for this event + rows, err := s.db.Query(` + SELECT id, type, job, info, result, meta, msg, + created, started, ended + FROM task_log + WHERE event_id = ? 
+ ORDER BY created + `, id) + if err != nil { + return tj + } + defer rows.Close() + + var events []task.Task + for rows.Next() { + var t task.Task + err := rows.Scan( + &t.ID, &t.Type, &t.Job, &t.Info, &t.Result, &t.Meta, &t.Msg, + &t.Created, &t.Started, &t.Ended, + ) + if err != nil { + continue + } + events = append(events, t) + } + + tj = TaskJob{ + LastUpdate: lastUpdate, + Completed: completed, + Events: events, + count: len(events), + } + + return tj +} + +func (s *SQLite) Recycle() Stat { + tasks := make([]task.Task, 0) + t := time.Now() + + // Start a transaction + tx, err := s.db.Begin() + if err != nil { + return Stat{} + } + defer tx.Rollback() + + // Get total count before deletion + var total int + err = tx.QueryRow("SELECT COUNT(*) FROM events").Scan(&total) + if err != nil { + return Stat{} + } + + // Get expired events and their last task log + rows, err := tx.Query(` + SELECT e.id, e.completed, tl.id, tl.type, tl.job, tl.info, tl.result, + tl.meta, tl.msg, tl.created, tl.started, tl.ended + FROM events e + JOIN task_log tl ON e.id = tl.event_id + WHERE e.last_update < ? 
+ AND tl.created = ( + SELECT MAX(created) + FROM task_log + WHERE event_id = e.id + ) + `, t.Add(-s.ttl)) + if err != nil { + return Stat{} + } + defer rows.Close() + + // Process expired events + for rows.Next() { + var ( + eventID string + completed bool + task task.Task + ) + err := rows.Scan( + &eventID, &completed, + &task.ID, &task.Type, &task.Job, &task.Info, &task.Result, + &task.Meta, &task.Msg, &task.Created, &task.Started, &task.Ended, + ) + if err != nil { + continue + } + + if !completed { + tasks = append(tasks, task) + } + + // Delete the event and its task logs + _, err = tx.Exec("DELETE FROM task_log WHERE event_id = ?", eventID) + if err != nil { + continue + } + _, err = tx.Exec("DELETE FROM events WHERE id = ?", eventID) + if err != nil { + continue + } + } + + // Get remaining count + var remaining int + err = tx.QueryRow("SELECT COUNT(*) FROM events").Scan(&remaining) + if err != nil { + return Stat{} + } + + // Commit the transaction + tx.Commit() + + return Stat{ + Count: remaining, + Removed: total - remaining, + ProcessTime: time.Since(t), + Unfinished: tasks, + } +} + +func (s *SQLite) Recap() map[string]*Stats { + data := make(map[string]*Stats) + rows, err := s.db.Query(` + SELECT id, type, job, info, result, meta, msg, + created, started, ended + FROM task_log + `) + if err != nil { + return data + } + defer rows.Close() + + for rows.Next() { + var t task.Task + err := rows.Scan( + &t.ID, &t.Type, &t.Job, &t.Info, &t.Result, &t.Meta, &t.Msg, + &t.Created, &t.Started, &t.Ended, + ) + if err != nil { + continue + } + + job := t.Job + if job == "" { + v, _ := url.ParseQuery(t.Meta) + job = v.Get("job") + } + key := strings.TrimRight(t.Type+":"+job, ":") + stat, found := data[key] + if !found { + stat = &Stats{ + CompletedTimes: make([]time.Time, 0), + ErrorTimes: make([]time.Time, 0), + ExecTimes: &DurationStats{}, + } + data[key] = stat + } + stat.Add(t) + } + + return data +} + +func (s *SQLite) SendFunc(p bus.Producer) func(string, 
*task.Task) error { + return func(topic string, tsk *task.Task) error { + s.Add(*tsk) + return p.Send(topic, tsk.JSONBytes()) + } +} + +func (s *SQLite) Close() error { + return s.db.Close() +} + +/* +// GetTasks retrieves all tasks with parsed URL and meta information +func (s *SQLite) GetTasks() ([]TaskView, error) { + rows, err := s.db.Query(` + SELECT id, type, job, info, meta, msg, result, + task_seconds, task_time, queue_seconds, queue_time, + created, started, ended + FROM tasks + `) + if err != nil { + return nil, err + } + defer rows.Close() + + var tasks []TaskView + for rows.Next() { + var t TaskView + var createdStr, startedStr, endedStr string + + err := rows.Scan( + &t.ID, &t.Type, &t.Job, &t.Info, &t.Meta, &t.Msg, &t.Result, + &t.TaskSeconds, &t.TaskTime, &t.QueueSeconds, &t.QueueTime, + &createdStr, &startedStr, &endedStr, + ) + if err != nil { + continue + } + + // Parse timestamps + t.Created, _ = time.Parse(time.RFC3339, createdStr) + t.Started, _ = time.Parse(time.RFC3339, startedStr) + t.Ended, _ = time.Parse(time.RFC3339, endedStr) + + // Parse URL if present + if t.Info != "" { + t.ParsedURL, _ = url.Parse(t.Info) + } + + // Parse meta parameters + if t.Meta != "" { + v, err := url.ParseQuery(t.Meta) + if err == nil { + for _, val := range v { + if len(val) > 0 { + t.ParsedParam = val[0] + break + } + } + } + } + + tasks = append(tasks, t) + } + + return tasks, nil +} + +// GetTaskByID retrieves a single task by ID with parsed URL and meta information +func (s *SQLite) GetTaskByID(id string) (*TaskView, error) { + row := s.db.QueryRow(` + SELECT id, type, job, info, meta, msg, result, + task_seconds, task_time, queue_seconds, queue_time, + created, started, ended + FROM tasks + WHERE id = ? 
+ `, id) + + var t TaskView + var createdStr, startedStr, endedStr string + + err := row.Scan( + &t.ID, &t.Type, &t.Job, &t.Info, &t.Meta, &t.Msg, &t.Result, + &t.TaskSeconds, &t.TaskTime, &t.QueueSeconds, &t.QueueTime, + &createdStr, &startedStr, &endedStr, + ) + if err != nil { + return nil, err + } + + // Parse timestamps + t.Created, _ = time.Parse(time.RFC3339, createdStr) + t.Started, _ = time.Parse(time.RFC3339, startedStr) + t.Ended, _ = time.Parse(time.RFC3339, endedStr) + + // Parse URL if present + if t.Info != "" { + t.ParsedURL, _ = url.Parse(t.Info) + } + + // Parse meta parameters + if t.Meta != "" { + v, err := url.ParseQuery(t.Meta) + if err == nil { + for _, val := range v { + if len(val) > 0 { + t.ParsedParam = val[0] + break + } + } + } + } + + return &t, nil +} */ diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index b35ce85..ddfba06 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -1,8 +1,11 @@ package main import ( + "bytes" "encoding/json" "errors" + "fmt" + "html/template" "io" "log" "net/http" @@ -14,6 +17,7 @@ import ( "github.com/jbsmith7741/uri" + "github.com/pcelvng/task-tools/apps/flowlord/handler" "github.com/pcelvng/task-tools/slack" "github.com/go-chi/chi/v5" @@ -23,6 +27,7 @@ import ( tools "github.com/pcelvng/task-tools" "github.com/pcelvng/task-tools/file" + "github.com/pcelvng/task-tools/tmpl" "github.com/pcelvng/task-tools/workflow" ) @@ -50,6 +55,7 @@ func (tm *taskMaster) StartHandler() { }) router.Get("/task/{id}", tm.taskHandler) router.Get("/recap", tm.recapHandler) + router.Get("/web/alert/{name}", tm.htmlAlert) if tm.port == 0 { log.Println("flowlord router disabled") @@ -286,6 +292,50 @@ func (tm *taskMaster) workflowFiles(w http.ResponseWriter, r *http.Request) { w.Write(b) } +func (tm *taskMaster) htmlAlert(w http.ResponseWriter, r *http.Request) { + name := chi.URLParam(r, "name") + if name == "" { + http.Error(w, "name parameter required", http.StatusBadRequest) + return + } 
+ t := tmpl.InfoTime(name) + reportPath := tmpl.Parse(tm.slack.ReportPath+name, t) + fmt.Println(reportPath) + reader, err := file.NewReader(reportPath, tm.slack.file) + if err != nil { + http.Error(w, reportPath, http.StatusNotFound) + return + } + + scanner := file.NewScanner(reader) + tasks := make([]task.Task, 0, 20) + for scanner.Scan() { + var tsk task.Task + if err := json.Unmarshal(scanner.Bytes(), &tsk); err != nil { + http.Error(w, fmt.Sprintf("unmarshal error: %d %v", scanner.Stats().LineCnt, err.Error()), http.StatusInternalServerError) + } + tasks = append(tasks, tsk) + } + w.WriteHeader(http.StatusOK) + w.Write(alertHTML(tasks)) +} + +// alertHTML will take a list of task and display a html webpage that is easily to digest what is going on. +func alertHTML(tasks []task.Task) []byte { + + tmpl, err := template.New("alert").Parse(handler.AlertTemplate) + if err != nil { + return []byte(err.Error()) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, tasks); err != nil { + return []byte(err.Error()) + } + + return buf.Bytes() +} + type request struct { From string // start To string // end diff --git a/apps/flowlord/handler/alert.tmpl b/apps/flowlord/handler/alert.tmpl new file mode 100644 index 0000000..d9632e1 --- /dev/null +++ b/apps/flowlord/handler/alert.tmpl @@ -0,0 +1,54 @@ + + + + + + +

Task Status Report

+ + + + + + + + + + {{range .}} + + + + + + + + + {{end}} +
Task TypeInfoStatusMessageStartedEnded
{{.Type}}{{.Info}}{{.Result}}{{.Msg}}{{.Started}}{{.Ended}}
+ + \ No newline at end of file diff --git a/apps/flowlord/handler/handler.go b/apps/flowlord/handler/handler.go new file mode 100644 index 0000000..ce77112 --- /dev/null +++ b/apps/flowlord/handler/handler.go @@ -0,0 +1,6 @@ +package handler + +import _ "embed" + +//go:embed alert.tmpl +var AlertTemplate string diff --git a/apps/flowlord/taskmaster.go b/apps/flowlord/taskmaster.go index b64163a..40d683f 100644 --- a/apps/flowlord/taskmaster.go +++ b/apps/flowlord/taskmaster.go @@ -40,7 +40,7 @@ type taskMaster struct { fOpts *file.Options doneTopic string failedTopic string - taskCache *cache.Memory + taskCache *cache.SQLite *workflow.Cache port int cron *cron.Cron @@ -97,11 +97,15 @@ func New(opts *options) *taskMaster { if opts.Slack.MaxFrequency <= opts.Slack.MinFrequency { opts.Slack.MaxFrequency = 16 * opts.Slack.MinFrequency } + db, err := cache.NewSQLite(opts.TaskTTL, "./tasks.db") + if err != nil { + log.Fatal("db init", err) + } opts.Slack.file = opts.File tm := &taskMaster{ initTime: time.Now(), - taskCache: cache.NewMemory(opts.TaskTTL), + taskCache: db, path: opts.Workflow, doneTopic: opts.DoneTopic, failedTopic: opts.FailedTopic, diff --git a/apps/go.mod b/apps/go.mod index e8b4cb7..ccaa143 100644 --- a/apps/go.mod +++ b/apps/go.mod @@ -2,7 +2,7 @@ module github.com/pcelvng/task-tools/apps go 1.23.0 -toolchain go1.23.3 +toolchain go1.24.0 require ( cloud.google.com/go v0.120.0 @@ -31,6 +31,7 @@ require ( github.com/robfig/cron/v3 v3.0.1 github.com/stretchr/testify v1.10.0 google.golang.org/api v0.228.0 + modernc.org/sqlite v1.37.0 ) require ( @@ -60,6 +61,7 @@ require ( github.com/klauspost/compress v1.16.7 // indirect github.com/klauspost/cpuid v1.3.1 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/minio/md5-simd v1.1.0 // indirect github.com/minio/minio-go/v7 v7.0.26 // indirect @@ -67,6 +69,7 @@ require ( 
github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect github.com/nsqio/go-nsq v1.1.0 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pkg/errors v0.9.1 // indirect @@ -74,6 +77,7 @@ require ( github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rs/xid v1.2.1 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect @@ -85,15 +89,15 @@ require ( go.opentelemetry.io/otel/metric v1.35.0 // indirect go.opentelemetry.io/otel/trace v1.35.0 // indirect golang.org/x/crypto v0.36.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.22.0 // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect + golang.org/x/mod v0.24.0 // indirect golang.org/x/net v0.38.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect golang.org/x/sync v0.12.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.11.0 // indirect - golang.org/x/tools v0.29.0 // indirect + golang.org/x/tools v0.31.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/genproto v0.0.0-20250324211829-b45e905df463 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect @@ -103,4 +107,7 @@ require ( gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + modernc.org/libc v1.62.1 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.9.1 // indirect ) diff --git a/apps/go.sum b/apps/go.sum index ea2c9a0..02e0a87 
100644 --- a/apps/go.sum +++ b/apps/go.sum @@ -198,6 +198,8 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= @@ -241,8 +243,6 @@ github.com/jbsmith7741/go-tools v0.4.1/go.mod h1:8v8ffjiI3qOs6epawzxmPB7AOKoNNxZ github.com/jbsmith7741/trial v0.3.1 h1:JZ0/w3lhfH4iacf9R2DnZWtTMa/Uf4O13gnuMLTub/M= github.com/jbsmith7741/trial v0.3.1/go.mod h1:M4FQWUgVpPY2+i53L2nSB0AyPc86kSTIigcr9Q7XQlY= github.com/jbsmith7741/uri v0.4.1/go.mod h1:Ctt8YJ5gCFx5BX/FMFg5VkwuI9buBcvsITIiSMH+TeA= -github.com/jbsmith7741/uri v0.6.0 h1:CXpHG6LnzqvaoKZNTZCDuvrZ1hAx1hg5dtcnsKTsjwE= -github.com/jbsmith7741/uri v0.6.0/go.mod h1:Ctt8YJ5gCFx5BX/FMFg5VkwuI9buBcvsITIiSMH+TeA= github.com/jbsmith7741/uri v0.6.1 h1:RloJXpTe1lXMBSMbyGn+YCBR//aO+OTDUgJ2wH5bQEU= github.com/jbsmith7741/uri v0.6.1/go.mod h1:Ctt8YJ5gCFx5BX/FMFg5VkwuI9buBcvsITIiSMH+TeA= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= @@ -273,6 +273,8 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 
h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= @@ -292,6 +294,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= github.com/nsqio/go-nsq v1.1.0 h1:PQg+xxiUjA7V+TLdXw7nVrJ5Jbl3sN86EhGCQj4+FYE= github.com/nsqio/go-nsq v1.1.0/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= @@ -317,6 +321,8 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod 
h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -398,8 +404,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -423,8 +429,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.24.0 
h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -522,6 +528,7 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -586,8 +593,8 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= -golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -722,6 +729,30 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +modernc.org/cc/v4 v4.25.2 h1:T2oH7sZdGvTaie0BRNFbIYsabzCxUQg8nLqCdQ2i0ic= +modernc.org/cc/v4 v4.25.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.25.1 h1:TFSzPrAGmDsdnhT9X2UrcPMI3N/mJ9/X9ykKXwLhDsU= +modernc.org/ccgo/v4 v4.25.1/go.mod h1:njjuAYiPflywOOrm3B7kCB444ONP5pAVr8PIEoE0uDw= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= +modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= +modernc.org/libc v1.62.1 h1:s0+fv5E3FymN8eJVmnk0llBe6rOxCu/DEU+XygRbS8s= +modernc.org/libc v1.62.1/go.mod h1:iXhATfJQLjG3NWy56a6WVU73lWOcdYVxsvwCgoPljuo= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.9.1 h1:V/Z1solwAVmMW1yttq3nDdZPJqV1rM05Ccq6KMSZ34g= +modernc.org/memory v1.9.1/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil 
v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.37.0 h1:s1TMe7T3Q3ovQiK2Ouz4Jwh7dw4ZDqbebSDTlSJdfjI= +modernc.org/sqlite v1.37.0/go.mod h1:5YiWv+YviqGMuGw4V+PNplcyaJ5v+vQd7TQOgkACoJM= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/go.work.sum b/go.work.sum index 1861a5b..b0f6940 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1740,8 +1740,6 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= @@ -1808,8 +1806,6 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= 
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -2074,7 +2070,6 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2534,8 +2529,6 @@ modernc.org/tcl v1.13.2 h1:5PQgL/29XkQ9wsEmmNPjzKs+7iPCaYqUJAhzPvQbjDA= modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= -modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1 h1:RTNHdsrOpeoSeOF4FbzTo8gBYByaJ5xT7NgZ9ZqRiJM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= From 
cf84a4792e0a13a29a9d44844f64f965702acc00 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Fri, 18 Apr 2025 15:14:25 -0600 Subject: [PATCH 02/40] fixes: sqlite init - handler recognizes workflow dir without ending slash --- apps/flowlord/cache/sqlite.go | 5 +---- apps/flowlord/handler.go | 14 +++++--------- apps/flowlord/main.go | 1 + apps/flowlord/taskmaster.go | 4 ++++ workflow/workflow.go | 5 +++++ 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index a4342c4..e09a665 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -9,7 +9,7 @@ import ( "github.com/pcelvng/task" "github.com/pcelvng/task/bus" - "modernc.org/sqlite" + _ "modernc.org/sqlite" ) //go:embed schema.sql @@ -25,9 +25,6 @@ func NewSQLite(ttl time.Duration, dbPath string) (*SQLite, error) { ttl = time.Hour } - // Register the SQLite driver - sql.Register("sqlite", &sqlite.Driver{}) - // Open the database db, err := sql.Open("sqlite", dbPath) if err != nil { diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index ddfba06..176c1fd 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -9,7 +9,6 @@ import ( "io" "log" "net/http" - "path" "path/filepath" "strconv" "strings" @@ -248,13 +247,10 @@ func (tm *taskMaster) workflowFiles(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) return } - var pth string + pth := tm.path // support directory and single file for workflow path lookup. 
- if _, f := path.Split(tm.path); f == "" { - pth = tm.path + "/" + fName - } else { - // for single file show the file regardless of the file param - pth = tm.path + if tm.Cache.IsDir() { + pth += "/" + fName } sts, err := file.Stat(pth, tm.fOpts) @@ -264,12 +260,12 @@ func (tm *taskMaster) workflowFiles(w http.ResponseWriter, r *http.Request) { } w.Header().Set("Content-Type", "text/plain") if sts.IsDir { + w.WriteHeader(http.StatusOK) files, _ := file.List(pth, tm.fOpts) for _, f := range files { b, a, _ := strings.Cut(f.Path, tm.path) w.Write([]byte(b + a + "\n")) } - w.WriteHeader(http.StatusOK) return } reader, err := file.NewReader(pth, tm.fOpts) @@ -287,8 +283,8 @@ func (tm *taskMaster) workflowFiles(w http.ResponseWriter, r *http.Request) { case "yaml", "yml": w.Header().Set("Context-Type", "text/x-yaml") } - b, _ := io.ReadAll(reader) w.WriteHeader(http.StatusOK) + b, _ := io.ReadAll(reader) w.Write(b) } diff --git a/apps/flowlord/main.go b/apps/flowlord/main.go index 1d913d2..6ea54eb 100644 --- a/apps/flowlord/main.go +++ b/apps/flowlord/main.go @@ -46,6 +46,7 @@ type options struct { func main() { log.SetFlags(log.Lshortfile | log.Ldate | log.Ltime) + opts := &options{ Refresh: time.Minute * 15, TaskTTL: 4 * time.Hour, diff --git a/apps/flowlord/taskmaster.go b/apps/flowlord/taskmaster.go index 40d683f..38f4145 100644 --- a/apps/flowlord/taskmaster.go +++ b/apps/flowlord/taskmaster.go @@ -406,6 +406,10 @@ func (tm *taskMaster) Process(t *task.Task) error { } return nil } + if t.Result == task.WarnResult { + // do nothing + return nil + } return fmt.Errorf("unknown result %q %s", t.Result, t.JSONString()) } diff --git a/workflow/workflow.go b/workflow/workflow.go index 333c630..826d3e6 100644 --- a/workflow/workflow.go +++ b/workflow/workflow.go @@ -62,6 +62,11 @@ type Cache struct { Workflows map[string]Workflow // the key is the filename for the workflow } +// IsDir returns true if the original workflow path is a folder rather than a file +func (c 
*Cache) IsDir() bool { + return c.isDir +} + // New returns a Cache used to manage auto updating a workflow func New(path string, opts *file.Options) (*Cache, error) { c := &Cache{ From b4e38fc01d423b3088730f91014648c93c3bbbac Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Thu, 25 Sep 2025 13:09:27 -0600 Subject: [PATCH 03/40] Sqlite-alerts: Convert alert from file storage to sqlite cache (#236) * plan alert switch * add alert table and updates to notifications * fixes and refactoring * update to alerts html * fix TaskTime, remove Task_CreatedAt * fix unit tests --- .circleci/config.yml | 46 +++- apps/flowlord/cache/cache.go | 19 ++ apps/flowlord/cache/schema.sql | 15 +- apps/flowlord/cache/sqlite.go | 143 ++++++++++ apps/flowlord/handler.go | 58 +++-- apps/flowlord/handler/alert.tmpl | 420 +++++++++++++++++++++++++++-- apps/flowlord/handler_test.go | 67 +++++ apps/flowlord/main.go | 2 + apps/flowlord/sqlite_progress.md | 99 +++++++ apps/flowlord/sqlite_spec.md | 435 +++++++++++++++++++++++++++++++ apps/flowlord/taskmaster.go | 165 ++++++------ apps/flowlord/taskmaster_test.go | 10 +- tasks.db | Bin 0 -> 65536 bytes 13 files changed, 1332 insertions(+), 147 deletions(-) create mode 100644 apps/flowlord/sqlite_progress.md create mode 100644 apps/flowlord/sqlite_spec.md create mode 100644 tasks.db diff --git a/.circleci/config.yml b/.circleci/config.yml index d9d1089..95b3ffa 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,10 +1,14 @@ -version: 2 +version: 2.1 -jobs: - build: +executors: + go-executor: docker: - image: cimg/go:1.23 - working_directory: ~/task-tools + working_directory: ~/task-tools + +jobs: + test: + executor: go-executor steps: - checkout - run: go install github.com/jstemmer/go-junit-report/v2@latest @@ -23,4 +27,36 @@ jobs: - store_artifacts: path: ~/task-tools/junit - store_artifacts: - path: tests.out \ No newline at end of file + path: tests.out + + build: + executor: go-executor + steps: + - checkout + - 
setup_remote_docker: + docker_layer_caching: false + - run: + name: "docker login" + command: echo ${DOCKERHUB_TOKEN} | docker login -u ${DOCKERHUB_USERNAME} --password-stdin + - run: + name: "Push Docker Image" + command: make docker + +workflows: + version: 2 + test_and_build: + jobs: + - test: + filters: + tags: + only: + - /.*/ + - build: + requires: + - test + context: + - DOCKER + filters: + tags: + only: + - /.*/ \ No newline at end of file diff --git a/apps/flowlord/cache/cache.go b/apps/flowlord/cache/cache.go index 39d2cc2..596f33d 100644 --- a/apps/flowlord/cache/cache.go +++ b/apps/flowlord/cache/cache.go @@ -10,6 +10,25 @@ import ( "github.com/pcelvng/task/bus" ) +// AlertRecord represents an alert stored in the database +type AlertRecord struct { + ID int64 `json:"id"` + TaskID string `json:"task_id"` + TaskTime time.Time `json:"task_time"` + Type string `json:"type"` + Job string `json:"job"` + Msg string `json:"msg"` + CreatedAt time.Time `json:"created_at"` + //TaskCreated time.Time `json:"task_created"` +} + +// SummaryLine represents a grouped alert summary for dashboard display +type SummaryLine struct { + Key string `json:"key"` // "task.type:job" + Count int `json:"count"` // number of alerts + TimeRange string `json:"time_range"` // formatted time range +} + type Cache interface { Add(task.Task) Get(id string) TaskJob diff --git a/apps/flowlord/cache/schema.sql b/apps/flowlord/cache/schema.sql index cbbf1a9..fecfce4 100644 --- a/apps/flowlord/cache/schema.sql +++ b/apps/flowlord/cache/schema.sql @@ -57,4 +57,17 @@ SELECT task_log.created, task_log.started, task_log.ended -FROM task_log; \ No newline at end of file +FROM task_log; + +-- Alert records table for storing alert events +CREATE TABLE IF NOT EXISTS alert_records ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id TEXT, -- task ID (can be empty for job send failures) + task_time TIMESTAMP, -- task time (can be empty) + task_type TEXT NOT NULL, -- task type for quick filtering + job 
TEXT, -- task job for quick filtering + msg TEXT NOT NULL, -- alert message (contains alert context) + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX IF NOT EXISTS idx_alert_records_created_at ON alert_records (created_at); \ No newline at end of file diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index e09a665..be22ef0 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -4,12 +4,16 @@ import ( "database/sql" _ "embed" "net/url" + "sort" "strings" + "sync" "time" "github.com/pcelvng/task" "github.com/pcelvng/task/bus" _ "modernc.org/sqlite" + + "github.com/pcelvng/task-tools/tmpl" ) //go:embed schema.sql @@ -18,6 +22,7 @@ var schema string type SQLite struct { db *sql.DB ttl time.Duration + mu sync.Mutex } func NewSQLite(ttl time.Duration, dbPath string) (*SQLite, error) { @@ -48,6 +53,9 @@ func (s *SQLite) Add(t task.Task) { return } + s.mu.Lock() + defer s.mu.Unlock() + // Start a transaction tx, err := s.db.Begin() if err != nil { @@ -107,6 +115,9 @@ func (s *SQLite) Add(t task.Task) { } func (s *SQLite) Get(id string) TaskJob { + s.mu.Lock() + defer s.mu.Unlock() + var tj TaskJob var completed bool var lastUpdate time.Time @@ -159,6 +170,9 @@ func (s *SQLite) Get(id string) TaskJob { } func (s *SQLite) Recycle() Stat { + s.mu.Lock() + defer s.mu.Unlock() + tasks := make([]task.Task, 0) t := time.Now() @@ -244,6 +258,9 @@ func (s *SQLite) Recycle() Stat { } func (s *SQLite) Recap() map[string]*Stats { + s.mu.Lock() + defer s.mu.Unlock() + data := make(map[string]*Stats) rows, err := s.db.Query(` SELECT id, type, job, info, result, meta, msg, @@ -297,6 +314,132 @@ func (s *SQLite) Close() error { return s.db.Close() } +// AddAlert stores an alert record in the database +func (s *SQLite) AddAlert(t task.Task, message string) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Allow empty task ID for job send failures - store as "unknown" + taskID := t.ID + if taskID == "" { + taskID = 
"unknown" + } + + // Extract job using helper function + job := extractJobFromTask(t) + + // Get task time using tmpl.TaskTime function + taskTime := tmpl.TaskTime(t) + + _, err := s.db.Exec(` + INSERT INTO alert_records (task_id, task_time, task_type, job, msg) + VALUES (?, ?, ?, ?, ?) + `, taskID, taskTime, t.Type, job, message) + + return err +} + +// extractJobFromTask is a helper function to get job from task +func extractJobFromTask(t task.Task) string { + job := t.Job + if job == "" { + if meta, err := url.ParseQuery(t.Meta); err == nil { + job = meta.Get("job") + } + } + return job +} + +// GetAlertsByDate retrieves all alerts for a specific date +func (s *SQLite) GetAlertsByDate(date time.Time) ([]AlertRecord, error) { + s.mu.Lock() + defer s.mu.Unlock() + + dateStr := date.Format("2006-01-02") + + query := `SELECT id, task_id, task_time, task_type, job, msg, created_at + FROM alert_records + WHERE DATE(created_at) = ? + ORDER BY created_at DESC` + + rows, err := s.db.Query(query, dateStr) + if err != nil { + return nil, err + } + defer rows.Close() + + var alerts []AlertRecord + for rows.Next() { + var alert AlertRecord + err := rows.Scan( + &alert.ID, &alert.TaskID, &alert.TaskTime, &alert.Type, + &alert.Job, &alert.Msg, &alert.CreatedAt, + ) + if err != nil { + continue + } + alerts = append(alerts, alert) + } + + return alerts, nil +} + +// BuildCompactSummary processes alerts in memory to create compact summary +// Groups by TaskType:Job and collects task times for proper date formatting +func BuildCompactSummary(alerts []AlertRecord) []SummaryLine { + groups := make(map[string]*summaryGroup) + + for _, alert := range alerts { + key := alert.Type + if alert.Job != "" { + key += ":" + alert.Job + } + + // Extract TaskTime from alert meta (not TaskCreated) + + if summary, exists := groups[key]; exists { + summary.Count++ + summary.TaskTimes = append(summary.TaskTimes, alert.TaskTime) + } else { + groups[key] = &summaryGroup{ + Key: key, + Count: 1, + 
TaskTimes: []time.Time{alert.TaskTime}, + } + } + } + + // Convert map to slice and format time ranges using tmpl.PrintDates + var result []SummaryLine + for _, summary := range groups { + // Use tmpl.PrintDates for consistent formatting with existing Slack notifications + timeRange := tmpl.PrintDates(summary.TaskTimes) + + result = append(result, SummaryLine{ + Key: summary.Key, + Count: summary.Count, + TimeRange: timeRange, + }) + } + + // Use proper sorting (can be replaced with slices.Sort in Go 1.21+) + sort.Slice(result, func(i, j int) bool { + if result[i].Count != result[j].Count { + return result[i].Count > result[j].Count // Sort by count descending + } + return result[i].Key < result[j].Key // Then by key ascending + }) + + return result +} + +// summaryGroup is used internally for building compact summaries +type summaryGroup struct { + Key string + Count int + TaskTimes []time.Time +} + /* // GetTasks retrieves all tasks with parsed URL and meta information func (s *SQLite) GetTasks() ([]TaskView, error) { diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index 176c1fd..5d6c93a 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "errors" - "fmt" "html/template" "io" "log" @@ -16,17 +15,17 @@ import ( "github.com/jbsmith7741/uri" - "github.com/pcelvng/task-tools/apps/flowlord/handler" - "github.com/pcelvng/task-tools/slack" - "github.com/go-chi/chi/v5" gtools "github.com/jbsmith7741/go-tools" "github.com/jbsmith7741/go-tools/appenderr" "github.com/pcelvng/task" + "github.com/pcelvng/task-tools/apps/flowlord/cache" + "github.com/pcelvng/task-tools/apps/flowlord/handler" + "github.com/pcelvng/task-tools/slack" + tools "github.com/pcelvng/task-tools" "github.com/pcelvng/task-tools/file" - "github.com/pcelvng/task-tools/tmpl" "github.com/pcelvng/task-tools/workflow" ) @@ -54,7 +53,7 @@ func (tm *taskMaster) StartHandler() { }) router.Get("/task/{id}", tm.taskHandler) 
router.Get("/recap", tm.recapHandler) - router.Get("/web/alert/{name}", tm.htmlAlert) + router.Get("/web/alert", tm.htmlAlert) if tm.port == 0 { log.Println("flowlord router disabled") @@ -289,35 +288,39 @@ func (tm *taskMaster) workflowFiles(w http.ResponseWriter, r *http.Request) { } func (tm *taskMaster) htmlAlert(w http.ResponseWriter, r *http.Request) { - name := chi.URLParam(r, "name") - if name == "" { - http.Error(w, "name parameter required", http.StatusBadRequest) - return + + dt, _ := time.Parse("2006-01-02", r.URL.Query().Get("date")) + if dt.IsZero() { + dt = time.Now() } - t := tmpl.InfoTime(name) - reportPath := tmpl.Parse(tm.slack.ReportPath+name, t) - fmt.Println(reportPath) - reader, err := file.NewReader(reportPath, tm.slack.file) + alerts, err := tm.taskCache.GetAlertsByDate(dt) if err != nil { - http.Error(w, reportPath, http.StatusNotFound) + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err.Error())) return } - scanner := file.NewScanner(reader) - tasks := make([]task.Task, 0, 20) - for scanner.Scan() { - var tsk task.Task - if err := json.Unmarshal(scanner.Bytes(), &tsk); err != nil { - http.Error(w, fmt.Sprintf("unmarshal error: %d %v", scanner.Stats().LineCnt, err.Error()), http.StatusInternalServerError) - } - tasks = append(tasks, tsk) - } w.WriteHeader(http.StatusOK) - w.Write(alertHTML(tasks)) + w.Header().Set("Content-Type", "text/html") + w.Write(alertHTML(alerts)) +} + +// AlertData holds both the alerts and summary data for the template +type AlertData struct { + Alerts []cache.AlertRecord + Summary []cache.SummaryLine } // alertHTML will take a list of task and display a html webpage that is easily to digest what is going on. 
-func alertHTML(tasks []task.Task) []byte { +func alertHTML(tasks []cache.AlertRecord) []byte { + // Generate summary data using BuildCompactSummary + summary := cache.BuildCompactSummary(tasks) + + // Create data structure for template + data := AlertData{ + Alerts: tasks, + Summary: summary, + } tmpl, err := template.New("alert").Parse(handler.AlertTemplate) if err != nil { @@ -325,10 +328,9 @@ func alertHTML(tasks []task.Task) []byte { } var buf bytes.Buffer - if err := tmpl.Execute(&buf, tasks); err != nil { + if err := tmpl.Execute(&buf, data); err != nil { return []byte(err.Error()) } - return buf.Bytes() } diff --git a/apps/flowlord/handler/alert.tmpl b/apps/flowlord/handler/alert.tmpl index d9632e1..9a8c70f 100644 --- a/apps/flowlord/handler/alert.tmpl +++ b/apps/flowlord/handler/alert.tmpl @@ -1,54 +1,418 @@ + + Task Status Report -

Task Status Report

- - - - - - - - - - {{range .}} - - - - - - - - - {{end}} -
Task TypeInfoStatusMessageStartedEnded
{{.Type}}{{.Info}}{{.Result}}{{.Msg}}{{.Started}}{{.Ended}}
+
+
+

Task Status Report

+
+ + + +
+
+
+

Alert Summary

+
+ {{range .Summary}} +
+
+ {{.Key}} + {{.Count}} alerts +
+
{{.TimeRange}}
+
+ {{end}} +
+
+
+ + + + + + + + + + + + + {{range .Alerts}} + + + + + + + + + {{end}} + +
IDTask TypeJobMessageAlerted AtTask Time
{{.TaskID}}{{.Type}}{{.Job}}{{.Msg}}{{.CreatedAt.Format "2006-01-02T15:04:05Z"}}{{if .TaskTime.IsZero}}N/A{{else}}{{.TaskTime.Format "2006-01-02T15"}}{{end}}
+
+
+ Total Alerts: {{len .Alerts}} +
+
+ + + \ No newline at end of file diff --git a/apps/flowlord/handler_test.go b/apps/flowlord/handler_test.go index 12c4bb7..4d81f63 100644 --- a/apps/flowlord/handler_test.go +++ b/apps/flowlord/handler_test.go @@ -2,12 +2,14 @@ package main import ( "errors" + "os" "testing" "time" "github.com/hydronica/trial" "github.com/pcelvng/task" + "github.com/pcelvng/task-tools/apps/flowlord/cache" "github.com/pcelvng/task-tools/workflow" ) @@ -330,3 +332,68 @@ func TestMeta_UnmarshalJSON(t *testing.T) { } trial.New(fn, cases).SubTest(t) } + +// TestWebAlertPreview generates an HTML preview of the alert template for visual inspection +func TestWebAlertPreview(t *testing.T) { + // Create sample alert data to showcase the templating + sampleAlerts := []cache.AlertRecord{ + { + TaskID: "task-001", + TaskTime: trial.TimeHour("2024-01-15T11"), + Type: "data-validation", + Job: "quality-check", + Msg: "Validation failed: missing required field 'email'", + CreatedAt: trial.Time(time.RFC3339, "2024-01-15T11:15:00Z"), + }, + { + TaskID: "task-002", + TaskTime: trial.TimeHour("2024-01-15T12"), + Type: "data-validation", + Job: "quality-check", + Msg: "Validation failed: missing required field 'email'", + CreatedAt: trial.Time(time.RFC3339, "2024-01-15T12:15:00Z"), + }, + { + TaskID: "task-003", + TaskTime: trial.TimeHour("2024-01-15T11"), + Type: "file-transfer", + Job: "backup", + Msg: "File transfer completed: 1.2GB transferred", + CreatedAt: trial.Time(time.RFC3339, "2024-01-15T12:00:00Z"), + }, + { + TaskID: "task-004", + TaskTime: trial.TimeHour("2024-01-15T13"), + Type: "database-sync", + Job: "replication", + Msg: "Database sync failed: connection timeout", + CreatedAt: trial.Time(time.RFC3339, "2024-01-15T13:30:00Z"), + }, + { + TaskID: "task-005", + TaskTime: trial.TimeHour("2024-01-15T13"), + Type: "notification", + Job: "email-alert", + Msg: "Email notification sent to 150 users", + CreatedAt: trial.Time(time.RFC3339, "2024-01-15T14:00:00Z"), + }, + } + + // Generate HTML 
using the alertHTML function + htmlContent := alertHTML(sampleAlerts) + + // Write HTML to a file for easy viewing + outputFile := "alert_preview.html" + err := os.WriteFile(outputFile, htmlContent, 0644) + if err != nil { + t.Fatalf("Failed to write HTML file: %v", err) + } + + t.Logf("Alert preview generated and saved to: ./%s", outputFile) + + // Basic validation that HTML was generated + if len(htmlContent) == 0 { + t.Error("Generated HTML content is empty") + } + +} diff --git a/apps/flowlord/main.go b/apps/flowlord/main.go index 6ea54eb..2ca1769 100644 --- a/apps/flowlord/main.go +++ b/apps/flowlord/main.go @@ -39,6 +39,7 @@ type options struct { FileTopic string `toml:"file_topic" comment:"file topic for file watching"` FailedTopic string `toml:"failed_topic" comment:"all retry failures published to this topic default is retry-failed, disable with '-'"` Port int `toml:"status_port"` + Host string `toml:"host" comment:"host address of server "` Slack *Notification `toml:"slack"` Bus bus.Options `toml:"bus"` File *file.Options `toml:"file"` @@ -51,6 +52,7 @@ func main() { Refresh: time.Minute * 15, TaskTTL: 4 * time.Hour, DoneTopic: "done", + Host: "localhost", FailedTopic: "retry-failed", File: file.NewOptions(), Slack: &Notification{}, diff --git a/apps/flowlord/sqlite_progress.md b/apps/flowlord/sqlite_progress.md new file mode 100644 index 0000000..de7df4b --- /dev/null +++ b/apps/flowlord/sqlite_progress.md @@ -0,0 +1,99 @@ +# SQLite Migration Progress Tracker + +## Current State Analysis +- ✅ Basic SQLite task caching exists (`events`, `task_log` tables) +- ✅ Memory-based workflow management via `workflow.Cache` +- ✅ File pattern matching with in-memory rules +- ✅ Alert system using channels and Slack notifications +- ❌ No persistent storage for workflows, file patterns, or alert history + +## Implementation Status + +### ✅ Completed: Enhanced Specification Document +**Goal**: Design complete SQLite schema for all data requirements +- Created 
comprehensive technical specification +- Defined all required tables and relationships +- Documented implementation approach + +### ⏳ Pending: Alert Records System +**Goal**: Replace current channel-based alerts with persistent storage +- Store alert events with timestamps, task references, and severity +- Link alerts to time-based dashboard views +- Track alert frequency and backoff history +- Replace current channel-based system with persistent storage + +### ⏳ Pending: Workflow Phase Storage +**Goal**: Replace in-memory workflow maps with SQLite tables +- Store workflow files, phases, and their configurations +- Enable dependency mapping and validation queries +- Support dynamic workflow updates without application restarts +- Provide configuration issue detection + +### ⏳ Pending: File Topic Message History +**Goal**: Log all file topic messages with matching results +- Log all file topic messages with metadata +- Track pattern matching results and associated phases +- Store file processing timestamps and outcomes +- Enable file processing analytics and debugging + +### ⏳ Pending: Task.Done Message Recording +**Goal**: Enhanced tracking of task.Done message processing +- Enhanced tracking of task.Done message processing +- Record phase matches and triggered child tasks +- Maintain task relationship chains for debugging +- Link to workflow execution flow + +### ⏳ Pending: Task Record Optimization +**Goal**: Redesign task storage for optimal querying and deduplication +- Redesign task storage for optimal querying +- Implement task deduplication logic +- Optimize for tracking by type, job, creation, result, uniqueID +- Consolidate to single table design with proper indexing + +### ⏳ Pending: Backup and Restoration System +**Goal**: Automated database backup and recovery +- Automated GCS backup during application shutdown +- Periodic backup scheduling (hourly/daily configurable) +- Restoration logic comparing local vs GCS timestamps +- Database schema migration 
support +- Backup verification and integrity checks + +### ⏳ Pending: Retention and Size Management +**Goal**: Database maintenance and optimization +- Configurable retention periods per table type +- Automated cleanup jobs for expired data +- Table size monitoring and alerting +- REST API endpoints for database metrics +- Storage optimization strategies + +### ⏳ Pending: REST API Development +**Goal**: Create API endpoints for UI and external access +- `/api/metrics` - Database size and performance metrics +- `/api/tasks` - Task search and filtering +- `/api/alerts` - Alert history and management +- `/api/files` - File processing history +- `/api/workflows` - Workflow configuration and status +- `/api/summary` - Dashboard summary data + +### ⏳ Pending: Web UI Dashboard Components +**Goal**: Build comprehensive web interface + +**Components**: +- Summary Status - Task breakdowns by type/job with statistics +- Alert Management - Alert timeline with filtering and analysis +- File Processing Dashboard - Searchable file processing history +- Workflow Visualization - Interactive dependency graphs +- Task Search and Management - Advanced task search and filtering + +## Next Actions +1. **Start with Alert Records System** - Most foundational component +2. **Design alert_records table schema** +3. **Modify alert handling in taskmaster.go** +4. **Add persistence to notification system** +5. 
**Create basic API endpoints for alert data** + +## Development Notes +- Maintain backward compatibility during migration +- Use prepared statements and proper indexing +- Test each milestone thoroughly before proceeding +- Update this progress document after each completed milestone diff --git a/apps/flowlord/sqlite_spec.md b/apps/flowlord/sqlite_spec.md new file mode 100644 index 0000000..eb76a8e --- /dev/null +++ b/apps/flowlord/sqlite_spec.md @@ -0,0 +1,435 @@ +# SQLite Technical Specification +Purpose: Technical specification for converting flowlord to fully utilize SQLite for troubleshooting, historical records and configuration management. + +## Database Schema Design + +### Alert Records +Store individual alert records immediately when tasks are sent to the alert channel. Replace file-based reporting with database storage. + +```sql +CREATE TABLE alert_records ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id TEXT NOT NULL, + alert_type TEXT NOT NULL, -- 'retry_failed', 'alerted', 'unfinished', 'job_send_failed' + task_type TEXT NOT NULL, -- task type for quick filtering + job TEXT, -- task job for quick filtering + msg TEXT, -- alert message (can be task_msg or custom alert message) + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + task_created TIMESTAMP -- keep for alert timeline context +); + +CREATE INDEX idx_alert_records_created_at ON alert_records (created_at); + +-- Example queries for alert management + +-- Startup check and day-based dashboard - get today's alerts +SELECT * FROM alert_records +WHERE date(created_at) = date('now') +ORDER BY created_at DESC; + +-- Get alerts for specific date (for dashboard) +SELECT * FROM alert_records +WHERE date(created_at) = ? 
+ORDER BY created_at DESC; + +-- Get full task details for an alert (when needed) +SELECT ar.*, t.* FROM alert_records ar +JOIN task_log t ON ar.task_id = t.id +WHERE ar.id = ?; + +-- Raw data for compact summary (grouping done in Go) +-- This provides the data for: "task.file-check: 3 2025/09/19T11-2025/09/19T13" +SELECT task_type, job, task_created, created_at +FROM alert_records +WHERE date(created_at) = date('now') +ORDER BY task_type, job, created_at; +``` + +### Workflow Phase Storage +Store loaded workflow files and phase configurations for dependency mapping and validation. + +```sql +CREATE TABLE workflow_files ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + file_path TEXT UNIQUE NOT NULL, + file_hash TEXT NOT NULL, + loaded_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + last_modified TIMESTAMP, + is_active BOOLEAN DEFAULT TRUE +); + +CREATE TABLE workflow_phases ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + workflow_file_id INTEGER NOT NULL, + task_name TEXT NOT NULL, + job_name TEXT, + depends_on TEXT, + rule TEXT, + template TEXT, + retry_count INTEGER DEFAULT 0, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (workflow_file_id) REFERENCES workflow_files(id), + UNIQUE(workflow_file_id, task_name, job_name) +); + +CREATE TABLE workflow_dependencies ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + parent_phase_id INTEGER NOT NULL, + child_phase_id INTEGER NOT NULL, + dependency_type TEXT DEFAULT 'direct', -- 'direct', 'conditional' + FOREIGN KEY (parent_phase_id) REFERENCES workflow_phases(id), + FOREIGN KEY (child_phase_id) REFERENCES workflow_phases(id), + UNIQUE(parent_phase_id, child_phase_id) +); + +CREATE INDEX idx_workflow_phases_task ON workflow_phases (task_name, job_name); +CREATE INDEX idx_workflow_dependencies_parent ON workflow_dependencies (parent_phase_id); +CREATE INDEX idx_workflow_dependencies_child ON workflow_dependencies (child_phase_id); +``` + +### File Topic Message History +Log every message that comes through the files topic with 
pattern matching results. + +```sql +CREATE TABLE file_messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + file_path TEXT NOT NULL, + file_size INTEGER, + file_modified_at TIMESTAMP, + received_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + bucket_name TEXT, + etag TEXT, + md5_hash TEXT, + match_found BOOLEAN DEFAULT FALSE, + processing_time_ms INTEGER +); + +CREATE TABLE file_pattern_matches ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + file_message_id INTEGER NOT NULL, + workflow_phase_id INTEGER NOT NULL, + pattern TEXT NOT NULL, + task_sent BOOLEAN DEFAULT FALSE, + task_id TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (file_message_id) REFERENCES file_messages(id), + FOREIGN KEY (workflow_phase_id) REFERENCES workflow_phases(id) +); + +CREATE INDEX idx_file_messages_path ON file_messages (file_path); +CREATE INDEX idx_file_messages_received ON file_messages (received_at); +CREATE INDEX idx_file_pattern_matches_file ON file_pattern_matches (file_message_id); +``` + +### Enhanced Task Recording +Redesign task storage for optimal querying, deduplication, and system tracking. 
+ +```sql +-- Modify existing events table to include deduplication +ALTER TABLE events ADD COLUMN task_hash TEXT; +ALTER TABLE events ADD COLUMN first_seen TIMESTAMP; +ALTER TABLE events ADD COLUMN last_seen TIMESTAMP; + +-- Enhanced task_log with better indexing and deduplication support +CREATE TABLE task_execution_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_id TEXT NOT NULL, + execution_sequence INTEGER NOT NULL, -- For tracking task progression + type TEXT NOT NULL, + job TEXT, + info TEXT, + result TEXT, + meta TEXT, + msg TEXT, + created TIMESTAMP, + started TIMESTAMP, + ended TIMESTAMP, + workflow_file TEXT, + phase_matched BOOLEAN DEFAULT FALSE, + children_triggered INTEGER DEFAULT 0, + retry_count INTEGER DEFAULT 0, + is_duplicate BOOLEAN DEFAULT FALSE, + FOREIGN KEY (event_id) REFERENCES events(id) +); + +CREATE INDEX idx_task_execution_type_job ON task_execution_log (type, job); +CREATE INDEX idx_task_execution_created ON task_execution_log (created); +CREATE INDEX idx_task_execution_result ON task_execution_log (result); +CREATE INDEX idx_task_execution_event_id ON task_execution_log (event_id); +CREATE INDEX idx_task_execution_workflow ON task_execution_log (workflow_file); +``` + +### Task Relationships and Dependencies +Track task.Done message processing and child task triggering. 
+ +```sql +CREATE TABLE task_relationships ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + parent_task_id TEXT NOT NULL, + child_task_id TEXT NOT NULL, + relationship_type TEXT DEFAULT 'triggered', -- 'triggered', 'retry', 'failed_retry' + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + workflow_phase_id INTEGER, + FOREIGN KEY (parent_task_id) REFERENCES events(id), + FOREIGN KEY (child_task_id) REFERENCES events(id), + FOREIGN KEY (workflow_phase_id) REFERENCES workflow_phases(id) +); + +CREATE INDEX idx_task_relationships_parent ON task_relationships (parent_task_id); +CREATE INDEX idx_task_relationships_child ON task_relationships (child_task_id); +``` + +## Database Maintenance + +### Backup and Restoration +```sql +-- Create metadata table for backup tracking +CREATE TABLE backup_metadata ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + backup_type TEXT NOT NULL, -- 'scheduled', 'shutdown', 'manual' + backup_path TEXT NOT NULL, + backup_size INTEGER, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + verified BOOLEAN DEFAULT FALSE, + gcs_path TEXT +); +``` + +**Implementation Requirements**: +- Automated GCS backup during application shutdown +- Periodic backup scheduling (configurable intervals) +- Restoration logic comparing local vs GCS timestamps +- Database schema migration support +- Backup verification and integrity checks + +### Retention and Size Management +```sql +-- Create retention policies table +CREATE TABLE retention_policies ( + table_name TEXT PRIMARY KEY, + retention_days INTEGER NOT NULL, + max_records INTEGER, + cleanup_enabled BOOLEAN DEFAULT TRUE, + last_cleanup TIMESTAMP +); + +-- Default retention policies +INSERT INTO retention_policies (table_name, retention_days, max_records) VALUES +('alert_records', 90, 100000), +('file_messages', 30, 50000), +('file_pattern_matches', 30, 50000), +('task_execution_log', 7, 500000), +('task_relationships', 7, 100000); +``` + +## API Endpoints Specification + +### Required REST Endpoints +``` +GET 
/api/metrics - Database size and performance metrics +GET /api/tasks - Task search and filtering +POST /api/tasks/search - Advanced task search +GET /api/alerts - Alert history and management +GET /api/files - File processing history +GET /api/workflows - Workflow configuration and status +GET /api/summary - Dashboard summary data +DELETE /api/cleanup - Manual cleanup operations +POST /api/backup - Manual backup trigger +``` + +### Response Formats +```json +// GET /api/summary response +{ + "time_period": "24h", + "task_summary": { + "total": 1500, + "completed": 1200, + "failed": 50, + "in_progress": 250 + }, + "by_type": { + "data-load": {"completed": 400, "failed": 10, "avg_duration": "2m30s"}, + "transform": {"completed": 800, "failed": 40, "avg_duration": "5m15s"} + } +} + +// GET /api/alerts response +{ + "alerts": [ + { + "id": 123, + "task_id": "abc-123", + "alert_type": "retry_exhausted", + "severity": "critical", + "message": "Task failed after 3 retries", + "created_at": "2023-12-01T10:30:00Z", + "dashboard_link": "/dashboard/alerts/2023-12-01" + } + ], + "pagination": {"page": 1, "total": 150} +} +``` + +## UI Component Specifications + +### Summary Status Dashboard +- Time-based filtering (hour/day/week/month) +- Task breakdown by type and job with completion statistics +- Average execution time calculations and trends +- Error rate visualization and alerting thresholds + +### Alert Management Interface + +#### Alert Dashboard Design +Replace the current file-based alert reporting with a comprehensive web dashboard for alert management and analysis. + +#### Dashboard Components + +**1. 
Alert Summary View (Compact)** +``` +Flowlord Alerts - Today (2025/09/19) +════════════════════════════════════════════════════════════ +task.file-check: 3 2025/09/19T11-2025/09/19T13 +task.salesforce: 9 2025/09/19T08-2025/09/19T15 +task.data-load: 1 2025/09/19T14 +════════════════════════════════════════════════════════════ +Total: 13 alerts across 3 task types +``` + +**Implementation:** +- Use simple SQL: `SELECT * FROM alert_records WHERE date(created_at) = date('now')` +- Process in Go to create compact summary (replaces current Slack formatting logic) +- Group by `task_type:job`, count occurrences, calculate time ranges +- Display both count and time span (first alert → last alert) + +**2. Detailed Alert Timeline** +- Chronological list of all alerts for the selected day +- Expandable rows showing full task details (via JOIN with task_log) +- Click-through to individual task execution details +- Color coding by alert_type: retry_exhausted (red), alert_result (orange), unfinished (yellow) + +**3. 
Alert Filtering and Navigation** +- Date picker for viewing historical alerts +- Filter by alert_type, task_type, job +- Quick links: Today, Yesterday, Last 7 days +- Search by task_id or message content + +#### Dashboard Implementation + +**Simple HTML Rendering Approach:** +- Go queries SQLite directly and renders HTML via templates +- No JSON APIs needed for dashboard UI +- Vanilla JavaScript only for basic interactions (date picker, auto-refresh) +- Leverage existing `handler/alert.tmpl` pattern + +**Page Structure:** +```html +/alerts - Today's alert dashboard (default) +/alerts/2025-09-19 - Specific date dashboard +/alerts/summary - Compact summary view only +``` + +**Implementation Pattern:** +```go +func (tm *taskMaster) alertDashboard(w http.ResponseWriter, r *http.Request) { + date := chi.URLParam(r, "date") // or default to today + alerts := tm.taskCache.GetAlertsByDate(date) // single query + summary := buildCompactSummary(alerts) // process in memory, same logic as current Slack formatting + + data := struct { + Date string + Alerts []AlertRecord + Summary []SummaryLine // processed summary data + Total int + }{date, alerts, summary, len(alerts)} + + tmpl.Execute(w, data) // render HTML directly +} + +// Process alerts in memory to create compact summary +func buildCompactSummary(alerts []AlertRecord) []SummaryLine { + groups := make(map[string]*SummaryLine) + for _, alert := range alerts { + key := alert.TaskType + ":" + alert.Job + if summary, exists := groups[key]; exists { + summary.Count++ + summary.updateTimeRange(alert.TaskCreated) + } else { + groups[key] = &SummaryLine{ + Key: key, Count: 1, + FirstTime: alert.TaskCreated, + LastTime: alert.TaskCreated, + } + } + } + return mapToSlice(groups) // convert to sorted slice +} +``` + +**Optional API Endpoint (for troubleshooting only):** +``` +GET /api/alerts?date=YYYY-MM-DD&format=json - JSON output for debugging/scripts +``` + +**Features:** +- Server-side rendering for fast page loads +- Simple 
date navigation via URL parameters +- Auto-refresh via basic JavaScript setInterval +- Minimal client-side complexity + +#### Startup Alert Integration + +**On Application Start:** +1. Query today's alerts: `SELECT * FROM alert_records WHERE date(created_at) = date('now')` +2. If alerts found, generate compact summary in existing Slack format +3. Send single startup notification: "Found X alerts from today" + summary + dashboard link +4. Dashboard link: `/alerts` (always points to today) + +**Benefits:** +- Replaces file-based alert reports entirely +- Real-time alert visibility via web dashboard +- Maintains familiar compact summary format for Slack +- Historical alert analysis and trending +- Better debugging with full task context +- Mobile-friendly alert monitoring + +### File Processing Dashboard +- Searchable file processing history with pattern match results +- File processing timeline and status indicators +- Pattern rule debugging and performance metrics +- File processing success/failure analytics + +### Workflow Visualization +- Interactive dependency graphs showing phase relationships +- Phase configuration validation and issue highlighting +- Next scheduled execution times and cron schedules +- Workflow performance analytics and bottleneck identification + +### Task Search and Management +- Advanced search by type, job, result, uniqueID, time range +- Task lifecycle tracking and execution history +- Retry and failure analysis with root cause identification +- Bulk operations for task management + +## Technical Architecture + +### Database Design Principles +- Single SQLite file for simplicity and performance +- Optimized indexes for common query patterns +- Prepared statements for security and performance +- Batch operations for high-throughput scenarios +- Foreign key constraints for data integrity + +### Backward Compatibility Strategy +- Maintain existing Cache interface during migration +- Gradual replacement of memory storage with SQLite +- 
Preserve current API contracts and behavior +- Seamless transition without downtime + +### Performance Considerations +- Connection pooling and prepared statement caching +- Asynchronous operations for non-critical writes +- Query optimization with proper indexing strategy +- WAL mode for better concurrent access +- Vacuum and analyze operations for maintenance \ No newline at end of file diff --git a/apps/flowlord/taskmaster.go b/apps/flowlord/taskmaster.go index 38f4145..a86cc9c 100644 --- a/apps/flowlord/taskmaster.go +++ b/apps/flowlord/taskmaster.go @@ -42,17 +42,18 @@ type taskMaster struct { failedTopic string taskCache *cache.SQLite *workflow.Cache - port int - cron *cron.Cron - slack *Notification - files []fileRule + HostName string + port int + cron *cron.Cron + slack *Notification + files []fileRule alerts chan task.Task } type Notification struct { slack.Slack - ReportPath string + //ReportPath string MinFrequency time.Duration MaxFrequency time.Duration @@ -113,6 +114,7 @@ func New(opts *options) *taskMaster { producer: producer, doneConsumer: consumer, port: opts.Port, + HostName: opts.Host, cron: cron.New(cron.WithSeconds()), dur: opts.Refresh, slack: opts.Slack, @@ -188,7 +190,7 @@ func (tm *taskMaster) Run(ctx context.Context) (err error) { return fmt.Errorf("workflow setup %w", err) } - // refresh the workflow if the file(s) have been changed + // check for alerts from today on startup // refresh the workflow if the file(s) have been changed _, err = tm.refreshCache() if err != nil { log.Fatal(err) @@ -210,7 +212,7 @@ func (tm *taskMaster) Run(ctx context.Context) (err error) { go tm.readFiles(ctx) go tm.StartHandler() - go tm.slack.handleNotifications(tm.alerts, ctx) + go tm.handleNotifications(tm.alerts, ctx) <-ctx.Done() log.Println("shutting down") return nil @@ -290,7 +292,12 @@ func (tm *taskMaster) Process(t *task.Task) error { meta, _ := url.ParseQuery(t.Meta) tm.taskCache.Add(*t) // attempt to retry - if t.Result == task.ErrResult { + 
switch t.Result { + case task.WarnResult: + return nil // do nothing + case task.AlertResult: + tm.alerts <- *t + case task.ErrResult: p := tm.Get(*t) rules, _ := url.ParseQuery(p.Rule) @@ -322,35 +329,28 @@ func (tm *taskMaster) Process(t *task.Task) error { } }() return nil - } else { // send to the retry failed topic if retries > p.Retry - meta.Set("retry", "failed") - meta.Set("retried", strconv.Itoa(p.Retry)) - t.Meta = meta.Encode() - if tm.failedTopic != "-" && tm.failedTopic != "" { - tm.taskCache.Add(*t) - if err := tm.producer.Send(tm.failedTopic, t.JSONBytes()); err != nil { - return err - } - } - - // don't alert if slack isn't enabled or disabled in phase - if tm.slack == nil || rules.Get("no_alert") != "" { - return nil + } + // send to the retry failed topic if retries > p.Retry + meta.Set("retry", "failed") + meta.Set("retried", strconv.Itoa(p.Retry)) + t.Meta = meta.Encode() + if tm.failedTopic != "-" && tm.failedTopic != "" { + tm.taskCache.Add(*t) + if err := tm.producer.Send(tm.failedTopic, t.JSONBytes()); err != nil { + return err } - tm.alerts <- *t } - return nil - } - - if t.Result == task.AlertResult && tm.slack != nil { - if tm.slack != nil { - tm.alerts <- *t + // don't alert if slack isn't enabled or disabled in phase + if rules.Get("no_alert") != "" { + return nil } - } - // start off any children tasks - if t.Result == task.CompleteResult { + tm.alerts <- *t + + return nil + case task.CompleteResult: + // start off any children tasks taskTime := tmpl.TaskTime(*t) phases := tm.Children(*t) for _, p := range phases { @@ -405,10 +405,7 @@ func (tm *taskMaster) Process(t *task.Task) error { log.Printf("no matches found for %v:%v", t.Type, t.Job) } return nil - } - if t.Result == task.WarnResult { - // do nothing - return nil + } return fmt.Errorf("unknown result %q %s", t.Result, t.JSONString()) } @@ -478,23 +475,29 @@ func (tm *taskMaster) readFiles(ctx context.Context) { // It uses an exponential backoff to limit the number of messages 
// ie, (min) 5 -> 10 -> 20 -> 40 -> 80 -> 160 (max) // The backoff is cleared after no failed tasks occur within the window -func (n *Notification) handleNotifications(taskChan chan task.Task, ctx context.Context) { +func (tm *taskMaster) handleNotifications(taskChan chan task.Task, ctx context.Context) { sendChan := make(chan struct{}) - tasks := make([]task.Task, 0) + var alerts []cache.AlertRecord go func() { - dur := n.MinFrequency - for { - if len(tasks) > 0 { + dur := tm.slack.MinFrequency + for ; ; time.Sleep(dur) { + var err error + alerts, err = tm.taskCache.GetAlertsByDate(time.Now()) + if err != nil { + log.Printf("failed to retrieve alerts: %v", err) + continue + } + + if len(alerts) > 0 { sendChan <- struct{}{} - if dur *= 2; dur > n.MaxFrequency { - dur = n.MaxFrequency + if dur *= 2; dur > tm.slack.MaxFrequency { + dur = tm.slack.MaxFrequency } log.Println("wait time ", dur) - } else if dur != n.MinFrequency { - dur = n.MinFrequency + } else if dur != tm.slack.MinFrequency { + dur = tm.slack.MinFrequency log.Println("Reset ", dur) } - time.Sleep(dur) } }() for { @@ -503,47 +506,19 @@ func (n *Notification) handleNotifications(taskChan chan task.Task, ctx context. 
// if the task result is an alert result, send a slack notification now if tsk.Result == task.AlertResult { b, _ := json.MarshalIndent(tsk, "", " ") - if err := n.Slack.Notify(string(b), slack.Critical); err != nil { + if err := tm.slack.Slack.Notify(string(b), slack.Critical); err != nil { log.Println(err) } } else { // if the task result is not an alert result add to the tasks list summary - tasks = append(tasks, tsk) + if err := tm.taskCache.AddAlert(tsk, tsk.Msg); err != nil { + log.Printf("failed to store alert: %v", err) + } } case <-sendChan: // prepare message - m := make(map[string]*alertStat) // [task:job]message - fPath := tmpl.Parse(n.ReportPath, time.Now()) - writer, err := file.NewWriter(fPath, n.file) - if err != nil { + if err := tm.sendAlertSummary(alerts); err != nil { log.Println(err) } - for _, tsk := range tasks { - writer.WriteLine(tsk.JSONBytes()) - - meta, _ := url.ParseQuery(tsk.Meta) - key := tsk.Type + ":" + meta.Get("job") - v, found := m[key] - if !found { - v = &alertStat{key: key, times: make([]time.Time, 0)} - m[key] = v - } - v.count++ - v.times = append(v.times, tmpl.TaskTime(tsk)) - } - - var s string - for k, v := range m { - s += fmt.Sprintf("%-35s%5d %v\n", k, v.count, tmpl.PrintDates(v.times)) - } - if err := writer.Close(); err == nil && fPath != "" { - s += "see report at " + fPath - } - fmt.Println(s) - if err := n.Slack.Notify(s, slack.Critical); err != nil { - log.Println(err) - } - - tasks = tasks[0:0] // reset slice case <-ctx.Done(): return } @@ -551,10 +526,34 @@ func (n *Notification) handleNotifications(taskChan chan task.Task, ctx context. 
} -type alertStat struct { - key string - count int - times []time.Time +// sendAlertSummary sends a formatted alert summary to Slack +// This can be reused by backup alert system and other components +func (tm *taskMaster) sendAlertSummary(alerts []cache.AlertRecord) error { + if len(alerts) == 0 { + return nil + } + + // build compact summary using existing logic + summary := cache.BuildCompactSummary(alerts) + + // format message similar to current Slack format + var message strings.Builder + message.WriteString(fmt.Sprintf("see report at %v:%d/web/alert?dt=%s\n", tm.HostName, tm.port, time.Now().Format("2006-01-02"))) + + for _, line := range summary { + message.WriteString(fmt.Sprintf("%-35s%5d %s\n", + line.Key+":", line.Count, line.TimeRange)) + } + + // send to Slack if configured + log.Println(message.String()) + if tm.slack != nil { + if err := tm.slack.Notify(message.String(), slack.Critical); err != nil { + return fmt.Errorf("failed to send alert summary to Slack: %w", err) + } + } + + return nil } // jitterPercent will return a time.Duration representing extra diff --git a/apps/flowlord/taskmaster_test.go b/apps/flowlord/taskmaster_test.go index 50db832..9e91457 100644 --- a/apps/flowlord/taskmaster_test.go +++ b/apps/flowlord/taskmaster_test.go @@ -15,6 +15,7 @@ import ( "github.com/pcelvng/task/bus/nop" "github.com/robfig/cron/v3" + "github.com/pcelvng/task-tools/apps/flowlord/cache" "github.com/pcelvng/task-tools/workflow" ) @@ -22,7 +23,7 @@ const base_test_path string = "../../internal/test/" func TestTaskMaster_Process(t *testing.T) { delayRegex := regexp.MustCompile(`delayed=(\d+.\d+)`) - cache, fatalErr := workflow.New(base_test_path+"workflow", nil) + workflowCache, fatalErr := workflow.New(base_test_path+"workflow", nil) if fatalErr != nil { t.Fatal("cache init", fatalErr) } @@ -32,7 +33,12 @@ func TestTaskMaster_Process(t *testing.T) { } fn := func(tsk task.Task) ([]task.Task, error) { var alerts int64 - tm := taskMaster{doneConsumer: 
consumer, Cache: cache, failedTopic: "failed-topic", alerts: make(chan task.Task), slack: &Notification{}} + // Initialize taskCache for the test + taskCache, err := cache.NewSQLite(time.Hour, ":memory:") + if err != nil { + return nil, err + } + tm := taskMaster{doneConsumer: consumer, Cache: workflowCache, taskCache: taskCache, failedTopic: "failed-topic", alerts: make(chan task.Task), slack: &Notification{}} producer, _ := nop.NewProducer("") tm.producer = producer nop.FakeMsg = tsk.JSONBytes() diff --git a/tasks.db b/tasks.db new file mode 100644 index 0000000000000000000000000000000000000000..7f3afac85e3d3b7e4f9bf6d20a2917aabca4db34 GIT binary patch literal 65536 zcmeHQOKclSdTv^8SyazrY}2wVyJe3kDRztfWRpY)uf`+E&e+zh9vO|1h0u>`scn+Y z>2BKN^(uia%?1HBL9hve1lSw`OfHKBW;Z|%lS_~V76`By1i>Oe275^kiy(&}w@rXt z@>jDTBu!dvkL?}X)s)y>e^vce_0`{1fBme-`Zumw9%4+}sj40$Qg2YH6!mq6p(v^h z|8KxQ4gdY{KN{TO=3lAs;W76zweX$!H2uHSSl@RkdX@g!_+0v*(s$C?u`kB<23PyP z*?$x~$VCt!2vB=tLz$)7)PqT@W}uHOb23wF0U;u zZY(iZZ@ja##XK$=vwDMRi^|N!Jm=p#I+(d|A@zZ$Y8B*uSb=t7+eIHXP)+Z29u1P* zSbY2164McOuBKMeMTj}SZ;uXTW@l5sKjrgmOS;W5+QzTVikzy#cYWFWuO<=!f)F_x3gf-&oq(xQL@h7t1o(lezj1lv}Ma z8e&kj?%l(!A9Ksr7#GzH##F6J!$IzR%+0f^h}pP$eQAAT@%noK(~z6**~YIm=Dj+^ zgq$!pR^fm1+BJwB#l)UC?GGE4ew#6^iiaGlw&N#^^YdsCa9P6WI7F}5Rd?r+WSclZ zY#(2yid&qa^Hi(m26XI1A9>9}#NrW`pLu6#d2#dF26K6HZ4F@Gj*uCh|L%p6 zq2rm%@Shj&Sm-WB+x5SK`u**zOYi%2;4+Krqw7o8mM(8FG0qixXTdUBsu2@BK&gWS zCGVhEHPddApnU7iKFzR^TbuQmUG)xP>Z;?S?S@lf%tlT3EW37*xr=5i6{d!e!FV()M>ao9pwT!Y*f4`-OFl4Z&aWWXbQ5?a8z6r zoL?6jiJIa1nZCTZzA-mkv@Y}W%x^G)1Ydcs z`L(znzIyYF>iQ`|yWpo5Wo)^rKlra#lq(2)e84 z-T*PeEn8CQ2_>Yv`~*$Kq4MJ=+)IF(xq8+T^b9ior0!9?ayXQg?lQCuF*f?x30nJ$OhM2|ACaeiKro zU|6XMiOgIWdH&s@!OYB5N^j>4Jx3UJxI0_itG00j!u*ZF!OZMbYNwqU?wLYvL*2RI zAnoihW&Ztv!3;ZeSyR!^JwQ~)t{QdrJjDudWEI}~1C~vNFr>!1o3F|E4U>;Z?8I=#F5Hc`IGep+3jue+tY$i6tnN=-9tC_OD{EU&IDU0u21Yn!ojF>6c9OQ8H*URr1TNba0v%#Y50 zD>amvm`FWX^-*g2G{29uA=IR`5US75n3!wV&mD}iD10tXkfxpA3xQa>135j2t4@)E>T43m`#hQby_v&br{ 
zpz20mG(;ptY-G$vk~`Vxz=q>wRnknhAn-gZ2}O<7cu8Yr-q3W>Fp()5?Q9;+&FPU9 zgD;Al26>QFRR%U9&lY$Av7A&C6;mt-MKjKg)U{C5zl^MiillG_B(r>8FR~I63|7Of@+dac z%E$_@0wV#~X!#!=-bHI1W z0kDCBC^C{cs83cG@&K!*aI9vCItwKz6paED5;7NxoXBwbPCQ2GPhtH3J&OL`_;2~2 zpN{{O&NM%rQcm0i0fGQQfFM8+AP5iy2m%BFf&f8)AV3hHsJXt)R1yow82xGLY!VyC zQ5c&3Dv1>Xk6}MgVy75^fS)9>L<~c~`$=pKLlAH+i8Wym0?sEH^befwE2nxH7VyvZ z-$=s6uZ#q?f&TQs?^6AJZ&K;gqdy+{yzkAvH;=3mX(k9fB?1RqC+W?p>4(QN4|4mL zWLe%!BCUAd^^Rj1_`Z#*Fai5Mp6OHaV0t#5?Ydo8z1_V_o?Q;;_FmR)Xt$8=1(d+G zmyM#p>=<<$Pc{nUguEh;y{!KT)2`dY9PG}Gm!~EV(&>ZAw46?td%cY~yR7Ne@pPxE zTDxQ**RyK=jNGyZ&kMR;&0_XA`{fNyvt9Pe(sY!zk-TUE;lc!QOh z#FOPks+(R&Ld7&@|sZKktWNM#G#_hfjOAZ=LJnJnu;nSPEwI1<^@9q z`yo7`qNZ{vi}PF-_-x&=JzKXcWhi~N;UcHZ3!?7WwQ^)^mbjukWm)>r+Z~S z>*~A6XjIU)5BrWgBQDMe%kb9~8K8&Qz$2go-vK`G1arq+xW!3=gXdVzkdQ1XDr@kH z#7eph26;2Du!bxc9LIyblv5yNFFY4+xppnir)89Th0NZyo!e%`zFQ9RwBXs*N;5my z_`4Are{c4ZAc{J&8oe&F_;pFrgMPJM7kEAKd$flf$9CFfAiu#2S*WFQ{9;EU(G-q* zWxU)188MVqyS8Jq&;}SjnhnQl&fRBx{=xIPUuXLe7WiBVmTWk#2YK>#VgCJ273Qtt z$<&)`*BDq|QnNiKSX%-2id`#3l8S+8{`hk?s{^Dt zeHu-WG^ZUp;a9gg9Zmm&>QDcKq7Uey@pSqpNj^_GHYCvq0t5kq06~BtKoB4Z5CjMU z1Ob8oLEuXvaI#NMb^Fa>Zahl~kp-^B;yeo;6Wu(rU0C@Bl4CSO{d*Kcj*Zvnz~dhy z4=MJT29Hm>c`)GdXOV{($2bcfd)++FfJeMgLEH%E^xvEUk9S*QL>{NXBQCiRr!fg0 zamiaePQ@jMr5TZBQn@)n|7AHj|b^QFX< zL?#Fj1PB5I0fGQQfFM8+AP5iy2m%Cw7lAxpN)S;|LpOf7a^!54T1ncfFM8+ zAP5iy2m%BFf&f8)AV3g!eGwSzgCi4yHMeu)|3;k(7eda0BON-jt-+4bb!tEKIR}nA zp<@~xR_I{B@jId8EI3v}#~EbLU6t z^He{bIYNI9d;a|e{Y8?`>nnkzwSd$e!TA4|iwUdQOaZdwhRUgzj9D4?|LRW=RI%{VRt&gIy77Mx1# zJOj?TDfl{n8otiN9+Ti~$-$|X98A#vLCwV>PXg8xu`>hC(G&1BaJ(hXK+E%-Rl!lP zG2j>;{kJ4@el&fA>L2?hMSp$lm*c-j?~eUR{7Eap=NIX=53nE?L4Y7Y5FiK;1PB5I z0fGQQfFM8+coqmG-AH~GhA#6-wnfc8ZcFuJQdv0-jA0 zkQx6z^{XUX)1QTapC<`;1_FMPWDol(2$*QI`_m9G(PsCP5O6-puK1@OS2GjzH>ppO zY#)9S0^Uy&FarVCk{p?L0s_t_33&W*1sI?|rj7>rdR#a>pZ|W66A|zTel1BrJj$O> zBFN{{5OQSjPbs=Ketzs<$CgL`Z{$bAXz1raA{Rk`AV3fx2oMBbC;|_Eb7X|tT$!5Q zpA|#REcuzrT>F{I*ihbnw#X}Zq(IwI!w=};VSw*KdQ(iID?|$nWrl;3irdlam^*0N 
zjhex5jBPRnIQ*c3T*kB=IOI4u)LP!)icLpbjviha3M%xHm_#dy`Qd|)JF;|mh0aX7 zbJwq~uV1~fGGD5zj*FO=^T3F%P!2{=CK*>mQ2Ix66DV7bO#tC^v9(!5;5l>_77G@7Qj_v4>V@WRyAo>0a4XfuWJTYdg2iihZ|SwQD;zTU9M%!LzHC&e_MW*ksk_@u}e{scpcO zL_D|nW;a?AAvxGRHeQ~ZJV>VxCev~{UCtK@dQnhJMG*2rA+O1%SVXGe@65uQUfqLT zTVel}O9pa1tEPGu)T9T`3%Xs+;#$X2Ro-z+xtvvVt({%ZWgS$v9SFU{Hr!m@s-uck zLpcN8$pzeU%@8hYxt@d6YA&xp9*PD+ii*@?-YjZ5Zj#{q+;$n-|jilbHu!+r<52l6jz5j$5{cPTFME)q9$%--ag3t=lyh>}|BwhMfpY35Cs-HuDBX0l2K5R`2 z<{7m`;0Vv-PD)EsLHp~+Nv*zewfeC-K>Hd;=3wT*1iY+74`OI>G^2V(xIlQC)cEdrIujj0Zm#dNphP-=MY+w3ff;kO=|UVn)FLqs|Q{(qXE z9LV?|e)ntr@!_jG{tt$EeQoe`jUy}Wco|8&1hsJTdr_qzC z^f<loZ*7^-r3qv63w3S*;+#ZzxEEHBG!Os&e6_8NB76y~*iiKC+_} f4a6`ooo1#(Xh{ycIk=W3;`BsjCL&IcX=nZ)Un0|W literal 0 HcmV?d00001 From 3cffcb0ec673ad7d7298135935174f669b722cd3 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Thu, 25 Sep 2025 15:27:18 -0600 Subject: [PATCH 04/40] make db configurable --- apps/flowlord/main.go | 2 ++ apps/flowlord/taskmaster.go | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/apps/flowlord/main.go b/apps/flowlord/main.go index 2ca1769..ba7a64d 100644 --- a/apps/flowlord/main.go +++ b/apps/flowlord/main.go @@ -35,6 +35,7 @@ type options struct { Workflow string `toml:"workflow" comment:"path to workflow file or directory"` Refresh time.Duration `toml:"refresh" comment:"the workflow changes refresh duration value default is 15 min"` TaskTTL time.Duration `toml:"task-ttl" comment:"time that tasks are expected to have completed in. 
This values tells the cache how long to keep track of items and alerts if items haven't completed when the cache is cleared"` + DBPath string `toml:"db-path" comment:"path to the sqlite DB file"` DoneTopic string `toml:"done_topic" comment:"default is done"` FileTopic string `toml:"file_topic" comment:"file topic for file watching"` FailedTopic string `toml:"failed_topic" comment:"all retry failures published to this topic default is retry-failed, disable with '-'"` @@ -53,6 +54,7 @@ func main() { TaskTTL: 4 * time.Hour, DoneTopic: "done", Host: "localhost", + DBPath: "./tasks.db", FailedTopic: "retry-failed", File: file.NewOptions(), Slack: &Notification{}, diff --git a/apps/flowlord/taskmaster.go b/apps/flowlord/taskmaster.go index a86cc9c..9c8a98b 100644 --- a/apps/flowlord/taskmaster.go +++ b/apps/flowlord/taskmaster.go @@ -98,7 +98,7 @@ func New(opts *options) *taskMaster { if opts.Slack.MaxFrequency <= opts.Slack.MinFrequency { opts.Slack.MaxFrequency = 16 * opts.Slack.MinFrequency } - db, err := cache.NewSQLite(opts.TaskTTL, "./tasks.db") + db, err := cache.NewSQLite(opts.TaskTTL, opts.DBPath) if err != nil { log.Fatal("db init", err) } @@ -546,7 +546,7 @@ func (tm *taskMaster) sendAlertSummary(alerts []cache.AlertRecord) error { } // send to Slack if configured - log.Println(message.String()) + log.Println(message.String()) if tm.slack != nil { if err := tm.slack.Notify(message.String(), slack.Critical); err != nil { return fmt.Errorf("failed to send alert summary to Slack: %w", err) From d2e891409d59637ce55c6dfcaec628980f5e8b40 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Fri, 26 Sep 2025 13:32:19 -0600 Subject: [PATCH 05/40] update plan --- apps/flowlord/files.go | 3 +- apps/flowlord/handler.go | 20 +-- apps/flowlord/sqlite_spec.md | 147 ++++++++++++--- testing-framework.md | 334 +++++++++++++++++++++++++++++++++++ 4 files changed, 468 insertions(+), 36 deletions(-) create mode 100644 testing-framework.md diff --git a/apps/flowlord/files.go 
b/apps/flowlord/files.go index bd6d5de..b83d781 100644 --- a/apps/flowlord/files.go +++ b/apps/flowlord/files.go @@ -9,6 +9,7 @@ import ( "time" "github.com/pcelvng/task" + "github.com/pcelvng/task-tools/file/stat" "github.com/pcelvng/task-tools/tmpl" "github.com/pcelvng/task-tools/workflow" @@ -106,13 +107,13 @@ func (tm *taskMaster) matchFile(sts stat.Stats) error { meta.Set("file", sts.Path) meta.Set("filename", filepath.Base(sts.Path)) meta.Set("workflow", f.workflowFile) - // todo: add job if provided in task name ex -> task:job // populate the info string info := tmpl.Parse(f.Template, t) info, _ = tmpl.Meta(info, meta) tsk := task.New(f.Topic(), info) + tsk.Job = f.Job() tsk.Meta, _ = url.QueryUnescape(meta.Encode()) if err := tm.producer.Send(tsk.Type, tsk.JSONBytes()); err != nil { diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index 5d6c93a..f406332 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -1,10 +1,8 @@ package main import ( - "bytes" "encoding/json" "errors" - "html/template" "io" "log" "net/http" @@ -317,21 +315,19 @@ func alertHTML(tasks []cache.AlertRecord) []byte { summary := cache.BuildCompactSummary(tasks) // Create data structure for template - data := AlertData{ - Alerts: tasks, - Summary: summary, + data := map[string]interface{}{ + "Alerts": tasks, + "Summary": summary, } - tmpl, err := template.New("alert").Parse(handler.AlertTemplate) + // Use the new template registry + registry := handler.NewRegistry() + html, err := registry.ExecuteTemplate("alert", data) if err != nil { return []byte(err.Error()) } - - var buf bytes.Buffer - if err := tmpl.Execute(&buf, data); err != nil { - return []byte(err.Error()) - } - return buf.Bytes() + + return html } type request struct { diff --git a/apps/flowlord/sqlite_spec.md b/apps/flowlord/sqlite_spec.md index eb76a8e..cc427f6 100644 --- a/apps/flowlord/sqlite_spec.md +++ b/apps/flowlord/sqlite_spec.md @@ -93,32 +93,133 @@ Log every message that comes 
through the files topic with pattern matching resul ```sql CREATE TABLE file_messages ( id INTEGER PRIMARY KEY AUTOINCREMENT, - file_path TEXT NOT NULL, - file_size INTEGER, - file_modified_at TIMESTAMP, - received_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - bucket_name TEXT, - etag TEXT, - md5_hash TEXT, - match_found BOOLEAN DEFAULT FALSE, - processing_time_ms INTEGER + path TEXT NOT NULL, -- File path (e.g., "gs://bucket/path/file.json") + size INTEGER, -- File size in bytes + last_modified TIMESTAMP, -- When file was modified (from file system/GCS metadata) + received_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- When the record was received (time.Now()) + task_time TIMESTAMP, -- Time extracted from path using tmpl.PathTime(sts.Path) + task_ids TEXT, -- JSON array of task IDs (null if no matches) + task_names TEXT -- JSON array of task names (type:job format, null if no matches) ); -CREATE TABLE file_pattern_matches ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - file_message_id INTEGER NOT NULL, - workflow_phase_id INTEGER NOT NULL, - pattern TEXT NOT NULL, - task_sent BOOLEAN DEFAULT FALSE, - task_id TEXT, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - FOREIGN KEY (file_message_id) REFERENCES file_messages(id), - FOREIGN KEY (workflow_phase_id) REFERENCES workflow_phases(id) -); - -CREATE INDEX idx_file_messages_path ON file_messages (file_path); +-- Indexes for efficient querying +CREATE INDEX idx_file_messages_path ON file_messages (path); CREATE INDEX idx_file_messages_received ON file_messages (received_at); -CREATE INDEX idx_file_pattern_matches_file ON file_pattern_matches (file_message_id); + +-- Example queries for file message history + +-- Get all files processed in the last 24 hours +SELECT path, size, last_modified, received_at, task_time, task_ids, task_names +FROM file_messages +WHERE received_at >= datetime('now', '-1 day') +ORDER BY received_at DESC; + +-- Get files that have matching tasks (with task names for quick reference) +SELECT path, size, 
task_time, task_ids, task_names +FROM file_messages +WHERE task_ids IS NOT NULL +ORDER BY received_at DESC; + +-- Get files that didn't match any patterns (for debugging) +SELECT path, size, received_at, task_time +FROM file_messages +WHERE task_ids IS NULL +ORDER BY received_at DESC; + +-- JOIN files with their matching tasks (returns multiple rows per file if multiple tasks) +-- This is the key query for getting file + task details +SELECT + fm.id as file_id, + fm.path, + fm.task_time, + fm.received_at, + json_extract(t.value, '$') as task_id, + tl.type as task_type, + tl.job as task_job, + tl.result as task_result, + tl.created as task_created, + tl.started as task_started, + tl.ended as task_ended +FROM file_messages fm, + json_each(fm.task_ids) as t +JOIN task_log tl ON json_extract(t.value, '$') = tl.id +WHERE fm.task_ids IS NOT NULL +ORDER BY fm.received_at DESC, fm.id; + +-- Get files that created specific task types +SELECT DISTINCT + fm.path, + fm.task_time, + fm.received_at, + COUNT(*) as task_count +FROM file_messages fm, + json_each(fm.task_ids) as t +JOIN task_log tl ON json_extract(t.value, '$') = tl.id +WHERE tl.type = 'data-load' + AND fm.received_at >= datetime('now', '-1 day') +GROUP BY fm.id, fm.path, fm.task_time, fm.received_at +ORDER BY fm.received_at DESC; + +-- Get file processing statistics by time period +SELECT + date(received_at) as date, + COUNT(*) as total_files, + SUM(CASE WHEN task_ids IS NOT NULL THEN 1 ELSE 0 END) as matched_files, + SUM(CASE WHEN task_ids IS NULL THEN 1 ELSE 0 END) as unmatched_files, + SUM(CASE + WHEN task_ids IS NOT NULL + THEN json_array_length(task_ids) + ELSE 0 + END) as total_tasks_created +FROM file_messages +WHERE received_at >= datetime('now', '-7 days') +GROUP BY date(received_at) +ORDER BY date DESC; + +-- Find files by specific task time range +SELECT path, size, task_time, task_ids, task_names +FROM file_messages +WHERE task_time >= datetime('now', '-1 day') + AND task_time < datetime('now') +ORDER 
BY task_time DESC; + +-- Get files with their task names (without joining task_log table) +-- This is useful for quick overview without needing task details +SELECT + fm.path, + fm.task_time, + fm.received_at, + json_extract(t.value, '$') as task_id, + json_extract(n.value, '$') as task_name +FROM file_messages fm, + json_each(fm.task_ids) as t, + json_each(fm.task_names) as n +WHERE fm.task_ids IS NOT NULL + AND json_each.key = json_each.key -- Ensures same index for both arrays +ORDER BY fm.received_at DESC; + +-- Find files that created specific task types by name pattern +SELECT DISTINCT + fm.path, + fm.task_time, + fm.received_at, + fm.task_names +FROM file_messages fm +WHERE fm.task_names IS NOT NULL + AND fm.task_names LIKE '%data-load%' + AND fm.received_at >= datetime('now', '-1 day') +ORDER BY fm.received_at DESC; + +-- Count files by task type (using task names) +SELECT + json_extract(n.value, '$') as task_type, + COUNT(DISTINCT fm.id) as file_count +FROM file_messages fm, + json_each(fm.task_names) as n +WHERE fm.task_names IS NOT NULL + AND fm.received_at >= datetime('now', '-7 days') +GROUP BY json_extract(n.value, '$') +ORDER BY file_count DESC; ``` ### Enhanced Task Recording diff --git a/testing-framework.md b/testing-framework.md new file mode 100644 index 0000000..d65bce1 --- /dev/null +++ b/testing-framework.md @@ -0,0 +1,334 @@ +# Testing Framework Documentation + +This document outlines the testing patterns and frameworks used in the task-tools repository, focusing on unit test patterns, the hydronica/trial framework, and conversion strategies for testify-based tests. + +## Overview + +The repository uses a consistent testing approach with the following key components: + +1. **hydronica/trial** - Primary testing framework for table-driven tests +2. **Go's built-in testing** - Standard Go testing patterns +3. **testify/assert** - Legacy testing assertions (to be converted) +4. 
**Example functions** - Documentation-style tests + +## Testing Patterns + +### 1. hydronica/trial Framework + +The `hydronica/trial` framework is the primary testing tool used throughout the repository. It provides a clean, type-safe way to write table-driven tests. + +#### Basic Pattern + +```go +func TestFunctionName(t *testing.T) { + fn := func(input InputType) (OutputType, error) { + // Test logic here + return result, err + } + + cases := trial.Cases[InputType, OutputType]{ + "test case name": { + Input: inputValue, + Expected: expectedValue, + }, + "error case": { + Input: errorInput, + ShouldErr: true, + }, + } + + trial.New(fn, cases).Test(t) +} +``` + +#### Advanced Features + +**Comparers**: Custom comparison logic for complex types +```go +trial.New(fn, cases).Comparer(trial.Contains).Test(t) +trial.New(fn, cases).Comparer(trial.EqualOpt(trial.IgnoreAllUnexported)).Test(t) +``` + +**SubTests**: For complex test scenarios +```go +trial.New(fn, cases).SubTest(t) +``` + +**Timeouts**: For tests that might hang +```go +trial.New(fn, cases).Timeout(time.Second).SubTest(t) +``` + +#### Time Handling + +The repository uses trial's time utilities instead of literal `time.Date()` calls: + +```go +// Preferred (using trial utilities) +trial.TimeDay("2023-01-01") +trial.TimeHour("2023-01-01T12") +trial.Time(time.RFC3339, "2023-01-01T00:00:00Z") + +// Avoid (literal time.Date calls) +time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) +``` + +### 2. Standard Go Testing + +For simple tests that don't require table-driven patterns: + +```go +func TestSimpleFunction(t *testing.T) { + result := functionUnderTest() + if result != expected { + t.Errorf("got %v, expected %v", result, expected) + } +} +``` + +### 3. 
Example Functions + +Used for documentation and demonstrating API usage: + +```go +func ExampleFunctionName() { + // Example code + fmt.Println("output") + + // Output: + // output +} +``` + +Example functions are found in files like `file/writebyhour_test.go` and serve as both tests and documentation. + +### 4. TestMain Pattern + +For setup and teardown across multiple tests: + +```go +func TestMain(m *testing.M) { + // Setup code + code := m.Run() + // Cleanup code + os.Exit(code) +} +``` + +## Current Test File Analysis + +### Files Using hydronica/trial (41 files) + +The following files use the trial framework: + +- `apps/flowlord/taskmaster_test.go` +- `apps/flowlord/handler_test.go` +- `apps/flowlord/files_test.go` +- `apps/flowlord/batch_test.go` +- `apps/flowlord/cache/cache_test.go` +- `file/file_test.go` +- `file/util/util_test.go` +- `file/nop/nop_test.go` +- `file/minio/client_test.go` +- `file/minio/read_test.go` +- `file/minio/write_test.go` +- `file/local/read_test.go` +- `file/local/write_test.go` +- `file/local/local_test.go` +- `file/buf/buf_test.go` +- `file/stat/stat_test.go` +- `file/scanner_test.go` +- `workflow/workflow_test.go` +- `tmpl/tmpl_test.go` +- `db/prep_test.go` +- `db/batch/batch_test.go` +- `db/batch/stat_test.go` +- `consumer/discover_test.go` +- `bootstrap/bootstrap_test.go` +- `apps/workers/*/worker_test.go` (multiple worker test files) +- `apps/tm-archive/*/app_test.go` (multiple archive test files) +- `apps/utils/*/logger_test.go`, `stats_test.go`, `recap_test.go`, `filewatcher_test.go` + +### Files Using testify/assert (2 files) + +These files need conversion to trial or standard Go testing: + +- `apps/tm-archive/http/http_test.go` +- `apps/utils/filewatcher/watcher_test.go` + +### Example Function Usage + +Files with extensive example functions: +- `file/writebyhour_test.go` (13 example functions) + +## Conversion Strategies + +### From testify/assert to trial + +**Current testify pattern:** +```go +func TestFunction(t 
*testing.T) { + result := functionUnderTest() + assert.Equal(t, expected, result) + assert.NotNil(t, err) +} +``` + +**Convert to trial pattern:** +```go +func TestFunction(t *testing.T) { + fn := func(input InputType) (OutputType, error) { + return functionUnderTest(input) + } + + cases := trial.Cases[InputType, OutputType]{ + "success case": { + Input: testInput, + Expected: expectedOutput, + }, + "error case": { + Input: errorInput, + ShouldErr: true, + }, + } + + trial.New(fn, cases).Test(t) +} +``` + +### From testify/assert to standard Go testing + +For simple cases that don't benefit from table-driven tests: + +```go +func TestFunction(t *testing.T) { + result := functionUnderTest() + if result != expected { + t.Errorf("got %v, expected %v", result, expected) + } +} +``` + +## Best Practices + +### 1. Test Structure + +- Use descriptive test case names +- Group related test cases logically +- Use table-driven tests for multiple scenarios +- Keep test functions focused and single-purpose + +### 2. Error Testing + +```go +cases := trial.Cases[InputType, OutputType]{ + "error case": { + Input: errorInput, + ShouldErr: true, + }, +} +``` + +### 3. Time Testing + +Always use trial time utilities: +```go +// Good +trial.TimeDay("2023-01-01") +trial.TimeHour("2023-01-01T12") + +// Avoid +time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) +``` + +### 4. Complex Comparisons + +Use appropriate comparers for complex types: +```go +trial.New(fn, cases).Comparer(trial.EqualOpt(trial.IgnoreAllUnexported)).Test(t) +``` + +### 5. Test Organization + +- Place tests in `*_test.go` files +- Use `TestMain` for setup/teardown when needed +- Use example functions for API documentation +- Keep test data in separate files when appropriate + +## Migration Plan + +### Phase 1: Convert testify/assert usage + +1. **apps/tm-archive/http/http_test.go** + - Convert `assert.Equal` calls to trial cases + - Convert `assert.Contains` to appropriate trial comparers + +2. 
**apps/utils/filewatcher/watcher_test.go** + - Convert `assert.Equal` and `assert.NotNil` calls + - Create table-driven test cases + +### Phase 2: Standardize patterns + +1. Ensure all new tests use trial framework +2. Convert any remaining standard Go tests to trial when beneficial +3. Maintain example functions for documentation + +### Phase 3: Documentation and training + +1. Update this document as patterns evolve +2. Provide examples for common testing scenarios +3. Establish coding standards for test writing + +## Common Test Patterns + +### Testing with external dependencies + +```go +func TestWithDependencies(t *testing.T) { + fn := func(input InputType) (OutputType, error) { + // Setup mocks or test doubles + mockDep := &MockDependency{} + service := NewService(mockDep) + return service.Process(input) + } + + cases := trial.Cases[InputType, OutputType]{ + "success": { + Input: validInput, + Expected: expectedOutput, + }, + } + + trial.New(fn, cases).Test(t) +} +``` + +### Testing async operations + +```go +func TestAsyncOperation(t *testing.T) { + fn := func(input InputType) (OutputType, error) { + result := make(chan OutputType, 1) + err := make(chan error, 1) + + go func() { + output, e := asyncOperation(input) + result <- output + err <- e + }() + + return <-result, <-err + } + + cases := trial.Cases[InputType, OutputType]{ + "async success": { + Input: testInput, + Expected: expectedOutput, + }, + } + + trial.New(fn, cases).Timeout(5 * time.Second).Test(t) +} +``` + +This testing framework provides a consistent, maintainable approach to testing across the entire task-tools repository. 
From 8bfaeb0c6be60b3cd77f613615d5e47537d9960e Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Fri, 26 Sep 2025 14:15:09 -0600 Subject: [PATCH 06/40] add files cache and dashboard --- .gitignore | 2 + apps/flowlord/cache/schema.sql | 18 ++- apps/flowlord/cache/sqlite.go | 215 ++++++++++++++++++++++++ apps/flowlord/files.go | 17 ++ apps/flowlord/handler.go | 106 +++++++++++- apps/flowlord/handler/files.tmpl | 270 +++++++++++++++++++++++++++++++ apps/flowlord/handler/handler.go | 3 + apps/flowlord/handler_test.go | 47 ++++++ tasks.db | Bin 65536 -> 0 bytes 9 files changed, 672 insertions(+), 6 deletions(-) create mode 100644 apps/flowlord/handler/files.tmpl delete mode 100644 tasks.db diff --git a/.gitignore b/.gitignore index 6f6f234..6fa1150 100644 --- a/.gitignore +++ b/.gitignore @@ -18,5 +18,7 @@ apps/utils/file-watcher/file-watcher */stats/stats apps/workers/sql-load/sql-load build +tasks.db +_preview.html coverage diff --git a/apps/flowlord/cache/schema.sql b/apps/flowlord/cache/schema.sql index fecfce4..03da240 100644 --- a/apps/flowlord/cache/schema.sql +++ b/apps/flowlord/cache/schema.sql @@ -70,4 +70,20 @@ CREATE TABLE IF NOT EXISTS alert_records ( created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ); -CREATE INDEX IF NOT EXISTS idx_alert_records_created_at ON alert_records (created_at); \ No newline at end of file +CREATE INDEX IF NOT EXISTS idx_alert_records_created_at ON alert_records (created_at); + +-- File message history table for tracking file processing +CREATE TABLE IF NOT EXISTS file_messages ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + path TEXT NOT NULL, -- File path (e.g., "gs://bucket/path/file.json") + size INTEGER, -- File size in bytes + last_modified TIMESTAMP, -- When file was modified (from file system/GCS metadata) + received_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- When the record was received (time.Now()) + task_time TIMESTAMP, -- Time extracted from path using tmpl.PathTime(sts.Path) + task_ids TEXT, -- JSON array of task IDs (null 
if no matches) + task_names TEXT -- JSON array of task names (type:job format, null if no matches) +); + +-- Indexes for efficient querying +CREATE INDEX IF NOT EXISTS idx_file_messages_path ON file_messages (path); +CREATE INDEX IF NOT EXISTS idx_file_messages_received ON file_messages (received_at); \ No newline at end of file diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index be22ef0..d8b9b90 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -3,6 +3,7 @@ package cache import ( "database/sql" _ "embed" + "encoding/json" "net/url" "sort" "strings" @@ -13,6 +14,7 @@ import ( "github.com/pcelvng/task/bus" _ "modernc.org/sqlite" + "github.com/pcelvng/task-tools/file/stat" "github.com/pcelvng/task-tools/tmpl" ) @@ -544,3 +546,216 @@ func (s *SQLite) GetTaskByID(id string) (*TaskView, error) { return &t, nil } */ + +// FileMessage represents a file message record +type FileMessage struct { + ID int `json:"id"` + Path string `json:"path"` + Size int64 `json:"size"` + LastModified time.Time `json:"last_modified"` + ReceivedAt time.Time `json:"received_at"` + TaskTime time.Time `json:"task_time"` + TaskIDs []string `json:"task_ids"` + TaskNames []string `json:"task_names"` +} + +// AddFileMessage stores a file message in the database +func (s *SQLite) AddFileMessage(sts stat.Stats, taskIDs []string, taskNames []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Parse last modified time from the created field + var lastModified time.Time + if sts.Created != "" { + lastModified, _ = time.Parse(time.RFC3339, sts.Created) + } + + // Extract task time from path + taskTime := tmpl.PathTime(sts.Path) + + // Convert slices to JSON arrays + var taskIDsJSON, taskNamesJSON sql.NullString + if len(taskIDs) > 0 { + if jsonBytes, err := json.Marshal(taskIDs); err == nil { + taskIDsJSON = sql.NullString{String: string(jsonBytes), Valid: true} + } + } + if len(taskNames) > 0 { + if jsonBytes, err := 
json.Marshal(taskNames); err == nil { + taskNamesJSON = sql.NullString{String: string(jsonBytes), Valid: true} + } + } + + _, err := s.db.Exec(` + INSERT INTO file_messages (path, size, last_modified, task_time, task_ids, task_names) + VALUES (?, ?, ?, ?, ?, ?) + `, sts.Path, sts.Size, lastModified, taskTime, taskIDsJSON, taskNamesJSON) + + return err +} + +// GetFileMessages retrieves file messages with optional filtering +func (s *SQLite) GetFileMessages(limit int, offset int) ([]FileMessage, error) { + s.mu.Lock() + defer s.mu.Unlock() + + query := ` + SELECT id, path, size, last_modified, received_at, task_time, task_ids, task_names + FROM file_messages + ORDER BY received_at DESC + LIMIT ? OFFSET ? + ` + + rows, err := s.db.Query(query, limit, offset) + if err != nil { + return nil, err + } + defer rows.Close() + + var messages []FileMessage + for rows.Next() { + var msg FileMessage + var taskIDsJSON, taskNamesJSON sql.NullString + + err := rows.Scan( + &msg.ID, &msg.Path, &msg.Size, &msg.LastModified, &msg.ReceivedAt, + &msg.TaskTime, &taskIDsJSON, &taskNamesJSON, + ) + if err != nil { + continue + } + + // Parse JSON arrays + if taskIDsJSON.Valid { + json.Unmarshal([]byte(taskIDsJSON.String), &msg.TaskIDs) + } + if taskNamesJSON.Valid { + json.Unmarshal([]byte(taskNamesJSON.String), &msg.TaskNames) + } + + messages = append(messages, msg) + } + + return messages, nil +} + +// GetFileMessagesByDate retrieves file messages for a specific date +func (s *SQLite) GetFileMessagesByDate(date time.Time) ([]FileMessage, error) { + s.mu.Lock() + defer s.mu.Unlock() + + dateStr := date.Format("2006-01-02") + query := ` + SELECT id, path, size, last_modified, received_at, task_time, task_ids, task_names + FROM file_messages + WHERE DATE(received_at) = ? 
+ ORDER BY received_at DESC + ` + + rows, err := s.db.Query(query, dateStr) + if err != nil { + return nil, err + } + defer rows.Close() + + var messages []FileMessage + for rows.Next() { + var msg FileMessage + var taskIDsJSON, taskNamesJSON sql.NullString + + err := rows.Scan( + &msg.ID, &msg.Path, &msg.Size, &msg.LastModified, &msg.ReceivedAt, + &msg.TaskTime, &taskIDsJSON, &taskNamesJSON, + ) + if err != nil { + continue + } + + // Parse JSON arrays + if taskIDsJSON.Valid { + json.Unmarshal([]byte(taskIDsJSON.String), &msg.TaskIDs) + } + if taskNamesJSON.Valid { + json.Unmarshal([]byte(taskNamesJSON.String), &msg.TaskNames) + } + + messages = append(messages, msg) + } + + return messages, nil +} + +// GetFileMessagesWithTasks retrieves file messages with their associated task details +func (s *SQLite) GetFileMessagesWithTasks(limit int, offset int) ([]FileMessageWithTasks, error) { + s.mu.Lock() + defer s.mu.Unlock() + + query := ` + SELECT + fm.id, fm.path, fm.task_time, fm.received_at, + json_extract(t.value, '$') as task_id, + tl.type as task_type, + tl.job as task_job, + tl.result as task_result, + tl.created as task_created, + tl.started as task_started, + tl.ended as task_ended + FROM file_messages fm, + json_each(fm.task_ids) as t + JOIN task_log tl ON json_extract(t.value, '$') = tl.id + WHERE fm.task_ids IS NOT NULL + ORDER BY fm.received_at DESC, fm.id + LIMIT ? OFFSET ? 
+ ` + + rows, err := s.db.Query(query, limit, offset) + if err != nil { + return nil, err + } + defer rows.Close() + + var results []FileMessageWithTasks + for rows.Next() { + var result FileMessageWithTasks + var taskCreated, taskStarted, taskEnded sql.NullString + + err := rows.Scan( + &result.FileID, &result.Path, &result.TaskTime, &result.ReceivedAt, + &result.TaskID, &result.TaskType, &result.TaskJob, &result.TaskResult, + &taskCreated, &taskStarted, &taskEnded, + ) + if err != nil { + continue + } + + // Parse timestamps + if taskCreated.Valid { + result.TaskCreated, _ = time.Parse(time.RFC3339, taskCreated.String) + } + if taskStarted.Valid { + result.TaskStarted, _ = time.Parse(time.RFC3339, taskStarted.String) + } + if taskEnded.Valid { + result.TaskEnded, _ = time.Parse(time.RFC3339, taskEnded.String) + } + + results = append(results, result) + } + + return results, nil +} + +// FileMessageWithTasks represents a file message with associated task details +type FileMessageWithTasks struct { + FileID int `json:"file_id"` + Path string `json:"path"` + TaskTime time.Time `json:"task_time"` + ReceivedAt time.Time `json:"received_at"` + TaskID string `json:"task_id"` + TaskType string `json:"task_type"` + TaskJob string `json:"task_job"` + TaskResult string `json:"task_result"` + TaskCreated time.Time `json:"task_created"` + TaskStarted time.Time `json:"task_started"` + TaskEnded time.Time `json:"task_ended"` +} diff --git a/apps/flowlord/files.go b/apps/flowlord/files.go index b83d781..d736072 100644 --- a/apps/flowlord/files.go +++ b/apps/flowlord/files.go @@ -94,6 +94,9 @@ func unmarshalStat(b []byte) (sts stat.Stats) { // if a match is found it will create a task and send it out func (tm *taskMaster) matchFile(sts stat.Stats) error { matches := 0 + var taskIDs []string + var taskNames []string + for _, f := range tm.files { if isMatch, _ := filepath.Match(f.SrcPattern, sts.Path); !isMatch { continue @@ -116,10 +119,24 @@ func (tm *taskMaster) matchFile(sts 
stat.Stats) error { tsk.Job = f.Job() tsk.Meta, _ = url.QueryUnescape(meta.Encode()) + // Collect task information for storage + taskIDs = append(taskIDs, tsk.ID) + taskName := tsk.Type + if tsk.Job != "" { + taskName += ":" + tsk.Job + } + taskNames = append(taskNames, taskName) + if err := tm.producer.Send(tsk.Type, tsk.JSONBytes()); err != nil { return err } } + + // Store file message in database + if tm.taskCache != nil { + tm.taskCache.AddFileMessage(sts, taskIDs, taskNames) + } + if matches == 0 { return fmt.Errorf("no match found for %q", sts.Path) } diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index f406332..3a881ee 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -1,8 +1,11 @@ package main import ( + "bytes" "encoding/json" "errors" + "fmt" + "html/template" "io" "log" "net/http" @@ -52,6 +55,7 @@ func (tm *taskMaster) StartHandler() { router.Get("/task/{id}", tm.taskHandler) router.Get("/recap", tm.recapHandler) router.Get("/web/alert", tm.htmlAlert) + router.Get("/web/files", tm.htmlFiles) if tm.port == 0 { log.Println("flowlord router disabled") @@ -303,6 +307,93 @@ func (tm *taskMaster) htmlAlert(w http.ResponseWriter, r *http.Request) { w.Write(alertHTML(alerts)) } +// htmlFiles handles GET /web/files - displays file messages for a specific date +func (tm *taskMaster) htmlFiles(w http.ResponseWriter, r *http.Request) { + dt, _ := time.Parse("2006-01-02", r.URL.Query().Get("date")) + if dt.IsZero() { + dt = time.Now() + } + + files, err := tm.taskCache.GetFileMessagesByDate(dt) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err.Error())) + return + } + + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "text/html") + w.Write(filesHTML(files, dt)) +} + +// filesHTML renders the file messages HTML page +func filesHTML(files []cache.FileMessage, date time.Time) []byte { + // Calculate statistics + totalFiles := len(files) + matchedFiles := 0 + totalTasks := 0 + + 
for _, file := range files { + if len(file.TaskNames) > 0 { + matchedFiles++ + totalTasks += len(file.TaskNames) + } + } + + unmatchedFiles := totalFiles - matchedFiles + + // Calculate navigation dates + prevDate := date.AddDate(0, 0, -1) + nextDate := date.AddDate(0, 0, 1) + + data := map[string]interface{}{ + "Date": date.Format("Monday, January 2, 2006"), + "PrevDate": prevDate.Format("2006-01-02"), + "NextDate": nextDate.Format("2006-01-02"), + "Files": files, + "TotalFiles": totalFiles, + "MatchedFiles": matchedFiles, + "UnmatchedFiles": unmatchedFiles, + "TotalTasks": totalTasks, + } + + // Template functions + funcMap := template.FuncMap{ + "formatBytes": func(bytes int64) string { + const unit = 1024 + if bytes < unit { + return fmt.Sprintf("%d B", bytes) + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) + }, + "formatReceivedTime": func(t time.Time) string { + return t.Format(time.RFC3339) + }, + "formatTaskTime": func(t time.Time) string { + return t.Format("2006-01-02T15") + }, + } + + // Parse and execute template using the same pattern as alertHTML + tmpl, err := template.New("files").Funcs(funcMap).Parse(handler.FilesTemplate) + if err != nil { + return []byte(err.Error()) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return []byte(err.Error()) + } + + return buf.Bytes() +} + + // AlertData holds both the alerts and summary data for the template type AlertData struct { Alerts []cache.AlertRecord @@ -320,14 +411,18 @@ func alertHTML(tasks []cache.AlertRecord) []byte { "Summary": summary, } - // Use the new template registry - registry := handler.NewRegistry() - html, err := registry.ExecuteTemplate("alert", data) + tmpl, err := template.New("alert").Parse(handler.AlertTemplate) if err != nil { return []byte(err.Error()) } - - return html + + + var buf bytes.Buffer + if err := 
tmpl.Execute(&buf, data); err != nil { + return []byte(err.Error()) + } + + return buf.Bytes() } type request struct { @@ -494,3 +589,4 @@ func (m Meta) UnmarshalJSON(d []byte) error { m2 := (map[string][]string)(m) return json.Unmarshal(d, &m2) } + diff --git a/apps/flowlord/handler/files.tmpl b/apps/flowlord/handler/files.tmpl new file mode 100644 index 0000000..7021619 --- /dev/null +++ b/apps/flowlord/handler/files.tmpl @@ -0,0 +1,270 @@ + + + + + + Flowlord File Messages - {{.Date}} + + + +
+
+

Flowlord File Messages

+
{{.Date}}
+
+ +
+ ← Previous Day + {{.Date}} + Next Day → + +
+ + {{if .Files}} +
+
+
+
{{.TotalFiles}}
+
Total Files
+
+
+
{{.MatchedFiles}}
+
With Tasks
+
+
+
{{.UnmatchedFiles}}
+
No Matches
+
+
+
{{.TotalTasks}}
+
Tasks Created
+
+
+
+ +
+ {{range .Files}} +
+
{{.Path}}
+
+ Size: {{.Size | formatBytes}} + Received: {{.ReceivedAt | formatReceivedTime}} + {{if not .LastModified.IsZero}} + Last Modified: {{.LastModified | formatReceivedTime}} + {{end}} + {{if not .TaskTime.IsZero}} + Task Time: {{.TaskTime | formatTaskTime}} + {{end}} +
+ {{if .TaskIDs}} +
+
Task IDs: + {{range $index, $taskID := .TaskIDs}} + {{$taskID}} + {{end}} +
+
Tasks Names: + {{range .TaskNames}} + {{.}} + {{end}} +
+
+ {{else}} +
+
No matching patterns found
+
+ {{end}} +
+ {{end}} +
+ {{else}} +
+

No file messages found

+

No files were processed on {{.Date}}

+
+ {{end}} +
+ + + + diff --git a/apps/flowlord/handler/handler.go b/apps/flowlord/handler/handler.go index ce77112..c735cd4 100644 --- a/apps/flowlord/handler/handler.go +++ b/apps/flowlord/handler/handler.go @@ -4,3 +4,6 @@ import _ "embed" //go:embed alert.tmpl var AlertTemplate string + +//go:embed files.tmpl +var FilesTemplate string diff --git a/apps/flowlord/handler_test.go b/apps/flowlord/handler_test.go index 4d81f63..63af16c 100644 --- a/apps/flowlord/handler_test.go +++ b/apps/flowlord/handler_test.go @@ -334,6 +334,7 @@ func TestMeta_UnmarshalJSON(t *testing.T) { } // TestWebAlertPreview generates an HTML preview of the alert template for visual inspection +// this provides an html file func TestWebAlertPreview(t *testing.T) { // Create sample alert data to showcase the templating sampleAlerts := []cache.AlertRecord{ @@ -397,3 +398,49 @@ func TestWebAlertPreview(t *testing.T) { } } + + +// TestFilesHTML generate a html file based on the files.tmpl it is used for vision examination of the files +func TestFilesHTML(t *testing.T) { + // Create sample file messages + files := []cache.FileMessage{ + { + ID: 1, + Path: "gs://bucket/data/2024-01-15/file1.json", + Size: 1024, + LastModified: time.Now().Add(-1 * time.Hour), + ReceivedAt: time.Now().Add(-30 * time.Minute), + TaskTime: time.Now().Add(-1 * time.Hour), + TaskIDs: []string{"task-1", "task-2"}, + TaskNames: []string{"data-load:import", "transform:clean"}, + }, + { + ID: 2, + Path: "gs://bucket/data/2024-01-15/file2.csv", + Size: 2048, + LastModified: time.Now().Add(-2 * time.Hour), + ReceivedAt: time.Now().Add(-15 * time.Minute), + TaskTime: time.Now().Add(-2 * time.Hour), + TaskIDs: []string{}, + TaskNames: []string{}, + }, + } + + date := time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC) + html := filesHTML(files, date) + + // Write HTML to a file for easy viewing + outputFile := "files_preview.html" + err := os.WriteFile(outputFile, html, 0644) + if err != nil { + t.Fatalf("Failed to write HTML file: %v", err) + } + 
+ t.Logf("Alert preview generated and saved to: ./%s", outputFile) + + // Basic checks + if len(html) == 0 { + t.Error("Expected HTML output, got empty") + } + +} \ No newline at end of file diff --git a/tasks.db b/tasks.db deleted file mode 100644 index 7f3afac85e3d3b7e4f9bf6d20a2917aabca4db34..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 65536 zcmeHQOKclSdTv^8SyazrY}2wVyJe3kDRztfWRpY)uf`+E&e+zh9vO|1h0u>`scn+Y z>2BKN^(uia%?1HBL9hve1lSw`OfHKBW;Z|%lS_~V76`By1i>Oe275^kiy(&}w@rXt z@>jDTBu!dvkL?}X)s)y>e^vce_0`{1fBme-`Zumw9%4+}sj40$Qg2YH6!mq6p(v^h z|8KxQ4gdY{KN{TO=3lAs;W76zweX$!H2uHSSl@RkdX@g!_+0v*(s$C?u`kB<23PyP z*?$x~$VCt!2vB=tLz$)7)PqT@W}uHOb23wF0U;u zZY(iZZ@ja##XK$=vwDMRi^|N!Jm=p#I+(d|A@zZ$Y8B*uSb=t7+eIHXP)+Z29u1P* zSbY2164McOuBKMeMTj}SZ;uXTW@l5sKjrgmOS;W5+QzTVikzy#cYWFWuO<=!f)F_x3gf-&oq(xQL@h7t1o(lezj1lv}Ma z8e&kj?%l(!A9Ksr7#GzH##F6J!$IzR%+0f^h}pP$eQAAT@%noK(~z6**~YIm=Dj+^ zgq$!pR^fm1+BJwB#l)UC?GGE4ew#6^iiaGlw&N#^^YdsCa9P6WI7F}5Rd?r+WSclZ zY#(2yid&qa^Hi(m26XI1A9>9}#NrW`pLu6#d2#dF26K6HZ4F@Gj*uCh|L%p6 zq2rm%@Shj&Sm-WB+x5SK`u**zOYi%2;4+Krqw7o8mM(8FG0qixXTdUBsu2@BK&gWS zCGVhEHPddApnU7iKFzR^TbuQmUG)xP>Z;?S?S@lf%tlT3EW37*xr=5i6{d!e!FV()M>ao9pwT!Y*f4`-OFl4Z&aWWXbQ5?a8z6r zoL?6jiJIa1nZCTZzA-mkv@Y}W%x^G)1Ydcs z`L(znzIyYF>iQ`|yWpo5Wo)^rKlra#lq(2)e84 z-T*PeEn8CQ2_>Yv`~*$Kq4MJ=+)IF(xq8+T^b9ior0!9?ayXQg?lQCuF*f?x30nJ$OhM2|ACaeiKro zU|6XMiOgIWdH&s@!OYB5N^j>4Jx3UJxI0_itG00j!u*ZF!OZMbYNwqU?wLYvL*2RI zAnoihW&Ztv!3;ZeSyR!^JwQ~)t{QdrJjDudWEI}~1C~vNFr>!1o3F|E4U>;Z?8I=#F5Hc`IGep+3jue+tY$i6tnN=-9tC_OD{EU&IDU0u21Yn!ojF>6c9OQ8H*URr1TNba0v%#Y50 zD>amvm`FWX^-*g2G{29uA=IR`5US75n3!wV&mD}iD10tXkfxpA3xQa>135j2t4@)E>T43m`#hQby_v&br{ zpz20mG(;ptY-G$vk~`Vxz=q>wRnknhAn-gZ2}O<7cu8Yr-q3W>Fp()5?Q9;+&FPU9 zgD;Al26>QFRR%U9&lY$Av7A&C6;mt-MKjKg)U{C5zl^MiillG_B(r>8FR~I63|7Of@+dac z%E$_@0wV#~X!#!=-bHI1W z0kDCBC^C{cs83cG@&K!*aI9vCItwKz6paED5;7NxoXBwbPCQ2GPhtH3J&OL`_;2~2 zpN{{O&NM%rQcm0i0fGQQfFM8+AP5iy2m%BFf&f8)AV3hHsJXt)R1yow82xGLY!VyC 
zQ5c&3Dv1>Xk6}MgVy75^fS)9>L<~c~`$=pKLlAH+i8Wym0?sEH^befwE2nxH7VyvZ z-$=s6uZ#q?f&TQs?^6AJZ&K;gqdy+{yzkAvH;=3mX(k9fB?1RqC+W?p>4(QN4|4mL zWLe%!BCUAd^^Rj1_`Z#*Fai5Mp6OHaV0t#5?Ydo8z1_V_o?Q;;_FmR)Xt$8=1(d+G zmyM#p>=<<$Pc{nUguEh;y{!KT)2`dY9PG}Gm!~EV(&>ZAw46?td%cY~yR7Ne@pPxE zTDxQ**RyK=jNGyZ&kMR;&0_XA`{fNyvt9Pe(sY!zk-TUE;lc!QOh z#FOPks+(R&Ld7&@|sZKktWNM#G#_hfjOAZ=LJnJnu;nSPEwI1<^@9q z`yo7`qNZ{vi}PF-_-x&=JzKXcWhi~N;UcHZ3!?7WwQ^)^mbjukWm)>r+Z~S z>*~A6XjIU)5BrWgBQDMe%kb9~8K8&Qz$2go-vK`G1arq+xW!3=gXdVzkdQ1XDr@kH z#7eph26;2Du!bxc9LIyblv5yNFFY4+xppnir)89Th0NZyo!e%`zFQ9RwBXs*N;5my z_`4Are{c4ZAc{J&8oe&F_;pFrgMPJM7kEAKd$flf$9CFfAiu#2S*WFQ{9;EU(G-q* zWxU)188MVqyS8Jq&;}SjnhnQl&fRBx{=xIPUuXLe7WiBVmTWk#2YK>#VgCJ273Qtt z$<&)`*BDq|QnNiKSX%-2id`#3l8S+8{`hk?s{^Dt zeHu-WG^ZUp;a9gg9Zmm&>QDcKq7Uey@pSqpNj^_GHYCvq0t5kq06~BtKoB4Z5CjMU z1Ob8oLEuXvaI#NMb^Fa>Zahl~kp-^B;yeo;6Wu(rU0C@Bl4CSO{d*Kcj*Zvnz~dhy z4=MJT29Hm>c`)GdXOV{($2bcfd)++FfJeMgLEH%E^xvEUk9S*QL>{NXBQCiRr!fg0 zamiaePQ@jMr5TZBQn@)n|7AHj|b^QFX< zL?#Fj1PB5I0fGQQfFM8+AP5iy2m%Cw7lAxpN)S;|LpOf7a^!54T1ncfFM8+ zAP5iy2m%BFf&f8)AV3g!eGwSzgCi4yHMeu)|3;k(7eda0BON-jt-+4bb!tEKIR}nA zp<@~xR_I{B@jId8EI3v}#~EbLU6t z^He{bIYNI9d;a|e{Y8?`>nnkzwSd$e!TA4|iwUdQOaZdwhRUgzj9D4?|LRW=RI%{VRt&gIy77Mx1# zJOj?TDfl{n8otiN9+Ti~$-$|X98A#vLCwV>PXg8xu`>hC(G&1BaJ(hXK+E%-Rl!lP zG2j>;{kJ4@el&fA>L2?hMSp$lm*c-j?~eUR{7Eap=NIX=53nE?L4Y7Y5FiK;1PB5I z0fGQQfFM8+coqmG-AH~GhA#6-wnfc8ZcFuJQdv0-jA0 zkQx6z^{XUX)1QTapC<`;1_FMPWDol(2$*QI`_m9G(PsCP5O6-puK1@OS2GjzH>ppO zY#)9S0^Uy&FarVCk{p?L0s_t_33&W*1sI?|rj7>rdR#a>pZ|W66A|zTel1BrJj$O> zBFN{{5OQSjPbs=Ketzs<$CgL`Z{$bAXz1raA{Rk`AV3fx2oMBbC;|_Eb7X|tT$!5Q zpA|#REcuzrT>F{I*ihbnw#X}Zq(IwI!w=};VSw*KdQ(iID?|$nWrl;3irdlam^*0N zjhex5jBPRnIQ*c3T*kB=IOI4u)LP!)icLpbjviha3M%xHm_#dy`Qd|)JF;|mh0aX7 zbJwq~uV1~fGGD5zj*FO=^T3F%P!2{=CK*>mQ2Ix66DV7bO#tC^v9(!5;5l>_77G@7Qj_v4>V@WRyAo>0a4XfuWJTYdg2iihZ|SwQD;zTU9M%!LzHC&e_MW*ksk_@u}e{scpcO zL_D|nW;a?AAvxGRHeQ~ZJV>VxCev~{UCtK@dQnhJMG*2rA+O1%SVXGe@65uQUfqLT 
zTVel}O9pa1tEPGu)T9T`3%Xs+;#$X2Ro-z+xtvvVt({%ZWgS$v9SFU{Hr!m@s-uck zLpcN8$pzeU%@8hYxt@d6YA&xp9*PD+ii*@?-YjZ5Zj#{q+;$n-|jilbHu!+r<52l6jz5j$5{cPTFME)q9$%--ag3t=lyh>}|BwhMfpY35Cs-HuDBX0l2K5R`2 z<{7m`;0Vv-PD)EsLHp~+Nv*zewfeC-K>Hd;=3wT*1iY+74`OI>G^2V(xIlQC)cEdrIujj0Zm#dNphP-=MY+w3ff;kO=|UVn)FLqs|Q{(qXE z9LV?|e)ntr@!_jG{tt$EeQoe`jUy}Wco|8&1hsJTdr_qzC z^f<loZ*7^-r3qv63w3S*;+#ZzxEEHBG!Os&e6_8NB76y~*iiKC+_} f4a6`ooo1#(Xh{ycIk=W3;`BsjCL&IcX=nZ)Un0|W From a26aa423a46fe3edbe244b356efc6204ab22b303 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Mon, 29 Sep 2025 14:05:04 -0600 Subject: [PATCH 07/40] update sqlite tasks plan --- apps/flowlord/sqlite_spec.md | 101 +++++++++++++++++++++++++---------- 1 file changed, 74 insertions(+), 27 deletions(-) diff --git a/apps/flowlord/sqlite_spec.md b/apps/flowlord/sqlite_spec.md index cc427f6..57d2965 100644 --- a/apps/flowlord/sqlite_spec.md +++ b/apps/flowlord/sqlite_spec.md @@ -223,43 +223,90 @@ ORDER BY file_count DESC; ``` ### Enhanced Task Recording -Redesign task storage for optimal querying, deduplication, and system tracking. +Replace dual-table system with single table for simplified task tracking. 
```sql --- Modify existing events table to include deduplication -ALTER TABLE events ADD COLUMN task_hash TEXT; -ALTER TABLE events ADD COLUMN first_seen TIMESTAMP; -ALTER TABLE events ADD COLUMN last_seen TIMESTAMP; - --- Enhanced task_log with better indexing and deduplication support -CREATE TABLE task_execution_log ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - event_id TEXT NOT NULL, - execution_sequence INTEGER NOT NULL, -- For tracking task progression - type TEXT NOT NULL, +-- Remove existing tables +DROP TABLE IF EXISTS events; +DROP TABLE IF EXISTS task_log; + +-- Single table for all task records +CREATE TABLE task_records ( + id TEXT, + type TEXT, job TEXT, info TEXT, - result TEXT, + result TEXT, -- NULL if not completed meta TEXT, - msg TEXT, + msg TEXT, -- NULL if not completed created TIMESTAMP, - started TIMESTAMP, - ended TIMESTAMP, - workflow_file TEXT, - phase_matched BOOLEAN DEFAULT FALSE, - children_triggered INTEGER DEFAULT 0, - retry_count INTEGER DEFAULT 0, - is_duplicate BOOLEAN DEFAULT FALSE, - FOREIGN KEY (event_id) REFERENCES events(id) + started TIMESTAMP, -- NULL if not started + ended TIMESTAMP, -- NULL if not completed + PRIMARY KEY (type, job, id, created) ); -CREATE INDEX idx_task_execution_type_job ON task_execution_log (type, job); -CREATE INDEX idx_task_execution_created ON task_execution_log (created); -CREATE INDEX idx_task_execution_result ON task_execution_log (result); -CREATE INDEX idx_task_execution_event_id ON task_execution_log (event_id); -CREATE INDEX idx_task_execution_workflow ON task_execution_log (workflow_file); +-- Indexes for efficient querying +CREATE INDEX idx_task_records_type ON task_records (type); +CREATE INDEX idx_task_records_job ON task_records (job); +CREATE INDEX idx_task_records_created ON task_records (created); +CREATE INDEX idx_task_records_type_job ON task_records (type, job); +CREATE INDEX idx_task_records_date_range ON task_records (created, ended); + +-- Create a view that calculates task and 
queue times +CREATE VIEW IF NOT EXISTS tasks AS +SELECT + task_records.id, + task_records.type, + task_records.job, + task_records.info, + task_records.meta, + task_records.msg, + task_records.result, + -- Calculate task duration in seconds + CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) as task_seconds, + -- Format task duration as HH:MM:SS + strftime('%H:%M:%S', + CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) / 3600 || ':' || + CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) % 3600 / 60 || ':' || + CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) % 60 + ) as task_time, + -- Calculate queue time in seconds + CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) as queue_seconds, + -- Format queue duration as HH:MM:SS + strftime('%H:%M:%S', + CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) / 3600 || ':' || + CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) % 3600 / 60 || ':' || + CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) % 60 + ) as queue_time, + task_records.created, + task_records.started, + task_records.ended +FROM task_records; + +-- Common query patterns +-- All completed tasks +SELECT * FROM task_records WHERE result IS NOT NULL; + +-- Tasks by type and job +SELECT * FROM task_records WHERE type = ? AND job = ?; + +-- Incomplete tasks +SELECT * FROM task_records WHERE result IS NULL; + +-- Tasks by date range +SELECT * FROM task_records +WHERE created BETWEEN ? 
AND ?; ``` +**Implementation Changes:** +- Replace `events` + `task_log` with single `task_records` table +- Use composite PRIMARY KEY (type, job, id, created) to handle retries naturally +- Each retry creates a new record with unique `created` timestamp +- Track task creation (when first submitted) and completion (when finished) +- NULL values for `started`, `ended`, `result`, `msg` until completion +- Log conflicts on task creation (unexpected duplicates) for monitoring +- Maintain existing Cache interface for backward compatibility + ### Task Relationships and Dependencies Track task.Done message processing and child task triggering. From 7d340279f619c17799ed657f93e59b90daf90eb8 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Mon, 29 Sep 2025 15:16:54 -0600 Subject: [PATCH 08/40] task Dashboard: 1st draft --- .gitignore | 2 +- apps/flowlord/cache/schema.sql | 68 ++-- apps/flowlord/cache/sqlite.go | 276 +++++++++----- apps/flowlord/files_test.go | 2 +- apps/flowlord/handler.go | 194 ++++++++++ apps/flowlord/handler/handler.go | 3 + apps/flowlord/handler/task.tmpl | 623 +++++++++++++++++++++++++++++++ apps/flowlord/handler_test.go | 49 ++- apps/flowlord/test/tasks.json | 194 ++++++++++ 9 files changed, 1278 insertions(+), 133 deletions(-) create mode 100644 apps/flowlord/handler/task.tmpl create mode 100644 apps/flowlord/test/tasks.json diff --git a/.gitignore b/.gitignore index 6fa1150..c5ce8a2 100644 --- a/.gitignore +++ b/.gitignore @@ -19,6 +19,6 @@ apps/utils/file-watcher/file-watcher apps/workers/sql-load/sql-load build tasks.db -_preview.html +*_preview.html coverage diff --git a/apps/flowlord/cache/schema.sql b/apps/flowlord/cache/schema.sql index 03da240..8a82d63 100644 --- a/apps/flowlord/cache/schema.sql +++ b/apps/flowlord/cache/schema.sql @@ -1,63 +1,57 @@ -- SQL schema for the task cache -CREATE TABLE IF NOT EXISTS events ( - id TEXT PRIMARY KEY, - completed BOOLEAN, - last_update TIMESTAMP -); - -CREATE TABLE IF NOT EXISTS task_log ( +-- Single 
table for all task records with composite primary key +CREATE TABLE IF NOT EXISTS task_records ( id TEXT, type TEXT, job TEXT, info TEXT, - result TEXT, + result TEXT, -- NULL if not completed meta TEXT, - msg TEXT, + msg TEXT, -- NULL if not completed created TIMESTAMP, - started TIMESTAMP, - ended TIMESTAMP, - event_id TEXT, - FOREIGN KEY (event_id) REFERENCES events(id) + started TIMESTAMP, -- NULL if not started + ended TIMESTAMP, -- NULL if not completed + PRIMARY KEY (type, job, id, created) ); -CREATE INDEX IF NOT EXISTS task_log_created ON task_log (created); -CREATE INDEX IF NOT EXISTS task_log_started ON task_log (started); -CREATE INDEX IF NOT EXISTS task_log_type ON task_log (type); -CREATE INDEX IF NOT EXISTS task_log_job ON task_log (job); -CREATE INDEX IF NOT EXISTS task_log_event_id ON task_log (event_id); +CREATE INDEX IF NOT EXISTS idx_task_records_type ON task_records (type); +CREATE INDEX IF NOT EXISTS idx_task_records_job ON task_records (job); +CREATE INDEX IF NOT EXISTS idx_task_records_created ON task_records (created); +CREATE INDEX IF NOT EXISTS idx_task_records_type_job ON task_records (type, job); +CREATE INDEX IF NOT EXISTS idx_task_records_date_range ON task_records (created, ended); -- Create a view that calculates task and queue times CREATE VIEW IF NOT EXISTS tasks AS SELECT - task_log.id, - task_log.type, - task_log.job, - task_log.info, + task_records.id, + task_records.type, + task_records.job, + task_records.info, -- SQLite doesn't have parse_url function, we'll need to handle this in Go - task_log.meta, + task_records.meta, -- SQLite doesn't have parse_param function, we'll need to handle this in Go - task_log.msg, - task_log.result, + task_records.msg, + task_records.result, -- Calculate task duration in seconds - CAST((julianday(task_log.ended) - julianday(task_log.started)) * 24 * 60 * 60 AS INTEGER) as task_seconds, + CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) as 
task_seconds, -- Format task duration as HH:MM:SS strftime('%H:%M:%S', - CAST((julianday(task_log.ended) - julianday(task_log.started)) * 24 * 60 * 60 AS INTEGER) / 3600 || ':' || - CAST((julianday(task_log.ended) - julianday(task_log.started)) * 24 * 60 * 60 AS INTEGER) % 3600 / 60 || ':' || - CAST((julianday(task_log.ended) - julianday(task_log.started)) * 24 * 60 * 60 AS INTEGER) % 60 + CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) / 3600 || ':' || + CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) % 3600 / 60 || ':' || + CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) % 60 ) as task_time, -- Calculate queue time in seconds - CAST((julianday(task_log.started) - julianday(task_log.created)) * 24 * 60 * 60 AS INTEGER) as queue_seconds, + CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) as queue_seconds, -- Format queue duration as HH:MM:SS strftime('%H:%M:%S', - CAST((julianday(task_log.started) - julianday(task_log.created)) * 24 * 60 * 60 AS INTEGER) / 3600 || ':' || - CAST((julianday(task_log.started) - julianday(task_log.created)) * 24 * 60 * 60 AS INTEGER) % 3600 / 60 || ':' || - CAST((julianday(task_log.started) - julianday(task_log.created)) * 24 * 60 * 60 AS INTEGER) % 60 + CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) / 3600 || ':' || + CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) % 3600 / 60 || ':' || + CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) % 60 ) as queue_time, - task_log.created, - task_log.started, - task_log.ended -FROM task_log; + task_records.created, + task_records.started, + task_records.ended +FROM task_records; -- Alert records table for storing alert events CREATE TABLE IF NOT 
EXISTS alert_records ( diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index d8b9b90..f53bf0d 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -4,6 +4,7 @@ import ( "database/sql" _ "embed" "encoding/json" + "log" "net/url" "sort" "strings" @@ -58,62 +59,35 @@ func (s *SQLite) Add(t task.Task) { s.mu.Lock() defer s.mu.Unlock() - // Start a transaction - tx, err := s.db.Begin() - if err != nil { - return - } - defer tx.Rollback() - - // Check if event exists - var eventExists bool - err = tx.QueryRow("SELECT EXISTS(SELECT 1 FROM events WHERE id = ?)", t.ID).Scan(&eventExists) - if err != nil { - return - } - - // Determine completion status and last update time - completed := t.Result != "" - var lastUpdate time.Time - if completed { - lastUpdate, _ = time.Parse(time.RFC3339, t.Ended) - } else { - lastUpdate, _ = time.Parse(time.RFC3339, t.Created) - } - - // Insert or update event - if !eventExists { - _, err = tx.Exec(` - INSERT INTO events (id, completed, last_update) - VALUES (?, ?, ?) - `, t.ID, completed, lastUpdate) - } else { - _, err = tx.Exec(` - UPDATE events - SET completed = ?, last_update = ? - WHERE id = ? - `, completed, lastUpdate, t.ID) - } - if err != nil { - return - } - - // Insert task log - _, err = tx.Exec(` - INSERT INTO task_log ( - id, type, job, info, result, meta, msg, - created, started, ended, event_id - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + // Use UPSERT to handle both new tasks and updates + result, err := s.db.Exec(` + INSERT INTO task_records (id, type, job, info, result, meta, msg, created, started, ended) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ ON CONFLICT (type, job, id, created) + DO UPDATE SET + result = excluded.result, + meta = excluded.meta, + msg = excluded.msg, + started = excluded.started, + ended = excluded.ended `, t.ID, t.Type, t.Job, t.Info, t.Result, t.Meta, t.Msg, - t.Created, t.Started, t.Ended, t.ID, + t.Created, t.Started, t.Ended, ) + if err != nil { + log.Printf("ERROR: Failed to insert task record: %v", err) return } - // Commit the transaction - tx.Commit() + // Check if this was an update (conflict) rather than insert + rowsAffected, _ := result.RowsAffected() + if rowsAffected == 0 { + // This indicates a conflict occurred and the record was updated + // Log this as it's unexpected for new task creation + log.Printf("WARNING: Task creation conflict detected - task %s:%s:%s at %s was updated instead of inserted", + t.Type, t.Job, t.ID, t.Created) + } } func (s *SQLite) Get(id string) TaskJob { @@ -121,26 +95,13 @@ func (s *SQLite) Get(id string) TaskJob { defer s.mu.Unlock() var tj TaskJob - var completed bool - var lastUpdate time.Time - // Get event info - err := s.db.QueryRow(` - SELECT completed, last_update - FROM events - WHERE id = ? - `, id).Scan(&completed, &lastUpdate) - - if err != nil { - return tj - } - - // Get all task logs for this event + // Get all task records for this ID, ordered by created time rows, err := s.db.Query(` SELECT id, type, job, info, result, meta, msg, created, started, ended - FROM task_log - WHERE event_id = ? + FROM task_records + WHERE id = ? 
ORDER BY created `, id) if err != nil { @@ -149,6 +110,9 @@ func (s *SQLite) Get(id string) TaskJob { defer rows.Close() var events []task.Task + var lastUpdate time.Time + var completed bool + for rows.Next() { var t task.Task err := rows.Scan( @@ -159,6 +123,22 @@ func (s *SQLite) Get(id string) TaskJob { continue } events = append(events, t) + + // Track completion status and last update time + if t.Result != "" { + completed = true + if ended, err := time.Parse(time.RFC3339, t.Ended); err == nil { + if ended.After(lastUpdate) { + lastUpdate = ended + } + } + } else { + if created, err := time.Parse(time.RFC3339, t.Created); err == nil { + if created.After(lastUpdate) { + lastUpdate = created + } + } + } } tj = TaskJob{ @@ -187,38 +167,27 @@ func (s *SQLite) Recycle() Stat { // Get total count before deletion var total int - err = tx.QueryRow("SELECT COUNT(*) FROM events").Scan(&total) + err = tx.QueryRow("SELECT COUNT(*) FROM task_records").Scan(&total) if err != nil { return Stat{} } - // Get expired events and their last task log + // Get expired task records rows, err := tx.Query(` - SELECT e.id, e.completed, tl.id, tl.type, tl.job, tl.info, tl.result, - tl.meta, tl.msg, tl.created, tl.started, tl.ended - FROM events e - JOIN task_log tl ON e.id = tl.event_id - WHERE e.last_update < ? - AND tl.created = ( - SELECT MAX(created) - FROM task_log - WHERE event_id = e.id - ) + SELECT id, type, job, info, result, meta, msg, + created, started, ended + FROM task_records + WHERE created < ? 
`, t.Add(-s.ttl)) if err != nil { return Stat{} } defer rows.Close() - // Process expired events + // Process expired records for rows.Next() { - var ( - eventID string - completed bool - task task.Task - ) + var task task.Task err := rows.Scan( - &eventID, &completed, &task.ID, &task.Type, &task.Job, &task.Info, &task.Result, &task.Meta, &task.Msg, &task.Created, &task.Started, &task.Ended, ) @@ -226,16 +195,16 @@ func (s *SQLite) Recycle() Stat { continue } - if !completed { + // Check if task is incomplete + if task.Result == "" { tasks = append(tasks, task) } - // Delete the event and its task logs - _, err = tx.Exec("DELETE FROM task_log WHERE event_id = ?", eventID) - if err != nil { - continue - } - _, err = tx.Exec("DELETE FROM events WHERE id = ?", eventID) + // Delete the expired record + _, err = tx.Exec(` + DELETE FROM task_records + WHERE id = ? AND type = ? AND job = ? AND created = ? + `, task.ID, task.Type, task.Job, task.Created) if err != nil { continue } @@ -243,7 +212,7 @@ func (s *SQLite) Recycle() Stat { // Get remaining count var remaining int - err = tx.QueryRow("SELECT COUNT(*) FROM events").Scan(&remaining) + err = tx.QueryRow("SELECT COUNT(*) FROM task_records").Scan(&remaining) if err != nil { return Stat{} } @@ -267,7 +236,7 @@ func (s *SQLite) Recap() map[string]*Stats { rows, err := s.db.Query(` SELECT id, type, job, info, result, meta, msg, created, started, ended - FROM task_log + FROM task_records `) if err != nil { return data @@ -745,6 +714,127 @@ func (s *SQLite) GetFileMessagesWithTasks(limit int, offset int) ([]FileMessageW return results, nil } +// TaskView represents a task with calculated times from the tasks view +type TaskView struct { + ID string `json:"id"` + Type string `json:"type"` + Job string `json:"job"` + Info string `json:"info"` + Result string `json:"result"` + Meta string `json:"meta"` + Msg string `json:"msg"` + TaskSeconds int `json:"task_seconds"` + TaskTime string `json:"task_time"` + QueueSeconds int 
`json:"queue_seconds"` + QueueTime string `json:"queue_time"` + Created string `json:"created"` + Started string `json:"started"` + Ended string `json:"ended"` +} + +// GetTasksByDate retrieves tasks for a specific date with optional filtering using the tasks view +func (s *SQLite) GetTasksByDate(date time.Time, taskType, job, result string) ([]TaskView, error) { + s.mu.Lock() + defer s.mu.Unlock() + + dateStr := date.Format("2006-01-02") + + // Build query with optional filters using the tasks view + query := `SELECT id, type, job, info, result, meta, msg, task_seconds, task_time, queue_seconds, queue_time, created, started, ended + FROM tasks + WHERE DATE(created) = ?` + args := []interface{}{dateStr} + + if taskType != "" { + query += " AND type = ?" + args = append(args, taskType) + } + + if job != "" { + query += " AND job = ?" + args = append(args, job) + } + + if result != "" { + query += " AND result = ?" + args = append(args, result) + } + + query += " ORDER BY created DESC" + + rows, err := s.db.Query(query, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + var tasks []TaskView + for rows.Next() { + var t TaskView + err := rows.Scan( + &t.ID, &t.Type, &t.Job, &t.Info, &t.Result, &t.Meta, &t.Msg, + &t.TaskSeconds, &t.TaskTime, &t.QueueSeconds, &t.QueueTime, + &t.Created, &t.Started, &t.Ended, + ) + if err != nil { + continue + } + tasks = append(tasks, t) + } + + return tasks, nil +} + +// GetTaskSummaryByDate creates a summary of tasks for a specific date +func (s *SQLite) GetTaskSummaryByDate(date time.Time) (map[string]*Stats, error) { + s.mu.Lock() + defer s.mu.Unlock() + + dateStr := date.Format("2006-01-02") + + query := `SELECT id, type, job, info, result, meta, msg, created, started, ended + FROM task_records + WHERE DATE(created) = ? 
+ ORDER BY created` + + rows, err := s.db.Query(query, dateStr) + if err != nil { + return nil, err + } + defer rows.Close() + + data := make(map[string]*Stats) + for rows.Next() { + var t task.Task + err := rows.Scan( + &t.ID, &t.Type, &t.Job, &t.Info, &t.Result, &t.Meta, &t.Msg, + &t.Created, &t.Started, &t.Ended, + ) + if err != nil { + continue + } + + job := t.Job + if job == "" { + v, _ := url.ParseQuery(t.Meta) + job = v.Get("job") + } + key := strings.TrimRight(t.Type+":"+job, ":") + stat, found := data[key] + if !found { + stat = &Stats{ + CompletedTimes: make([]time.Time, 0), + ErrorTimes: make([]time.Time, 0), + ExecTimes: &DurationStats{}, + } + data[key] = stat + } + stat.Add(t) + } + + return data, nil +} + // FileMessageWithTasks represents a file message with associated task details type FileMessageWithTasks struct { FileID int `json:"file_id"` diff --git a/apps/flowlord/files_test.go b/apps/flowlord/files_test.go index 6fc1936..6245181 100644 --- a/apps/flowlord/files_test.go +++ b/apps/flowlord/files_test.go @@ -128,7 +128,7 @@ func TestTaskMaster_MatchFile(t *testing.T) { Input: stat.Stats{Path: "gs://bucket/group/data.txt"}, Expected: []task.Task{ {Type: "basic", Meta: "file=gs://bucket/group/data.txt&filename=data.txt&workflow=basic.toml"}, - {Type: "data", Meta: "file=gs://bucket/group/data.txt&filename=data.txt&job=1&workflow=data.toml"}, + {Type: "data", Job: "1", Meta: "file=gs://bucket/group/data.txt&filename=data.txt&job=1&workflow=data.toml"}, }, }, } diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index 3a881ee..9b6537a 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -9,6 +9,7 @@ import ( "io" "log" "net/http" + "net/url" "path/filepath" "strconv" "strings" @@ -24,6 +25,7 @@ import ( "github.com/pcelvng/task-tools/apps/flowlord/cache" "github.com/pcelvng/task-tools/apps/flowlord/handler" "github.com/pcelvng/task-tools/slack" + "github.com/pcelvng/task-tools/tmpl" tools 
"github.com/pcelvng/task-tools" "github.com/pcelvng/task-tools/file" @@ -56,6 +58,7 @@ func (tm *taskMaster) StartHandler() { router.Get("/recap", tm.recapHandler) router.Get("/web/alert", tm.htmlAlert) router.Get("/web/files", tm.htmlFiles) + router.Get("/web/task", tm.htmlTask) if tm.port == 0 { log.Println("flowlord router disabled") @@ -326,6 +329,31 @@ func (tm *taskMaster) htmlFiles(w http.ResponseWriter, r *http.Request) { w.Write(filesHTML(files, dt)) } +// htmlTask handles GET /web/task - displays task summary and table for a specific date +func (tm *taskMaster) htmlTask(w http.ResponseWriter, r *http.Request) { + dt, _ := time.Parse("2006-01-02", r.URL.Query().Get("date")) + if dt.IsZero() { + dt = time.Now() + } + + // Get filter parameters + taskType := r.URL.Query().Get("type") + job := r.URL.Query().Get("job") + result := r.URL.Query().Get("result") + + // Get tasks with filters + tasks, err := tm.taskCache.GetTasksByDate(dt, taskType, job, result) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err.Error())) + return + } + + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "text/html") + w.Write(taskHTML(tasks, dt, taskType, job, result)) +} + // filesHTML renders the file messages HTML page func filesHTML(files []cache.FileMessage, date time.Time) []byte { // Calculate statistics @@ -393,6 +421,172 @@ func filesHTML(files []cache.FileMessage, date time.Time) []byte { return buf.Bytes() } +// generateSummaryFromTasks creates a summary of tasks grouped by type:job +func generateSummaryFromTasks(tasks []cache.TaskView) map[string]*cache.Stats { + summary := make(map[string]*cache.Stats) + + for _, t := range tasks { + // Get job from TaskView.Job or extract from Meta + job := t.Job + if job == "" { + if meta, err := url.ParseQuery(t.Meta); err == nil { + job = meta.Get("job") + } + } + + // Create key in format "type:job" + key := strings.TrimRight(t.Type+":"+job, ":") + + // Get or create stats for this 
type:job combination + stat, found := summary[key] + if !found { + stat = &cache.Stats{ + CompletedTimes: make([]time.Time, 0), + ErrorTimes: make([]time.Time, 0), + ExecTimes: &cache.DurationStats{}, + } + summary[key] = stat + } + + // Convert TaskView to task.Task for processing + taskTime := tmpl.TaskTime(task.Task{ + ID: t.ID, + Type: t.Type, + Job: t.Job, + Info: t.Info, + Result: task.Result(t.Result), + Meta: t.Meta, + Msg: t.Msg, + Created: t.Created, + Started: t.Started, + Ended: t.Ended, + }) + + // Process based on result type + if t.Result == "error" { + stat.ErrorCount++ + stat.ErrorTimes = append(stat.ErrorTimes, taskTime) + } else if t.Result == "complete" { + stat.CompletedCount++ + stat.CompletedTimes = append(stat.CompletedTimes, taskTime) + + // Add execution time for completed tasks + if t.Started != "" && t.Ended != "" { + startTime, err1 := time.Parse(time.RFC3339, t.Started) + endTime, err2 := time.Parse(time.RFC3339, t.Ended) + if err1 == nil && err2 == nil { + stat.ExecTimes.Add(endTime.Sub(startTime)) + } + } + } + // Note: warn and alert results don't contribute to execution time stats + } + + return summary +} + +// taskHTML renders the task summary and table HTML page +func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result string) []byte { + // Calculate navigation dates + prevDate := date.AddDate(0, 0, -1) + nextDate := date.AddDate(0, 0, 1) + + // Generate summary from tasks data + summary := generateSummaryFromTasks(tasks) + + // Calculate statistics + totalTasks := len(tasks) + completedTasks := 0 + errorTasks := 0 + alertTasks := 0 + warnTasks := 0 + runningTasks := 0 + + for _, t := range tasks { + switch t.Result { + case "complete": + completedTasks++ + case "error": + errorTasks++ + case "alert": + alertTasks++ + case "warn": + warnTasks++ + case "": + runningTasks++ + } + } + + data := map[string]interface{}{ + "Date": date.Format("Monday, January 2, 2006"), + "PrevDate": prevDate.Format("2006-01-02"), + 
"NextDate": nextDate.Format("2006-01-02"), + "Tasks": tasks, + "Summary": summary, + "TotalTasks": totalTasks, + "CompletedTasks": completedTasks, + "ErrorTasks": errorTasks, + "AlertTasks": alertTasks, + "WarnTasks": warnTasks, + "RunningTasks": runningTasks, + "CurrentType": taskType, + "CurrentJob": job, + "CurrentResult": result, + } + + // Template functions + funcMap := template.FuncMap{ + "formatTime": func(t time.Time) string { + return t.Format("2006-01-02T15:04:05Z") + }, + "formatDuration": func(start, end string) string { + if start == "" || end == "" { + return "N/A" + } + startTime, err1 := time.Parse(time.RFC3339, start) + endTime, err2 := time.Parse(time.RFC3339, end) + if err1 != nil || err2 != nil { + return "N/A" + } + duration := endTime.Sub(startTime) + return duration.String() + }, + "getJobFromMeta": func(meta string) string { + if meta == "" { + return "" + } + if v, err := url.ParseQuery(meta); err == nil { + return v.Get("job") + } + return "" + }, + "add": func(a, b int) int { + return a + b + }, + "slice": func(s string, start, end int) string { + if start >= len(s) { + return "" + } + if end > len(s) { + end = len(s) + } + return s[start:end] + }, + } + + // Parse and execute template + tmpl, err := template.New("task").Funcs(funcMap).Parse(handler.TaskTemplate) + if err != nil { + return []byte(err.Error()) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return []byte(err.Error()) + } + + return buf.Bytes() +} // AlertData holds both the alerts and summary data for the template type AlertData struct { diff --git a/apps/flowlord/handler/handler.go b/apps/flowlord/handler/handler.go index c735cd4..ff0412a 100644 --- a/apps/flowlord/handler/handler.go +++ b/apps/flowlord/handler/handler.go @@ -7,3 +7,6 @@ var AlertTemplate string //go:embed files.tmpl var FilesTemplate string + +//go:embed task.tmpl +var TaskTemplate string diff --git a/apps/flowlord/handler/task.tmpl b/apps/flowlord/handler/task.tmpl new 
file mode 100644 index 0000000..f29fd60 --- /dev/null +++ b/apps/flowlord/handler/task.tmpl @@ -0,0 +1,623 @@ + + + + + + Flowlord Task Dashboard - {{.Date}} + + + +
+
+

Flowlord Task Dashboard

+
+ ← Previous Day + {{.Date}} + Next Day → +
+
+ +
+

Filters

+
+ +
+
+ + +
+
+ + +
+
+ + +
+
+ + Clear All +
+
+
+
+ +
+

Task Summary

+
+
+
{{.TotalTasks}}
+
Total Tasks
+
+
+
{{.CompletedTasks}}
+
Completed
+
+
+
{{.ErrorTasks}}
+
Errors
+
+
+
{{.AlertTasks}}
+
Alerts
+
+
+
{{.WarnTasks}}
+
Warnings
+
+
+
{{.RunningTasks}}
+
Running
+
+
+ +

Task Type Summary

+
+ {{range $key, $stat := .Summary}} +
+
+ {{$key}} + {{add $stat.CompletedCount $stat.ErrorCount}} tasks +
+
{{$stat.String}}
+
+ {{end}} +
+
+ + {{if .Tasks}} +
+ + + + + + + + + + + + + + + + + {{range .Tasks}} + + + + + + + + + + + + + {{end}} + +
IDTypeJobResultInfoMetaCreatedTask TimeQueue TimeMessage
+ {{if ge (len .ID) 6}}{{slice .ID 0 6}}...{{else}}{{.ID}}{{end}} + {{.Type}}{{if .Job}}{{.Job}}{{else}}{{getJobFromMeta .Meta}}{{end}} + {{if .Result}}{{.Result}}{{else}}Running{{end}} + {{.Info}}{{.Meta}}{{if .Created}}{{.Created}}{{else}}N/A{{end}}{{if .TaskTime}}{{.TaskTime}}{{else}}N/A{{end}}{{if .QueueTime}}{{.QueueTime}}{{else}}N/A{{end}}{{.Msg}}
+
+
+ Total Tasks: {{.TotalTasks}} +
+ {{else}} +
+

No tasks found

+

No tasks were found for {{.Date}} with the current filters.

+
+ {{end}} +
+ + + + diff --git a/apps/flowlord/handler_test.go b/apps/flowlord/handler_test.go index 63af16c..c1de2b8 100644 --- a/apps/flowlord/handler_test.go +++ b/apps/flowlord/handler_test.go @@ -1,6 +1,7 @@ package main import ( + "encoding/json" "errors" "os" "testing" @@ -15,6 +16,22 @@ import ( const testPath = "../../internal/test" +// loadTaskViewData loads TaskView data from a JSON file +func loadTaskViewData(filename string) ([]cache.TaskView, error) { + data, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + + var tasks []cache.TaskView + err = json.Unmarshal(data, &tasks) + if err != nil { + return nil, err + } + + return tasks, nil +} + func TestBackloader(t *testing.T) { cache, err := workflow.New(testPath+"/workflow/f3.toml", nil) today := time.Now().Format("2006-01-02") @@ -443,4 +460,34 @@ func TestFilesHTML(t *testing.T) { t.Error("Expected HTML output, got empty") } -} \ No newline at end of file +} + + +func TestTaskHTML(t *testing.T) { + // Load TaskView data from JSON file + testTasks, err := loadTaskViewData("test/tasks.json") + if err != nil { + t.Fatalf("Failed to load task data: %v", err) + } + + // Set test date + date := trial.TimeDay("2024-01-15") + + // Test with no filters - summary will be generated from tasks data + html := taskHTML(testTasks, date, "", "", "") + + // Write HTML to a file for easy viewing + outputFile := "task_preview.html" + err = os.WriteFile(outputFile, html, 0644) + if err != nil { + t.Fatalf("Failed to write HTML file: %v", err) + } + + t.Logf("Task preview generated and saved to: ./%s", outputFile) + + // Basic checks + if len(html) == 0 { + t.Error("Expected HTML output, got empty") + } + +} diff --git a/apps/flowlord/test/tasks.json b/apps/flowlord/test/tasks.json new file mode 100644 index 0000000..706721d --- /dev/null +++ b/apps/flowlord/test/tasks.json @@ -0,0 +1,194 @@ +[ + { + "id": "task-1", + "type": "csv2json", + "job": "hourly", + "info": "?date=2024-01-15&hour=10", + "result": 
"complete", + "meta": "cron=2024-01-15T10&job=hourly", + "msg": "Successfully processed 1000 records", + "task_seconds": 325, + "task_time": "5m25s", + "queue_seconds": 5, + "queue_time": "5s", + "created": "2024-01-15T10:00:00Z", + "started": "2024-01-15T10:00:05Z", + "ended": "2024-01-15T10:05:30Z" + }, + { + "id": "task-2", + "type": "csv2json", + "job": "hourly", + "info": "?date=2024-01-15&hour=11", + "result": "complete", + "meta": "cron=2024-01-15T11&job=hourly", + "msg": "Successfully processed 1200 records", + "task_seconds": 380, + "task_time": "6m20s", + "queue_seconds": 8, + "queue_time": "8s", + "created": "2024-01-15T11:00:00Z", + "started": "2024-01-15T11:00:08Z", + "ended": "2024-01-15T11:06:28Z" + }, + { + "id": "task-3", + "type": "csv2json", + "job": "hourly", + "info": "?date=2024-01-15&hour=12", + "result": "error", + "meta": "cron=2024-01-15T12&job=hourly", + "msg": "Failed to read input file: file not found", + "task_seconds": 45, + "task_time": "45s", + "queue_seconds": 3, + "queue_time": "3s", + "created": "2024-01-15T12:00:00Z", + "started": "2024-01-15T12:00:03Z", + "ended": "2024-01-15T12:00:48Z" + }, + { + "id": "task-4", + "type": "filecopy", + "job": "backup", + "info": "?source=/data/users&dest=/backup/users", + "result": "complete", + "meta": "cron=2024-01-15T13&job=backup", + "msg": "Copied 50 files successfully", + "task_seconds": 935, + "task_time": "15m35s", + "queue_seconds": 10, + "queue_time": "10s", + "created": "2024-01-15T13:00:00Z", + "started": "2024-01-15T13:00:10Z", + "ended": "2024-01-15T13:15:45Z" + }, + { + "id": "task-5", + "type": "filecopy", + "job": "backup", + "info": "?source=/data/orders&dest=/backup/orders", + "result": "complete", + "meta": "cron=2024-01-15T14&job=backup", + "msg": "Copied 75 files successfully", + "task_seconds": 1200, + "task_time": "20m0s", + "queue_seconds": 12, + "queue_time": "12s", + "created": "2024-01-15T14:00:00Z", + "started": "2024-01-15T14:00:12Z", + "ended": 
"2024-01-15T14:20:00Z" + }, + { + "id": "task-6", + "type": "filecopy", + "job": "backup", + "info": "?source=/data/products&dest=/backup/products", + "result": "warn", + "meta": "cron=2024-01-15T15&job=backup", + "msg": "Some files were locked and skipped", + "task_seconds": 600, + "task_time": "10m0s", + "queue_seconds": 5, + "queue_time": "5s", + "created": "2024-01-15T15:00:00Z", + "started": "2024-01-15T15:00:05Z", + "ended": "2024-01-15T15:10:00Z" + }, + { + "id": "task-7", + "type": "transform", + "job": "realtime", + "info": "?input=stream1&output=processed1", + "result": "alert", + "meta": "cron=2024-01-15T16&job=realtime", + "msg": "High memory usage detected", + "task_seconds": 145, + "task_time": "2m25s", + "queue_seconds": 5, + "queue_time": "5s", + "created": "2024-01-15T16:00:00Z", + "started": "2024-01-15T16:00:05Z", + "ended": "2024-01-15T16:02:30Z" + }, + { + "id": "task-8", + "type": "transform", + "job": "realtime", + "info": "?input=stream2&output=processed2", + "result": "complete", + "meta": "cron=2024-01-15T17&job=realtime", + "msg": "Successfully processed 5000 events", + "task_seconds": 200, + "task_time": "3m20s", + "queue_seconds": 7, + "queue_time": "7s", + "created": "2024-01-15T17:00:00Z", + "started": "2024-01-15T17:00:07Z", + "ended": "2024-01-15T17:03:27Z" + }, + { + "id": "task-9", + "type": "transform", + "job": "realtime", + "info": "?input=stream3&output=processed3", + "result": "complete", + "meta": "cron=2024-01-15T18&job=realtime", + "msg": "Successfully processed 3000 events", + "task_seconds": 180, + "task_time": "3m0s", + "queue_seconds": 4, + "queue_time": "4s", + "created": "2024-01-15T18:00:00Z", + "started": "2024-01-15T18:00:04Z", + "ended": "2024-01-15T18:03:04Z" + }, + { + "id": "task-10", + "type": "bigquery", + "job": "analytics", + "info": "?table=events&date=2024-01-15", + "result": "warn", + "meta": "cron=2024-01-15T19&job=analytics", + "msg": "Slow query execution", + "task_seconds": 1515, + "task_time": 
"25m15s", + "queue_seconds": 15, + "queue_time": "15s", + "created": "2024-01-15T19:00:00Z", + "started": "2024-01-15T19:00:15Z", + "ended": "2024-01-15T19:25:30Z" + }, + { + "id": "task-11", + "type": "bigquery", + "job": "analytics", + "info": "?table=users&date=2024-01-15", + "result": "complete", + "meta": "cron=2024-01-15T20&job=analytics", + "msg": "Query completed successfully", + "task_seconds": 800, + "task_time": "13m20s", + "queue_seconds": 10, + "queue_time": "10s", + "created": "2024-01-15T20:00:00Z", + "started": "2024-01-15T20:00:10Z", + "ended": "2024-01-15T20:13:30Z" + }, + { + "id": "task-12", + "type": "bigquery", + "job": "analytics", + "info": "?table=orders&date=2024-01-15", + "result": "error", + "meta": "cron=2024-01-15T21&job=analytics", + "msg": "Query failed: table not found", + "task_seconds": 30, + "task_time": "30s", + "queue_seconds": 5, + "queue_time": "5s", + "created": "2024-01-15T21:00:00Z", + "started": "2024-01-15T21:00:05Z", + "ended": "2024-01-15T21:00:35Z" + } +] \ No newline at end of file From c205d863ff57921b59830dd4fc40ad1821f445f7 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Tue, 30 Sep 2025 15:34:26 -0600 Subject: [PATCH 09/40] UI refinement --- apps/flowlord/handler.go | 1 + apps/flowlord/handler/task.tmpl | 285 +++++++++++++++++++++++++++++--- apps/flowlord/test/tasks.json | 2 +- 3 files changed, 263 insertions(+), 25 deletions(-) diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index 9b6537a..fb3c213 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -519,6 +519,7 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri data := map[string]interface{}{ "Date": date.Format("Monday, January 2, 2006"), + "DateValue": date.Format("2006-01-02"), "PrevDate": prevDate.Format("2006-01-02"), "NextDate": nextDate.Format("2006-01-02"), "Tasks": tasks, diff --git a/apps/flowlord/handler/task.tmpl b/apps/flowlord/handler/task.tmpl index f29fd60..966dda1 100644 
--- a/apps/flowlord/handler/task.tmpl +++ b/apps/flowlord/handler/task.tmpl @@ -240,6 +240,31 @@ font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; font-size: 13px; color: #6c757d; + cursor: pointer; + position: relative; + user-select: text; + } + .id-cell:hover { + background-color: #f8f9fa; + } + .id-cell.truncated { + max-width: 100px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + } + .id-cell.expanded { + max-width: none; + white-space: normal; + word-break: break-all; + background-color: #f8f9fa; + border: 1px solid #dee2e6; + border-radius: 4px; + padding: 8px; + margin: 2px; + } + .id-cell.copyable { + position: relative; } .type-cell { font-weight: 500; @@ -277,22 +302,74 @@ font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; font-size: 13px; color: #6c757d; + cursor: pointer; + line-height: 1.4; + user-select: text; + } + .info-cell:hover { + background-color: #f8f9fa; + } + .info-cell.truncated { max-width: 200px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + } + .info-cell.expanded { + max-width: none; + white-space: normal; word-wrap: break-word; - line-height: 1.4; + background-color: #f8f9fa; + border: 1px solid #dee2e6; + border-radius: 4px; + padding: 8px; + margin: 2px; + } + .info-cell.copyable { + position: relative; } .meta-cell { font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; font-size: 12px; color: #6c757d; + cursor: pointer; + line-height: 1.4; + user-select: text; + } + .meta-cell:hover { + background-color: #f8f9fa; + } + .meta-cell.truncated { max-width: 200px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + } + .meta-cell.expanded { + max-width: none; + white-space: normal; word-wrap: break-word; - line-height: 1.4; + background-color: #f8f9fa; + border: 1px solid #dee2e6; + border-radius: 4px; + padding: 8px; + margin: 2px; + } + .meta-cell.copyable { + position: relative; } .message-cell { max-width: 300px; word-wrap: break-word; 
line-height: 1.4; + cursor: pointer; + user-select: text; + } + .message-cell:hover { + background-color: #f8f9fa; + } + .message-cell.copyable { + position: relative; } .stats { padding: 16px 20px; @@ -310,6 +387,56 @@ margin: 0 0 10px 0; color: #95a5a6; } + + /* Context menu styles */ + .context-menu { + position: absolute; + background: white; + border: 1px solid #dee2e6; + border-radius: 4px; + box-shadow: 0 4px 12px rgba(0,0,0,0.15); + z-index: 1000; + min-width: 120px; + padding: 4px 0; + } + .context-menu-item { + padding: 8px 16px; + cursor: pointer; + font-size: 14px; + color: #495057; + display: flex; + align-items: center; + gap: 8px; + } + .context-menu-item:hover { + background-color: #f8f9fa; + } + .context-menu-item:active { + background-color: #e9ecef; + } + + /* Copy feedback styles */ + .copy-feedback { + position: absolute; + top: -25px; + left: 50%; + transform: translateX(-50%); + background: #28a745; + color: white; + padding: 4px 8px; + border-radius: 4px; + font-size: 12px; + font-weight: 500; + z-index: 1001; + pointer-events: none; + animation: copyFeedback 2s ease-out forwards; + } + @keyframes copyFeedback { + 0% { opacity: 0; transform: translateX(-50%) translateY(0); } + 20% { opacity: 1; transform: translateX(-50%) translateY(-5px); } + 80% { opacity: 1; transform: translateX(-50%) translateY(-5px); } + 100% { opacity: 0; transform: translateX(-50%) translateY(-10px); } + } @@ -326,7 +453,7 @@

Filters

- +
@@ -418,20 +545,47 @@ {{range .Tasks}} - - {{if ge (len .ID) 6}}{{slice .ID 0 6}}...{{else}}{{.ID}}{{end}} + + {{if ge (len .ID) 8}}{{slice .ID 0 8}}...{{else}}{{.ID}}{{end}} {{.Type}} {{if .Job}}{{.Job}}{{else}}{{getJobFromMeta .Meta}}{{end}} {{if .Result}}{{.Result}}{{else}}Running{{end}} - {{.Info}} - {{.Meta}} + + {{if ge (len .Info) 80}}{{slice .Info 0 80}}...{{else}}{{.Info}}{{end}} + + + {{if ge (len .Meta) 80}}{{slice .Meta 0 80}}...{{else}}{{.Meta}}{{end}} + {{if .Created}}{{.Created}}{{else}}N/A{{end}} {{if .TaskTime}}{{.TaskTime}}{{else}}N/A{{end}} {{if .QueueTime}}{{.QueueTime}}{{else}}N/A{{end}} - {{.Msg}} + + {{.Msg}} + {{end}} @@ -585,39 +739,122 @@ initializeSorting(); }); - // Copy to clipboard functionality + // Toggle field expansion/collapse functionality + function toggleField(element, fullText) { + // Prevent event bubbling to avoid conflicts with sorting + event.stopPropagation(); + + if (element.classList.contains('expanded')) { + // Collapse the field + element.classList.remove('expanded'); + element.classList.add('truncated'); + element.textContent = element.dataset.truncatedText; + } else { + // Expand the field + element.classList.remove('truncated'); + element.classList.add('expanded'); + element.textContent = fullText; + } + } + + // Context menu functionality + function showContextMenu(event, text) { + event.preventDefault(); + event.stopPropagation(); + + // Remove any existing context menu + const existingMenu = document.querySelector('.context-menu'); + if (existingMenu) { + existingMenu.remove(); + } + + // Create context menu + const contextMenu = document.createElement('div'); + contextMenu.className = 'context-menu'; + contextMenu.innerHTML = ` +
+ 📋 Copy +
+ `; + + // Position the context menu + contextMenu.style.left = event.pageX + 'px'; + contextMenu.style.top = event.pageY + 'px'; + + document.body.appendChild(contextMenu); + + // Close context menu when clicking elsewhere + const closeMenu = (e) => { + if (!contextMenu.contains(e.target)) { + contextMenu.remove(); + document.removeEventListener('click', closeMenu); + } + }; + + setTimeout(() => { + document.addEventListener('click', closeMenu); + }, 100); + } + + + // Copy to clipboard functionality with enhanced feedback function copyToClipboard(text) { + const targetElement = event ? event.target : document.activeElement; + navigator.clipboard.writeText(text).then(function() { - // Show a brief success message - const originalText = event.target.textContent; - event.target.textContent = 'Copied!'; - event.target.style.color = '#28a745'; - setTimeout(function() { - event.target.textContent = originalText; - event.target.style.color = ''; - }, 1000); + showCopyFeedback(targetElement, 'Copied!'); }).catch(function(err) { console.error('Could not copy text: ', err); // Fallback for older browsers const textArea = document.createElement('textarea'); textArea.value = text; + textArea.style.position = 'fixed'; + textArea.style.left = '-999999px'; + textArea.style.top = '-999999px'; document.body.appendChild(textArea); + textArea.focus(); textArea.select(); try { document.execCommand('copy'); - const originalText = event.target.textContent; - event.target.textContent = 'Copied!'; - event.target.style.color = '#28a745'; - setTimeout(function() { - event.target.textContent = originalText; - event.target.style.color = ''; - }, 1000); + showCopyFeedback(targetElement, 'Copied!'); } catch (err) { console.error('Fallback copy failed: ', err); + showCopyFeedback(targetElement, 'Copy failed!', true); } document.body.removeChild(textArea); }); } + + // Show copy feedback with animation + function showCopyFeedback(element, message, isError = false) { + // Remove any existing 
feedback + const existingFeedback = element.querySelector('.copy-feedback'); + if (existingFeedback) { + existingFeedback.remove(); + } + + // Create feedback element + const feedback = document.createElement('div'); + feedback.className = 'copy-feedback'; + feedback.textContent = message; + feedback.style.backgroundColor = isError ? '#dc3545' : '#28a745'; + + // Position feedback relative to the element + const rect = element.getBoundingClientRect(); + feedback.style.position = 'fixed'; + feedback.style.left = (rect.left + rect.width / 2) + 'px'; + feedback.style.top = (rect.top - 10) + 'px'; + feedback.style.transform = 'translateX(-50%)'; + + element.appendChild(feedback); + + // Remove feedback after animation + setTimeout(() => { + if (feedback.parentNode) { + feedback.remove(); + } + }, 2000); + } diff --git a/apps/flowlord/test/tasks.json b/apps/flowlord/test/tasks.json index 706721d..4b5dd90 100644 --- a/apps/flowlord/test/tasks.json +++ b/apps/flowlord/test/tasks.json @@ -32,7 +32,7 @@ "ended": "2024-01-15T11:06:28Z" }, { - "id": "task-3", + "id": "task-long-id-abcdefg-3", "type": "csv2json", "job": "hourly", "info": "?date=2024-01-15&hour=12", From 83d6710b99872ff648a646815aafb001726b1bc3 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Wed, 1 Oct 2025 14:14:20 -0600 Subject: [PATCH 10/40] disable delete of tasks after TTL --- apps/flowlord/cache/cache.go | 49 ++++---- apps/flowlord/cache/sqlite.go | 89 +++++++++++++-- apps/flowlord/handler/task.tmpl | 191 ++++++++++++++++++++++++++------ apps/flowlord/taskmaster.go | 18 ++- 4 files changed, 276 insertions(+), 71 deletions(-) diff --git a/apps/flowlord/cache/cache.go b/apps/flowlord/cache/cache.go index 596f33d..faab44d 100644 --- a/apps/flowlord/cache/cache.go +++ b/apps/flowlord/cache/cache.go @@ -1,15 +1,26 @@ package cache import ( - "net/url" - "strings" - "sync" "time" "github.com/pcelvng/task" - "github.com/pcelvng/task/bus" ) +// todo: name to describe info about completed tasks that are within 
the cache +type TaskJob struct { + LastUpdate time.Time // time since the last event with id + Completed bool + count int + Events []task.Task +} + +type Stat struct { + Count int + Removed int + ProcessTime time.Duration + Unfinished []task.Task +} + // AlertRecord represents an alert stored in the database type AlertRecord struct { ID int64 `json:"id"` @@ -29,12 +40,18 @@ type SummaryLine struct { TimeRange string `json:"time_range"` // formatted time range } -type Cache interface { - Add(task.Task) - Get(id string) TaskJob +/* +import ( + "net/url" + "strings" + "sync" + "time" + + "github.com/pcelvng/task" + "github.com/pcelvng/task/bus" +) + - // todo: listener for cache expiry? -} func NewMemory(ttl time.Duration) *Memory { if ttl < time.Hour { @@ -53,20 +70,7 @@ type Memory struct { mu sync.RWMutex } -// todo: name to describe info about completed tasks that are within the cache -type TaskJob struct { - LastUpdate time.Time // time since the last event with id - Completed bool - count int - Events []task.Task -} -type Stat struct { - Count int - Removed int - ProcessTime time.Duration - Unfinished []task.Task -} // Recycle iterates through the cache // clearing all tasks that have been completed within the cache window @@ -163,3 +167,4 @@ func (m *Memory) SendFunc(p bus.Producer) func(string, *task.Task) error { return p.Send(topic, tsk.JSONBytes()) } } +*/ \ No newline at end of file diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index f53bf0d..c8b3323 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -200,14 +200,15 @@ func (s *SQLite) Recycle() Stat { tasks = append(tasks, task) } + // TODO: Deletion logic commented out for later implementation // Delete the expired record - _, err = tx.Exec(` - DELETE FROM task_records - WHERE id = ? AND type = ? AND job = ? AND created = ? 
- `, task.ID, task.Type, task.Job, task.Created) - if err != nil { - continue - } + // _, err = tx.Exec(` + // DELETE FROM task_records + // WHERE id = ? AND type = ? AND job = ? AND created = ? + // `, task.ID, task.Type, task.Job, task.Created) + // if err != nil { + // continue + // } } // Get remaining count @@ -228,6 +229,79 @@ func (s *SQLite) Recycle() Stat { } } +// CheckIncompleteTasks checks for tasks that have not completed within the TTL period +// and adds them to the alerts table with deduplication. Returns count of alerts added. +// Uses a JOIN query to efficiently find incomplete tasks without existing alerts. +func (s *SQLite) CheckIncompleteTasks() Stat { + s.mu.Lock() + defer s.mu.Unlock() + + tasks := make([]task.Task, 0) + alertsAdded := 0 + t := time.Now() + + // Use LEFT JOIN to find incomplete tasks that don't have existing alerts + // This eliminates the need for separate deduplication queries + rows, err := s.db.Query(` + SELECT tr.id, tr.type, tr.job, tr.info, tr.result, tr.meta, tr.msg, + tr.created, tr.started, tr.ended + FROM task_records tr + LEFT JOIN alert_records ar ON ( + tr.id = ar.task_id AND + tr.type = ar.task_type AND + tr.job = ar.job AND + ar.msg LIKE 'INCOMPLETE:%' AND + ar.created_at > datetime('now', '-1 day') + ) + WHERE tr.created < ? 
+ AND tr.result = '' + AND ar.id IS NULL + `, t.Add(-s.ttl)) + if err != nil { + return Stat{} + } + defer rows.Close() + + // Process incomplete records that don't have alerts + for rows.Next() { + var task task.Task + err := rows.Scan( + &task.ID, &task.Type, &task.Job, &task.Info, &task.Result, + &task.Meta, &task.Msg, &task.Created, &task.Started, &task.Ended, + ) + if err != nil { + continue + } + + // Add incomplete task to alert list + tasks = append(tasks, task) + + // Add alert directly (no need to check for duplicates since JOIN already filtered them) + taskID := task.ID + if taskID == "" { + taskID = "unknown" + } + + taskTime := tmpl.TaskTime(task) + + _, err = s.db.Exec(` + INSERT INTO alert_records (task_id, task_time, task_type, job, msg) + VALUES (?, ?, ?, ?, ?) + `, taskID, taskTime, task.Type, task.Job, "INCOMPLETE: unfinished task detected") + + if err == nil { + alertsAdded++ + } + } + + return Stat{ + Count: alertsAdded, + Removed: 0, // No deletion in this method + ProcessTime: time.Since(t), + Unfinished: tasks, + } +} + func (s *SQLite) Recap() map[string]*Stats { s.mu.Lock() defer s.mu.Unlock() @@ -310,6 +384,7 @@ func (s *SQLite) AddAlert(t task.Task, message string) error { return err } + // extractJobFromTask is a helper function to get job from task func extractJobFromTask(t task.Task) string { job := t.Job diff --git a/apps/flowlord/handler/task.tmpl b/apps/flowlord/handler/task.tmpl index 966dda1..549ade8 100644 --- a/apps/flowlord/handler/task.tmpl +++ b/apps/flowlord/handler/task.tmpl @@ -450,37 +450,6 @@
-
-

Filters

- - -
-
- - -
-
- - -
-
- - -
-
- - Clear All -
-
- -

Task Summary

@@ -525,6 +494,27 @@
+
+

Filters

+
+
+ + +
+
+ + +
+
+ +
+
+
+ {{if .Tasks}}
@@ -737,8 +727,147 @@ // Initialize the page initializeSorting(); + initializeFilters(); }); + // Initialize responsive filters + function initializeFilters() { + const typeFilter = document.getElementById('typeFilter'); + const jobFilter = document.getElementById('jobFilter'); + const table = document.getElementById('taskTable'); + + if (!table) return; + + // Get all task rows + const rows = Array.from(table.querySelectorAll('tbody tr')); + + // Extract unique task types + const taskTypes = new Set(); + const jobMap = new Map(); // type -> Set of jobs + + rows.forEach(row => { + const typeCell = row.cells[1]; // Type column + const jobCell = row.cells[2]; // Job column + + if (typeCell && jobCell) { + const type = typeCell.textContent.trim(); + const job = jobCell.textContent.trim(); + + if (type) { + taskTypes.add(type); + + if (!jobMap.has(type)) { + jobMap.set(type, new Set()); + } + if (job) { + jobMap.get(type).add(job); + } + } + } + }); + + // Populate task type dropdown + const sortedTypes = Array.from(taskTypes).sort(); + sortedTypes.forEach(type => { + const option = document.createElement('option'); + option.value = type; + option.textContent = type; + typeFilter.appendChild(option); + }); + + // Handle task type change + typeFilter.addEventListener('change', function() { + const selectedType = this.value; + const jobOptions = jobFilter.querySelectorAll('option:not([value=""])'); + jobOptions.forEach(option => option.remove()); + + if (selectedType && jobMap.has(selectedType)) { + const jobs = Array.from(jobMap.get(selectedType)).sort(); + jobs.forEach(job => { + const option = document.createElement('option'); + option.value = job; + option.textContent = job; + jobFilter.appendChild(option); + }); + } + + filterTable(); + }); + + // Handle job change + jobFilter.addEventListener('change', filterTable); + + // Initial filter + filterTable(); + } + + // Filter table based on current filter values + function filterTable() { + const typeFilter = 
document.getElementById('typeFilter'); + const jobFilter = document.getElementById('jobFilter'); + const table = document.getElementById('taskTable'); + + if (!table) return; + + const selectedType = typeFilter ? typeFilter.value : ''; + const selectedJob = jobFilter ? jobFilter.value : ''; + + const rows = Array.from(table.querySelectorAll('tbody tr')); + + rows.forEach(row => { + const typeCell = row.cells[1]; + const jobCell = row.cells[2]; + + if (typeCell && jobCell) { + const type = typeCell.textContent.trim(); + const job = jobCell.textContent.trim(); + + const typeMatch = !selectedType || type === selectedType; + const jobMatch = !selectedJob || job === selectedJob; + + if (typeMatch && jobMatch) { + row.style.display = ''; + } else { + row.style.display = 'none'; + } + } + }); + + // Update task count + updateTaskCount(); + } + + // Update the displayed task count + function updateTaskCount() { + const table = document.getElementById('taskTable'); + if (!table) return; + + const visibleRows = Array.from(table.querySelectorAll('tbody tr')).filter(row => + row.style.display !== 'none' + ); + + const totalCountElement = document.getElementById('totalCount'); + if (totalCountElement) { + totalCountElement.textContent = visibleRows.length; + } + } + + // Clear all filters + function clearFilters() { + const typeFilter = document.getElementById('typeFilter'); + const jobFilter = document.getElementById('jobFilter'); + + if (typeFilter) typeFilter.value = ''; + if (jobFilter) { + jobFilter.value = ''; + // Clear job options except "All Jobs" + const jobOptions = jobFilter.querySelectorAll('option:not([value=""])'); + jobOptions.forEach(option => option.remove()); + } + + filterTable(); + } + // Toggle field expansion/collapse functionality function toggleField(element, fullText) { // Prevent event bubbling to avoid conflicts with sorting diff --git a/apps/flowlord/taskmaster.go b/apps/flowlord/taskmaster.go index 9c8a98b..ab517fb 100644 --- 
a/apps/flowlord/taskmaster.go +++ b/apps/flowlord/taskmaster.go @@ -154,16 +154,7 @@ func (tm *taskMaster) getAllChildren(topic, workflow, job string) (s []string) { } func (tm *taskMaster) refreshCache() ([]string, error) { - stat := tm.taskCache.Recycle() - if stat.Removed > 0 { - log.Printf("task-cache: size %d removed %d time: %v", stat.Count, stat.Removed, stat.ProcessTime) - for _, t := range stat.Unfinished { - // add unfinished tasks to alerts channel - t.Msg += "unfinished task detected" - tm.alerts <- t - } - } - + // Reload workflow files files, err := tm.Cache.Refresh() if err != nil { return nil, fmt.Errorf("error reloading workflow: %w", err) @@ -470,7 +461,7 @@ func (tm *taskMaster) readFiles(ctx context.Context) { } } -// handleNotifications gathers all 'failed' tasks and +// handleNotifications gathers all 'failed' tasks and incomplete tasks // sends a summary message every X minutes // It uses an exponential backoff to limit the number of messages // ie, (min) 5 -> 10 -> 20 -> 40 -> 80 -> 160 (max) @@ -482,6 +473,11 @@ func (tm *taskMaster) handleNotifications(taskChan chan task.Task, ctx context.C dur := tm.slack.MinFrequency for ; ; time.Sleep(dur) { var err error + + // Check for incomplete tasks and add them to alerts + tm.taskCache.CheckIncompleteTasks() + + // Get all alerts (including newly added incomplete task alerts) alerts, err = tm.taskCache.GetAlertsByDate(time.Now()) if err != nil { log.Printf("failed to retrieve alerts: %v", err) From 66b6f3ee31eeb009219ff6d57e7bac4778c1ae9a Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Wed, 1 Oct 2025 14:36:51 -0600 Subject: [PATCH 11/40] Add about page --- apps/flowlord/cache/cache_test.go | 10 +- apps/flowlord/cache/sqlite.go | 136 ++++++++++++++ apps/flowlord/handler.go | 76 +++++++- apps/flowlord/handler/about.tmpl | 286 ++++++++++++++++++++++++++++++ apps/flowlord/handler/alert.tmpl | 51 ++++++ apps/flowlord/handler/files.tmpl | 51 ++++++ apps/flowlord/handler/handler.go | 6 + 
apps/flowlord/handler/header.tmpl | 27 +++ apps/flowlord/handler/task.tmpl | 51 ++++++ apps/flowlord/handler_test.go | 47 +++++ 10 files changed, 728 insertions(+), 13 deletions(-) create mode 100644 apps/flowlord/handler/about.tmpl create mode 100644 apps/flowlord/handler/header.tmpl diff --git a/apps/flowlord/cache/cache_test.go b/apps/flowlord/cache/cache_test.go index 7c61512..af22b53 100644 --- a/apps/flowlord/cache/cache_test.go +++ b/apps/flowlord/cache/cache_test.go @@ -1,13 +1,6 @@ package cache -import ( - "testing" - "time" - - "github.com/hydronica/trial" - "github.com/pcelvng/task" -) - +/* func TestAdd(t *testing.T) { fn := func(tasks []task.Task) (map[string]TaskJob, error) { cache := &Memory{cache: make(map[string]TaskJob)} @@ -225,3 +218,4 @@ func TestRecap(t *testing.T) { } trial.New(fn, cases).SubTest(t) } +*/ \ No newline at end of file diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index c8b3323..1853142 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -4,6 +4,7 @@ import ( "database/sql" _ "embed" "encoding/json" + "fmt" "log" "net/url" "sort" @@ -924,3 +925,138 @@ type FileMessageWithTasks struct { TaskStarted time.Time `json:"task_started"` TaskEnded time.Time `json:"task_ended"` } + +// DBSizeInfo contains database size information +type DBSizeInfo struct { + TotalSize string `json:"total_size"` + PageCount int64 `json:"page_count"` + PageSize int64 `json:"page_size"` + DBPath string `json:"db_path"` +} + +// TableStat contains information about a database table +type TableStat struct { + Name string `json:"name"` + RowCount int64 `json:"row_count"` + SizeBytes int64 `json:"size_bytes"` + SizeHuman string `json:"size_human"` + Percentage float64 `json:"percentage"` +} + +// GetDBSize returns database size information +func (s *SQLite) GetDBSize() (*DBSizeInfo, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Get page count and page size + var pageCount, pageSize int64 + err := 
s.db.QueryRow("PRAGMA page_count").Scan(&pageCount) + if err != nil { + return nil, err + } + + err = s.db.QueryRow("PRAGMA page_size").Scan(&pageSize) + if err != nil { + return nil, err + } + + // Get database file path + var dbPath string + err = s.db.QueryRow("PRAGMA database_list").Scan(&dbPath, nil, nil) + if err != nil { + // If we can't get the path, use a default + dbPath = "unknown" + } + + totalSize := pageCount * pageSize + totalSizeStr := formatBytes(totalSize) + + return &DBSizeInfo{ + TotalSize: totalSizeStr, + PageCount: pageCount, + PageSize: pageSize, + DBPath: dbPath, + }, nil +} + +// GetTableStats returns statistics for all tables in the database +func (s *SQLite) GetTableStats() ([]TableStat, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Get total database size first + var totalSize int64 + err := s.db.QueryRow("SELECT page_count * page_size FROM pragma_page_count(), pragma_page_size()").Scan(&totalSize) + if err != nil { + return nil, err + } + + // Get list of tables + rows, err := s.db.Query(` + SELECT name FROM sqlite_master + WHERE type='table' AND name NOT LIKE 'sqlite_%' + ORDER BY name + `) + if err != nil { + return nil, err + } + defer rows.Close() + + var tables []string + for rows.Next() { + var tableName string + if err := rows.Scan(&tableName); err != nil { + return nil, err + } + tables = append(tables, tableName) + } + + var stats []TableStat + for _, tableName := range tables { + // Get row count + var rowCount int64 + err := s.db.QueryRow(fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName)).Scan(&rowCount) + if err != nil { + continue // Skip tables we can't read + } + + // Get table size using pragma table_info and estimate + // This is an approximation since SQLite doesn't provide exact table sizes + var sizeBytes int64 + if rowCount > 0 { + // Estimate size based on row count and average row size + // This is a rough approximation + avgRowSize := int64(200) // Estimated average row size in bytes + sizeBytes = rowCount 
* avgRowSize + } + + percentage := float64(0) + if totalSize > 0 { + percentage = float64(sizeBytes) / float64(totalSize) * 100 + } + + stats = append(stats, TableStat{ + Name: tableName, + RowCount: rowCount, + SizeBytes: sizeBytes, + SizeHuman: formatBytes(sizeBytes), + Percentage: percentage, + }) + } + + return stats, nil +} + +// formatBytes converts bytes to human readable format +func formatBytes(bytes int64) string { + const unit = 1024 + if bytes < unit { + return fmt.Sprintf("%d B", bytes) + } + div, exp := int64(unit), 0 + for n := bytes / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp]) +} diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index fb3c213..c3e532b 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -59,6 +59,7 @@ func (tm *taskMaster) StartHandler() { router.Get("/web/alert", tm.htmlAlert) router.Get("/web/files", tm.htmlFiles) router.Get("/web/task", tm.htmlTask) + router.Get("/web/about", tm.htmlAbout) if tm.port == 0 { log.Println("flowlord router disabled") @@ -354,6 +355,13 @@ func (tm *taskMaster) htmlTask(w http.ResponseWriter, r *http.Request) { w.Write(taskHTML(tasks, dt, taskType, job, result)) } +// htmlAbout handles GET /web/about - displays system information and cache statistics +func (tm *taskMaster) htmlAbout(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "text/html") + w.Write(tm.aboutHTML()) +} + // filesHTML renders the file messages HTML page func filesHTML(files []cache.FileMessage, date time.Time) []byte { // Calculate statistics @@ -376,6 +384,7 @@ func filesHTML(files []cache.FileMessage, date time.Time) []byte { data := map[string]interface{}{ "Date": date.Format("Monday, January 2, 2006"), + "DateValue": date.Format("2006-01-02"), "PrevDate": prevDate.Format("2006-01-02"), "NextDate": nextDate.Format("2006-01-02"), "Files": files, @@ -383,6 
+392,7 @@ func filesHTML(files []cache.FileMessage, date time.Time) []byte { "MatchedFiles": matchedFiles, "UnmatchedFiles": unmatchedFiles, "TotalTasks": totalTasks, + "CurrentPage": "files", } // Template functions @@ -408,7 +418,7 @@ func filesHTML(files []cache.FileMessage, date time.Time) []byte { } // Parse and execute template using the same pattern as alertHTML - tmpl, err := template.New("files").Funcs(funcMap).Parse(handler.FilesTemplate) + tmpl, err := template.New("files").Funcs(funcMap).Parse(handler.HeaderTemplate + handler.FilesTemplate) if err != nil { return []byte(err.Error()) } @@ -533,6 +543,7 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri "CurrentType": taskType, "CurrentJob": job, "CurrentResult": result, + "CurrentPage": "task", } // Template functions @@ -576,7 +587,7 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri } // Parse and execute template - tmpl, err := template.New("task").Funcs(funcMap).Parse(handler.TaskTemplate) + tmpl, err := template.New("task").Funcs(funcMap).Parse(handler.HeaderTemplate + handler.TaskTemplate) if err != nil { return []byte(err.Error()) } @@ -589,6 +600,59 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri return buf.Bytes() } +// aboutHTML renders the about page HTML +func (tm *taskMaster) aboutHTML() []byte { + // Get basic system information + sts := stats{ + AppName: "flowlord", + Version: tools.Version, + RunTime: gtools.PrintDuration(time.Since(tm.initTime)), + NextUpdate: tm.nextUpdate.Format("2006-01-02T15:04:05"), + LastUpdate: tm.lastUpdate.Format("2006-01-02T15:04:05"), + } + + // Get database size information + dbSize, err := tm.taskCache.GetDBSize() + if err != nil { + return []byte("Error getting database size: " + err.Error()) + } + + // Get table statistics + tableStats, err := tm.taskCache.GetTableStats() + if err != nil { + return []byte("Error getting table statistics: " + err.Error()) + 
} + + // Create data structure for template + data := map[string]interface{}{ + "AppName": sts.AppName, + "Version": sts.Version, + "RunTime": sts.RunTime, + "LastUpdate": sts.LastUpdate, + "NextUpdate": sts.NextUpdate, + "TotalDBSize": dbSize.TotalSize, + "PageCount": dbSize.PageCount, + "PageSize": dbSize.PageSize, + "DBPath": dbSize.DBPath, + "TableStats": tableStats, + "CurrentPage": "about", + "DateValue": "", // About page doesn't need date + } + + // Parse and execute template + tmpl, err := template.New("about").Parse(handler.HeaderTemplate + handler.AboutTemplate) + if err != nil { + return []byte("Error parsing template: " + err.Error()) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return []byte("Error executing template: " + err.Error()) + } + + return buf.Bytes() +} + // AlertData holds both the alerts and summary data for the template type AlertData struct { Alerts []cache.AlertRecord @@ -602,11 +666,13 @@ func alertHTML(tasks []cache.AlertRecord) []byte { // Create data structure for template data := map[string]interface{}{ - "Alerts": tasks, - "Summary": summary, + "Alerts": tasks, + "Summary": summary, + "CurrentPage": "alert", + "DateValue": "", // Will be set by the template if needed } - tmpl, err := template.New("alert").Parse(handler.AlertTemplate) + tmpl, err := template.New("alert").Parse(handler.HeaderTemplate + handler.AlertTemplate) if err != nil { return []byte(err.Error()) } diff --git a/apps/flowlord/handler/about.tmpl b/apps/flowlord/handler/about.tmpl new file mode 100644 index 0000000..c4a23ae --- /dev/null +++ b/apps/flowlord/handler/about.tmpl @@ -0,0 +1,286 @@ + + + + + + Flowlord About - System Information + + + + {{template "header" .}} +
+
+

System Information

+
+ +
+ + +
+
+

Application

+
+ App Name + {{.AppName}} +
+
+ Version + {{.Version}} +
+
+ Runtime + {{.RunTime}} +
+
+ +
+

Cache Status

+
+ Last Update + {{.LastUpdate}} +
+
+ Next Update + {{.NextUpdate}} +
+
+ Database File + {{.DBPath}} +
+
+ +
+

Database Size

+
+ Total Size + {{.TotalDBSize}} +
+
+ Page Count + {{.PageCount}} +
+
+ Page Size + {{.PageSize}} +
+
+
+ +
+

Table Breakdown

+
+
+ + + + + + + + + + + {{range .TableStats}} + + + + + + + + {{end}} + +
Table NameRow CountSize (Bytes)Size (Human)Percentage
{{.Name}}{{.RowCount}}{{.SizeBytes}}{{.SizeHuman}}{{.Percentage}}%
+
+ + + + + + + diff --git a/apps/flowlord/handler/alert.tmpl b/apps/flowlord/handler/alert.tmpl index 9a8c70f..3fb5b3f 100644 --- a/apps/flowlord/handler/alert.tmpl +++ b/apps/flowlord/handler/alert.tmpl @@ -207,9 +207,60 @@ font-size: 14px; color: #6c757d; } + + /* Navigation Header Styles */ + .nav-header { + background: linear-gradient(135deg, #2c3e50 0%, #34495e 100%); + color: white; + padding: 0; + box-shadow: 0 2px 4px rgba(0,0,0,0.1); + } + .nav-container { + max-width: 1200px; + margin: 0 auto; + display: flex; + justify-content: space-between; + align-items: center; + padding: 0 20px; + } + .nav-brand h1 { + margin: 0; + font-size: 20px; + font-weight: 600; + padding: 15px 0; + } + .nav-menu { + display: flex; + gap: 0; + } + .nav-link { + display: flex; + align-items: center; + gap: 8px; + padding: 15px 20px; + color: rgba(255, 255, 255, 0.8); + text-decoration: none; + font-weight: 500; + font-size: 14px; + transition: all 0.2s ease; + border-bottom: 3px solid transparent; + } + .nav-link:hover { + color: white; + background: rgba(255, 255, 255, 0.1); + } + .nav-link.active { + color: white; + background: rgba(255, 255, 255, 0.15); + border-bottom-color: #3498db; + } + .nav-icon { + font-size: 16px; + } + {{template "header" .}}

Task Status Report

diff --git a/apps/flowlord/handler/files.tmpl b/apps/flowlord/handler/files.tmpl index 7021619..967e767 100644 --- a/apps/flowlord/handler/files.tmpl +++ b/apps/flowlord/handler/files.tmpl @@ -179,9 +179,60 @@ .refresh-btn:hover { background: #2980b9; } + + /* Navigation Header Styles */ + .nav-header { + background: linear-gradient(135deg, #2c3e50 0%, #34495e 100%); + color: white; + padding: 0; + box-shadow: 0 2px 4px rgba(0,0,0,0.1); + } + .nav-container { + max-width: 1200px; + margin: 0 auto; + display: flex; + justify-content: space-between; + align-items: center; + padding: 0 20px; + } + .nav-brand h1 { + margin: 0; + font-size: 20px; + font-weight: 600; + padding: 15px 0; + } + .nav-menu { + display: flex; + gap: 0; + } + .nav-link { + display: flex; + align-items: center; + gap: 8px; + padding: 15px 20px; + color: rgba(255, 255, 255, 0.8); + text-decoration: none; + font-weight: 500; + font-size: 14px; + transition: all 0.2s ease; + border-bottom: 3px solid transparent; + } + .nav-link:hover { + color: white; + background: rgba(255, 255, 255, 0.1); + } + .nav-link.active { + color: white; + background: rgba(255, 255, 255, 0.15); + border-bottom-color: #3498db; + } + .nav-icon { + font-size: 16px; + } + {{template "header" .}}

Flowlord File Messages

diff --git a/apps/flowlord/handler/handler.go b/apps/flowlord/handler/handler.go index ff0412a..b5942f8 100644 --- a/apps/flowlord/handler/handler.go +++ b/apps/flowlord/handler/handler.go @@ -10,3 +10,9 @@ var FilesTemplate string //go:embed task.tmpl var TaskTemplate string + +//go:embed header.tmpl +var HeaderTemplate string + +//go:embed about.tmpl +var AboutTemplate string diff --git a/apps/flowlord/handler/header.tmpl b/apps/flowlord/handler/header.tmpl new file mode 100644 index 0000000..fa086c1 --- /dev/null +++ b/apps/flowlord/handler/header.tmpl @@ -0,0 +1,27 @@ +{{define "header"}} + +{{end}} diff --git a/apps/flowlord/handler/task.tmpl b/apps/flowlord/handler/task.tmpl index 549ade8..69c5252 100644 --- a/apps/flowlord/handler/task.tmpl +++ b/apps/flowlord/handler/task.tmpl @@ -437,9 +437,60 @@ 80% { opacity: 1; transform: translateX(-50%) translateY(-5px); } 100% { opacity: 0; transform: translateX(-50%) translateY(-10px); } } + + /* Navigation Header Styles */ + .nav-header { + background: linear-gradient(135deg, #2c3e50 0%, #34495e 100%); + color: white; + padding: 0; + box-shadow: 0 2px 4px rgba(0,0,0,0.1); + } + .nav-container { + max-width: 1200px; + margin: 0 auto; + display: flex; + justify-content: space-between; + align-items: center; + padding: 0 20px; + } + .nav-brand h1 { + margin: 0; + font-size: 20px; + font-weight: 600; + padding: 15px 0; + } + .nav-menu { + display: flex; + gap: 0; + } + .nav-link { + display: flex; + align-items: center; + gap: 8px; + padding: 15px 20px; + color: rgba(255, 255, 255, 0.8); + text-decoration: none; + font-weight: 500; + font-size: 14px; + transition: all 0.2s ease; + border-bottom: 3px solid transparent; + } + .nav-link:hover { + color: white; + background: rgba(255, 255, 255, 0.1); + } + .nav-link.active { + color: white; + background: rgba(255, 255, 255, 0.15); + border-bottom-color: #3498db; + } + .nav-icon { + font-size: 16px; + } + {{template "header" .}}

Flowlord Task Dashboard

diff --git a/apps/flowlord/handler_test.go b/apps/flowlord/handler_test.go index c1de2b8..95dd5bc 100644 --- a/apps/flowlord/handler_test.go +++ b/apps/flowlord/handler_test.go @@ -4,6 +4,7 @@ import ( "encoding/json" "errors" "os" + "strings" "testing" "time" @@ -491,3 +492,49 @@ func TestTaskHTML(t *testing.T) { } } + + +func TestAboutHTML(t *testing.T) { + // Create a real SQLite cache for testing + taskCache, err := cache.NewSQLite(time.Hour, ":memory:") + if err != nil { + t.Fatalf("Failed to create test cache: %v", err) + } + + // Create a mock taskMaster with test data + tm := &taskMaster{ + initTime: time.Now().Add(-2 * time.Hour), // 2 hours ago + nextUpdate: time.Now().Add(30 * time.Minute), // 30 minutes from now + lastUpdate: time.Now().Add(-15 * time.Minute), // 15 minutes ago + taskCache: taskCache, + } + + // Generate HTML using the aboutHTML method + html := tm.aboutHTML() + + // Write HTML to a file for easy viewing + outputFile := "about_preview.html" + err = os.WriteFile(outputFile, html, 0644) + if err != nil { + t.Fatalf("Failed to write HTML file: %v", err) + } + + t.Logf("About preview generated and saved to: ./%s", outputFile) + + // Basic checks + if len(html) == 0 { + t.Error("Expected HTML output, got empty") + } + + // Check that key content is present + htmlStr := string(html) + if !strings.Contains(htmlStr, "flowlord") { + t.Error("Expected 'flowlord' in HTML output") + } + if !strings.Contains(htmlStr, "System Information") { + t.Error("Expected 'System Information' in HTML output") + } + if !strings.Contains(htmlStr, "Table Breakdown") { + t.Error("Expected 'Table Breakdown' in HTML output") + } +} From 5b9dc9779c235a2842f3464b842566fa1a8912ba Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Wed, 1 Oct 2025 15:51:16 -0600 Subject: [PATCH 12/40] move css into single sytle.css --- apps/flowlord/handler/about.tmpl | 182 +----- apps/flowlord/handler/alert.tmpl | 256 +------- apps/flowlord/handler/files.tmpl | 229 +------ 
apps/flowlord/handler/handler.go | 18 - apps/flowlord/handler/static/style.css | 867 +++++++++++++++++++++++++ apps/flowlord/handler/task.tmpl | 485 +------------- 6 files changed, 872 insertions(+), 1165 deletions(-) delete mode 100644 apps/flowlord/handler/handler.go create mode 100644 apps/flowlord/handler/static/style.css diff --git a/apps/flowlord/handler/about.tmpl b/apps/flowlord/handler/about.tmpl index c4a23ae..e6351ae 100644 --- a/apps/flowlord/handler/about.tmpl +++ b/apps/flowlord/handler/about.tmpl @@ -4,187 +4,7 @@ Flowlord About - System Information - + {{template "header" .}} diff --git a/apps/flowlord/handler/alert.tmpl b/apps/flowlord/handler/alert.tmpl index 3fb5b3f..4823a92 100644 --- a/apps/flowlord/handler/alert.tmpl +++ b/apps/flowlord/handler/alert.tmpl @@ -3,261 +3,7 @@ Task Status Report - + {{template "header" .}} diff --git a/apps/flowlord/handler/files.tmpl b/apps/flowlord/handler/files.tmpl index 967e767..9c5f314 100644 --- a/apps/flowlord/handler/files.tmpl +++ b/apps/flowlord/handler/files.tmpl @@ -4,237 +4,12 @@ Flowlord File Messages - {{.Date}} - + {{template "header" .}}
-
+

Flowlord File Messages

{{.Date}}
diff --git a/apps/flowlord/handler/handler.go b/apps/flowlord/handler/handler.go deleted file mode 100644 index b5942f8..0000000 --- a/apps/flowlord/handler/handler.go +++ /dev/null @@ -1,18 +0,0 @@ -package handler - -import _ "embed" - -//go:embed alert.tmpl -var AlertTemplate string - -//go:embed files.tmpl -var FilesTemplate string - -//go:embed task.tmpl -var TaskTemplate string - -//go:embed header.tmpl -var HeaderTemplate string - -//go:embed about.tmpl -var AboutTemplate string diff --git a/apps/flowlord/handler/static/style.css b/apps/flowlord/handler/static/style.css new file mode 100644 index 0000000..60cc21e --- /dev/null +++ b/apps/flowlord/handler/static/style.css @@ -0,0 +1,867 @@ +/* Flowlord Dashboard Styles */ + +/* Base Styles */ +body { + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; + margin: 0; + padding: 20px; + background-color: #f5f5f5; +} + +.container { + max-width: 1200px; + margin: 0 auto; + background: white; + border-radius: 8px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); + overflow: hidden; +} + +/* Header Styles */ +.header { + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + color: white; + padding: 20px; + text-align: center; +} + +.header.simple { + background: #2c3e50; + color: white; + padding: 20px; + text-align: center; +} + +.header h1 { + margin: 0 0 15px 0; + font-size: 24px; + font-weight: 600; +} + +.header .date { + margin-top: 5px; + opacity: 0.8; + font-size: 14px; +} + +/* Navigation Header Styles */ +.nav-header { + background: linear-gradient(135deg, #2c3e50 0%, #34495e 100%); + color: white; + padding: 0; + box-shadow: 0 2px 4px rgba(0,0,0,0.1); +} + +.nav-container { + max-width: 1200px; + margin: 0 auto; + display: flex; + justify-content: space-between; + align-items: center; + padding: 0 20px; +} + +.nav-brand h1 { + margin: 0; + font-size: 20px; + font-weight: 600; + padding: 15px 0; +} + +.nav-menu { + display: flex; + gap: 0; +} + +.nav-link { + display: 
flex; + align-items: center; + gap: 8px; + padding: 15px 20px; + color: rgba(255, 255, 255, 0.8); + text-decoration: none; + font-weight: 500; + font-size: 14px; + transition: all 0.2s ease; + border-bottom: 3px solid transparent; +} + +.nav-link:hover { + color: white; + background: rgba(255, 255, 255, 0.1); +} + +.nav-link.active { + color: white; + background: rgba(255, 255, 255, 0.15); + border-bottom-color: #3498db; +} + +.nav-icon { + font-size: 16px; +} + +/* Content Styles */ +.content { + padding: 30px; +} + +/* Info Grid Styles */ +.info-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); + gap: 20px; + margin-bottom: 30px; +} + +.info-card { + background: #f8f9fa; + border: 1px solid #e1e5e9; + border-radius: 8px; + padding: 20px; + box-shadow: 0 1px 3px rgba(0,0,0,0.1); +} + +.info-card h3 { + margin: 0 0 15px 0; + color: #2c3e50; + font-size: 18px; + font-weight: 600; + border-bottom: 2px solid #3498db; + padding-bottom: 8px; +} + +.info-item { + display: flex; + justify-content: space-between; + align-items: center; + padding: 8px 0; + border-bottom: 1px solid #e9ecef; +} + +.info-item:last-child { + border-bottom: none; +} + +.info-label { + font-weight: 500; + color: #495057; +} + +.info-value { + color: #6c757d; + font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; + font-size: 14px; +} + +/* Cache Section Styles */ +.cache-section { + background: #f8f9fa; + border: 1px solid #e1e5e9; + border-radius: 8px; + padding: 20px; + margin-top: 20px; +} + +.cache-section h3 { + margin: 0 0 15px 0; + color: #2c3e50; + font-size: 18px; + font-weight: 600; + border-bottom: 2px solid #e74c3c; + padding-bottom: 8px; +} + +/* Table Styles */ +.table-container { + overflow-x: auto; +} + +table { + border-collapse: collapse; + width: 100%; + margin: 0; +} + +th, td { + border: 1px solid #e1e5e9; + padding: 12px 16px; + text-align: left; + vertical-align: top; +} + +th { + background: linear-gradient(135deg, #f8f9fa 0%, 
#e9ecef 100%); + font-weight: 600; + color: #495057; + cursor: pointer; + user-select: none; + position: relative; +} + +th:hover { + background: linear-gradient(135deg, #e9ecef 0%, #dee2e6 100%); +} + +th.sortable::after { + content: ' ↕'; + opacity: 0.5; + font-size: 12px; +} + +th.sort-asc::after { + content: ' ↑'; + opacity: 1; + color: #007bff; +} + +th.sort-desc::after { + content: ' ↓'; + opacity: 1; + color: #007bff; +} + +tr:nth-child(even) { + background-color: #f8f9fa; +} + +tr:hover { + background-color: #e3f2fd; +} + +/* Cell Styles */ +.id-cell { + font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; + font-size: 13px; + color: #6c757d; + cursor: pointer; + position: relative; + user-select: text; +} + +.id-cell:hover { + background-color: #f8f9fa; +} + +.id-cell.truncated { + max-width: 100px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.id-cell.expanded { + max-width: none; + white-space: normal; + word-break: break-all; + background-color: #f8f9fa; + border: 1px solid #dee2e6; + border-radius: 4px; + padding: 8px; + margin: 2px; +} + +.id-cell.copyable { + position: relative; +} + +.type-cell { + font-weight: 500; + color: #495057; +} + +.job-cell { + color: #6c757d; + font-style: italic; +} + +.result-cell { + font-weight: 500; +} + +.result-complete { + color: #28a745; +} + +.result-error { + color: #dc3545; +} + +.result-alert { + color: #fd7e14; +} + +.result-warn { + color: #ffc107; +} + +.result-running { + color: #6c757d; +} + +.time-cell { + font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; + font-size: 13px; + color: #6c757d; + white-space: nowrap; +} + +.info-cell { + font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; + font-size: 13px; + color: #6c757d; + cursor: pointer; + line-height: 1.4; + user-select: text; +} + +.info-cell:hover { + background-color: #f8f9fa; +} + +.info-cell.truncated { + max-width: 200px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + 
+.info-cell.expanded { + max-width: none; + white-space: normal; + word-wrap: break-word; + background-color: #f8f9fa; + border: 1px solid #dee2e6; + border-radius: 4px; + padding: 8px; + margin: 2px; +} + +.info-cell.copyable { + position: relative; +} + +.meta-cell { + font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; + font-size: 12px; + color: #6c757d; + cursor: pointer; + line-height: 1.4; + user-select: text; +} + +.meta-cell:hover { + background-color: #f8f9fa; +} + +.meta-cell.truncated { + max-width: 200px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.meta-cell.expanded { + max-width: none; + white-space: normal; + word-wrap: break-word; + background-color: #f8f9fa; + border: 1px solid #dee2e6; + border-radius: 4px; + padding: 8px; + margin: 2px; +} + +.meta-cell.copyable { + position: relative; +} + +.message-cell { + max-width: 300px; + word-wrap: break-word; + line-height: 1.4; + cursor: pointer; + user-select: text; +} + +.message-cell:hover { + background-color: #f8f9fa; +} + +.message-cell.copyable { + position: relative; +} + +.size-cell { + font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; + font-size: 13px; + color: #6c757d; +} + +.percentage-cell { + font-weight: 500; + color: #495057; +} + +/* Button Styles */ +.refresh-btn { + background: #3498db; + color: white; + border: none; + padding: 10px 20px; + border-radius: 4px; + cursor: pointer; + font-size: 14px; + margin-bottom: 20px; +} + +.refresh-btn:hover { + background: #2980b9; +} + +.btn { + padding: 8px 16px; + border: none; + border-radius: 4px; + cursor: pointer; + font-size: 14px; + transition: all 0.2s ease; +} + +.btn-primary { + background: #007bff; + color: white; +} + +.btn-primary:hover { + background: #0056b3; +} + +.btn-secondary { + background: #6c757d; + color: white; +} + +.btn-secondary:hover { + background: #545b62; +} + +/* Date Navigation Styles */ +.date-nav { + display: flex; + align-items: center; + gap: 12px; + 
flex-wrap: wrap; + justify-content: center; +} + +.date-nav a { + color: rgba(255, 255, 255, 0.9); + text-decoration: none; + padding: 8px 16px; + border: 1px solid rgba(255, 255, 255, 0.3); + border-radius: 6px; + transition: all 0.2s ease; +} + +.date-nav a:hover { + background: rgba(255, 255, 255, 0.2); + border-color: rgba(255, 255, 255, 0.5); +} + +.current-date { + font-weight: 600; + font-size: 16px; +} + +/* Date Selector Styles */ +.date-selector { + display: flex; + align-items: center; + gap: 12px; + flex-wrap: wrap; + justify-content: center; +} + +.date-selector label { + font-size: 14px; + font-weight: 500; + color: rgba(255, 255, 255, 0.9); +} + +.date-selector input[type="date"] { + padding: 8px 12px; + border: 1px solid rgba(255, 255, 255, 0.3); + border-radius: 6px; + background: rgba(255, 255, 255, 0.1); + color: white; + font-size: 14px; + outline: none; + transition: all 0.2s ease; +} + +.date-selector input[type="date"]:focus { + border-color: rgba(255, 255, 255, 0.6); + background: rgba(255, 255, 255, 0.2); +} + +.date-selector input[type="date"]::-webkit-calendar-picker-indicator { + filter: invert(1); + cursor: pointer; +} + +.date-selector button { + padding: 8px 16px; + background: rgba(255, 255, 255, 0.2); + border: 1px solid rgba(255, 255, 255, 0.3); + border-radius: 6px; + color: white; + font-size: 14px; + cursor: pointer; + transition: all 0.2s ease; +} + +.date-selector button:hover { + background: rgba(255, 255, 255, 0.3); + border-color: rgba(255, 255, 255, 0.5); +} + +/* Summary Section Styles */ +.summary-section { + padding: 20px; + background: #f8f9fa; + border-bottom: 1px solid #e1e5e9; +} + +.summary { + background: #ecf0f1; + padding: 15px 20px; + border-bottom: 1px solid #bdc3c7; +} + +.summary-section h3 { + margin: 0 0 16px 0; + color: #495057; + font-size: 18px; + font-weight: 600; +} + +.summary-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); + gap: 16px; +} + +.summary-card { + 
background: white; + border: 1px solid #e1e5e9; + border-radius: 8px; + padding: 16px; + box-shadow: 0 1px 3px rgba(0,0,0,0.1); + transition: box-shadow 0.2s ease; +} + +.summary-card:hover { + box-shadow: 0 2px 8px rgba(0,0,0,0.15); +} + +.summary-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 8px; +} + +.summary-key { + font-weight: 600; + color: #495057; + font-size: 16px; +} + +.summary-count { + background: #007bff; + color: white; + padding: 4px 8px; + border-radius: 12px; + font-size: 12px; + font-weight: 500; +} + +.summary-time { + color: #6c757d; + font-size: 14px; + font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; +} + +.summary-details { + color: #6c757d; + font-size: 14px; + font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; +} + +/* Summary Stats Styles */ +.summary-stats { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); + gap: 16px; + margin-bottom: 20px; +} + +.stat-card { + background: white; + border: 1px solid #e1e5e9; + border-radius: 8px; + padding: 16px; + text-align: center; + box-shadow: 0 1px 3px rgba(0,0,0,0.1); +} + +.stat-number { + font-size: 24px; + font-weight: bold; + color: #2c3e50; + margin-bottom: 4px; +} + +.stat-label { + font-size: 12px; + color: #6c757d; + text-transform: uppercase; +} + +.stat { + text-align: center; +} + +/* Files List Styles */ +.files-list { + padding: 0; +} + +.file-item { + border-bottom: 1px solid #ecf0f1; + padding: 15px 20px; + transition: background-color 0.2s; +} + +.file-item:hover { + background-color: #f8f9fa; +} + +.file-item:last-child { + border-bottom: none; +} + +.file-path { + font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; + font-size: 14px; + color: #2c3e50; + margin-bottom: 5px; + word-break: break-all; +} + +.file-meta { + display: flex; + gap: 20px; + font-size: 12px; + color: #7f8c8d; + margin-bottom: 8px; +} + +.file-meta span { + display: flex; + align-items: center; + gap: 
4px; +} + +.tasks-section { + margin-top: 10px; +} + +.tasks-label { + font-size: 12px; + color: #7f8c8d; + margin-bottom: 5px; + line-height: 1.4; +} + +.task-id { + font-family: monospace; + background: #ecf0f1; + padding: 2px 6px; + border-radius: 3px; + font-size: 11px; + color: #2c3e50; +} + +.task-tags { + display: flex; + gap: 8px; + flex-wrap: wrap; +} + +.task-tag { + background: #3498db; + color: white; + padding: 2px 8px; + border-radius: 12px; + font-size: 11px; + font-weight: 500; +} + +.task-tag.error { + background: #e74c3c; +} + +.task-tag.success { + background: #27ae60; +} + +.task-name { + color: #bdc3c7; + font-weight: normal; + margin-left: 4px; +} + +/* Filter Styles */ +.filters { + background: #f8f9fa; + padding: 20px; + border-bottom: 1px solid #e1e5e9; +} + +.filters h3 { + margin: 0 0 16px 0; + color: #495057; + font-size: 18px; +} + +.filter-row { + display: flex; + gap: 16px; + flex-wrap: wrap; + align-items: center; +} + +.filter-group { + display: flex; + flex-direction: column; + gap: 4px; +} + +.filter-group label { + font-size: 12px; + font-weight: 500; + color: #6c757d; + text-transform: uppercase; +} + +.filter-group input, .filter-group select { + padding: 8px 12px; + border: 1px solid #ced4da; + border-radius: 4px; + font-size: 14px; + min-width: 120px; +} + +.filter-group input:focus, .filter-group select:focus { + outline: none; + border-color: #007bff; + box-shadow: 0 0 0 2px rgba(0, 123, 255, 0.25); +} + +.filter-buttons { + display: flex; + gap: 8px; + margin-left: auto; +} + +/* Stats Styles */ +.stats { + padding: 16px 20px; + background-color: #f8f9fa; + border-top: 1px solid #e1e5e9; + font-size: 14px; + color: #6c757d; +} + +/* No Data Styles */ +.no-tasks, .no-files { + text-align: center; + padding: 40px 20px; + color: #7f8c8d; +} + +.no-tasks h3, .no-files h3 { + margin: 0 0 10px 0; + color: #95a5a6; +} + +.no-tasks { + color: #95a5a6; + font-style: italic; + font-size: 12px; +} + +/* Context Menu Styles */ 
+.context-menu { + position: absolute; + background: white; + border: 1px solid #dee2e6; + border-radius: 4px; + box-shadow: 0 4px 12px rgba(0,0,0,0.15); + z-index: 1000; + min-width: 120px; + padding: 4px 0; +} + +.context-menu-item { + padding: 8px 16px; + cursor: pointer; + font-size: 14px; + color: #495057; + display: flex; + align-items: center; + gap: 8px; +} + +.context-menu-item:hover { + background-color: #f8f9fa; +} + +.context-menu-item:active { + background-color: #e9ecef; +} + +/* Copy Feedback Styles */ +.copy-feedback { + position: absolute; + top: -25px; + left: 50%; + transform: translateX(-50%); + background: #28a745; + color: white; + padding: 4px 8px; + border-radius: 4px; + font-size: 12px; + font-weight: 500; + z-index: 1001; + pointer-events: none; + animation: copyFeedback 2s ease-out forwards; +} + +@keyframes copyFeedback { + 0% { opacity: 0; transform: translateX(-50%) translateY(0); } + 20% { opacity: 1; transform: translateX(-50%) translateY(-5px); } + 80% { opacity: 1; transform: translateX(-50%) translateY(-5px); } + 100% { opacity: 0; transform: translateX(-50%) translateY(-10px); } +} + +/* Error and Success States */ +.error { + color: #dc3545; +} + +.success { + color: #28a745; +} diff --git a/apps/flowlord/handler/task.tmpl b/apps/flowlord/handler/task.tmpl index 69c5252..86d2482 100644 --- a/apps/flowlord/handler/task.tmpl +++ b/apps/flowlord/handler/task.tmpl @@ -4,490 +4,7 @@ Flowlord Task Dashboard - {{.Date}} - + {{template "header" .}} From ef4f2f559cc5eb7580e34acce867117890678d3f Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Thu, 2 Oct 2025 14:57:52 -0600 Subject: [PATCH 13/40] widen header and task view --- apps/flowlord/handler.go | 70 ++-- apps/flowlord/handler/about.tmpl | 5 - apps/flowlord/handler/alert.tmpl | 8 - apps/flowlord/handler/files.tmpl | 11 - apps/flowlord/handler/header.tmpl | 59 ++- apps/flowlord/handler/static/style.css | 488 +++++++++++++++++-------- apps/flowlord/handler/task.tmpl | 27 +- 
apps/flowlord/handler_test.go | 12 +- apps/flowlord/sqlite_progress.md | 99 ----- 9 files changed, 453 insertions(+), 326 deletions(-) delete mode 100644 apps/flowlord/sqlite_progress.md diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index c3e532b..5cd4c16 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -2,6 +2,7 @@ package main import ( "bytes" + _ "embed" "encoding/json" "errors" "fmt" @@ -15,25 +16,41 @@ import ( "strings" "time" - "github.com/jbsmith7741/uri" - "github.com/go-chi/chi/v5" gtools "github.com/jbsmith7741/go-tools" "github.com/jbsmith7741/go-tools/appenderr" + "github.com/jbsmith7741/uri" "github.com/pcelvng/task" + tools "github.com/pcelvng/task-tools" "github.com/pcelvng/task-tools/apps/flowlord/cache" - "github.com/pcelvng/task-tools/apps/flowlord/handler" + "github.com/pcelvng/task-tools/file" "github.com/pcelvng/task-tools/slack" "github.com/pcelvng/task-tools/tmpl" - - tools "github.com/pcelvng/task-tools" - "github.com/pcelvng/task-tools/file" "github.com/pcelvng/task-tools/workflow" ) +//go:embed handler/alert.tmpl +var AlertTemplate string + +//go:embed handler/files.tmpl +var FilesTemplate string + +//go:embed handler/task.tmpl +var TaskTemplate string + +//go:embed handler/header.tmpl +var HeaderTemplate string + +//go:embed handler/about.tmpl +var AboutTemplate string + func (tm *taskMaster) StartHandler() { router := chi.NewRouter() + + // Static file serving + router.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("handler/static")))) + router.Get("/", tm.Info) router.Get("/info", tm.Info) router.Get("/refresh", tm.refreshHandler) @@ -308,7 +325,7 @@ func (tm *taskMaster) htmlAlert(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "text/html") - w.Write(alertHTML(alerts)) + w.Write(alertHTML(alerts, dt)) } // htmlFiles handles GET /web/files - displays file messages for a specific date @@ -368,14 +385,14 @@ func 
filesHTML(files []cache.FileMessage, date time.Time) []byte { totalFiles := len(files) matchedFiles := 0 totalTasks := 0 - + for _, file := range files { if len(file.TaskNames) > 0 { matchedFiles++ totalTasks += len(file.TaskNames) } } - + unmatchedFiles := totalFiles - matchedFiles // Calculate navigation dates @@ -393,6 +410,7 @@ func filesHTML(files []cache.FileMessage, date time.Time) []byte { "UnmatchedFiles": unmatchedFiles, "TotalTasks": totalTasks, "CurrentPage": "files", + "PageTitle": "File Messages", } // Template functions @@ -418,7 +436,7 @@ func filesHTML(files []cache.FileMessage, date time.Time) []byte { } // Parse and execute template using the same pattern as alertHTML - tmpl, err := template.New("files").Funcs(funcMap).Parse(handler.HeaderTemplate + handler.FilesTemplate) + tmpl, err := template.New("files").Funcs(funcMap).Parse(HeaderTemplate + FilesTemplate) if err != nil { return []byte(err.Error()) } @@ -434,7 +452,7 @@ func filesHTML(files []cache.FileMessage, date time.Time) []byte { // generateSummaryFromTasks creates a summary of tasks grouped by type:job func generateSummaryFromTasks(tasks []cache.TaskView) map[string]*cache.Stats { summary := make(map[string]*cache.Stats) - + for _, t := range tasks { // Get job from TaskView.Job or extract from Meta job := t.Job @@ -443,10 +461,10 @@ func generateSummaryFromTasks(tasks []cache.TaskView) map[string]*cache.Stats { job = meta.Get("job") } } - + // Create key in format "type:job" key := strings.TrimRight(t.Type+":"+job, ":") - + // Get or create stats for this type:job combination stat, found := summary[key] if !found { @@ -457,7 +475,7 @@ func generateSummaryFromTasks(tasks []cache.TaskView) map[string]*cache.Stats { } summary[key] = stat } - + // Convert TaskView to task.Task for processing taskTime := tmpl.TaskTime(task.Task{ ID: t.ID, @@ -471,7 +489,7 @@ func generateSummaryFromTasks(tasks []cache.TaskView) map[string]*cache.Stats { Started: t.Started, Ended: t.Ended, }) - + // Process 
based on result type if t.Result == "error" { stat.ErrorCount++ @@ -479,7 +497,7 @@ func generateSummaryFromTasks(tasks []cache.TaskView) map[string]*cache.Stats { } else if t.Result == "complete" { stat.CompletedCount++ stat.CompletedTimes = append(stat.CompletedTimes, taskTime) - + // Add execution time for completed tasks if t.Started != "" && t.Ended != "" { startTime, err1 := time.Parse(time.RFC3339, t.Started) @@ -491,7 +509,7 @@ func generateSummaryFromTasks(tasks []cache.TaskView) map[string]*cache.Stats { } // Note: warn and alert results don't contribute to execution time stats } - + return summary } @@ -544,6 +562,7 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri "CurrentJob": job, "CurrentResult": result, "CurrentPage": "task", + "PageTitle": "Task Dashboard", } // Template functions @@ -587,7 +606,7 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri } // Parse and execute template - tmpl, err := template.New("task").Funcs(funcMap).Parse(handler.HeaderTemplate + handler.TaskTemplate) + tmpl, err := template.New("task").Funcs(funcMap).Parse(HeaderTemplate + TaskTemplate) if err != nil { return []byte(err.Error()) } @@ -637,10 +656,11 @@ func (tm *taskMaster) aboutHTML() []byte { "TableStats": tableStats, "CurrentPage": "about", "DateValue": "", // About page doesn't need date + "PageTitle": "System Information", } // Parse and execute template - tmpl, err := template.New("about").Parse(handler.HeaderTemplate + handler.AboutTemplate) + tmpl, err := template.New("about").Parse(HeaderTemplate + AboutTemplate) if err != nil { return []byte("Error parsing template: " + err.Error()) } @@ -660,24 +680,25 @@ type AlertData struct { } // alertHTML will take a list of task and display a html webpage that is easily to digest what is going on. 
-func alertHTML(tasks []cache.AlertRecord) []byte { +func alertHTML(tasks []cache.AlertRecord, date time.Time) []byte { // Generate summary data using BuildCompactSummary summary := cache.BuildCompactSummary(tasks) - + // Create data structure for template data := map[string]interface{}{ "Alerts": tasks, "Summary": summary, "CurrentPage": "alert", - "DateValue": "", // Will be set by the template if needed + "DateValue": date.Format("2006-01-02"), + "Date": date.Format("Monday, January 2, 2006"), + "PageTitle": "Task Alerts", } - tmpl, err := template.New("alert").Parse(handler.HeaderTemplate + handler.AlertTemplate) + tmpl, err := template.New("alert").Parse(HeaderTemplate + AlertTemplate) if err != nil { return []byte(err.Error()) } - var buf bytes.Buffer if err := tmpl.Execute(&buf, data); err != nil { return []byte(err.Error()) @@ -850,4 +871,3 @@ func (m Meta) UnmarshalJSON(d []byte) error { m2 := (map[string][]string)(m) return json.Unmarshal(d, &m2) } - diff --git a/apps/flowlord/handler/about.tmpl b/apps/flowlord/handler/about.tmpl index e6351ae..4891e88 100644 --- a/apps/flowlord/handler/about.tmpl +++ b/apps/flowlord/handler/about.tmpl @@ -9,13 +9,8 @@ {{template "header" .}}
-
-

System Information

-
- -

Application

diff --git a/apps/flowlord/handler/alert.tmpl b/apps/flowlord/handler/alert.tmpl index 4823a92..dc1d0e7 100644 --- a/apps/flowlord/handler/alert.tmpl +++ b/apps/flowlord/handler/alert.tmpl @@ -8,14 +8,6 @@ {{template "header" .}}
-
-

Task Status Report

-
- - - -
-

Alert Summary

diff --git a/apps/flowlord/handler/files.tmpl b/apps/flowlord/handler/files.tmpl index 9c5f314..3279de9 100644 --- a/apps/flowlord/handler/files.tmpl +++ b/apps/flowlord/handler/files.tmpl @@ -9,17 +9,6 @@ {{template "header" .}}
-
-

Flowlord File Messages

-
{{.Date}}
-
- -
- ← Previous Day - {{.Date}} - Next Day → - -
{{if .Files}}
diff --git a/apps/flowlord/handler/header.tmpl b/apps/flowlord/handler/header.tmpl index fa086c1..38a377b 100644 --- a/apps/flowlord/handler/header.tmpl +++ b/apps/flowlord/handler/header.tmpl @@ -1,8 +1,18 @@ {{define "header"}} + + {{end}} diff --git a/apps/flowlord/handler/static/style.css b/apps/flowlord/handler/static/style.css index 60cc21e..c740d28 100644 --- a/apps/flowlord/handler/static/style.css +++ b/apps/flowlord/handler/static/style.css @@ -1,6 +1,137 @@ /* Flowlord Dashboard Styles */ -/* Base Styles */ +/* ===== UTILITY CLASSES ===== */ +/* Common font families */ +.font-mono { + font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; +} + +/* Common colors */ +.text-muted { + color: #6c757d; +} + +.text-primary { + color: #495057; +} + +.text-secondary { + color: #2c3e50; +} + +.bg-light { + background-color: #f8f9fa; +} + +.bg-white { + background: white; +} + +/* Common spacing */ +.p-0 { padding: 0; } +.p-1 { padding: 4px; } +.p-2 { padding: 8px; } +.p-3 { padding: 12px; } +.p-4 { padding: 16px; } +.p-5 { padding: 20px; } + +.m-0 { margin: 0; } +.mb-1 { margin-bottom: 4px; } +.mb-2 { margin-bottom: 8px; } +.mb-3 { margin-bottom: 12px; } +.mb-4 { margin-bottom: 16px; } +.mb-5 { margin-bottom: 20px; } + +/* Common border styles */ +.border { + border: 1px solid #e1e5e9; +} + +.border-light { + border: 1px solid #e9ecef; +} + +.rounded { + border-radius: 4px; +} + +.rounded-lg { + border-radius: 8px; +} + +/* Common shadow styles */ +.shadow-sm { + box-shadow: 0 1px 3px rgba(0,0,0,0.1); +} + +.shadow { + box-shadow: 0 2px 10px rgba(0,0,0,0.1); +} + +.shadow-lg { + box-shadow: 0 4px 12px rgba(0,0,0,0.15); +} + +/* Common display styles */ +.flex { + display: flex; +} + +.flex-center { + display: flex; + align-items: center; + justify-content: center; +} + +.flex-between { + display: flex; + justify-content: space-between; + align-items: center; +} + +.grid { + display: grid; +} + +.grid-auto { + display: grid; + grid-template-columns: 
repeat(auto-fit, minmax(300px, 1fr)); + gap: 16px; +} + +.grid-stats { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); + gap: 16px; +} + +/* Common text styles */ +.text-center { + text-align: center; +} + +.text-uppercase { + text-transform: uppercase; +} + +.font-weight-500 { + font-weight: 500; +} + +.font-weight-600 { + font-weight: 600; +} + +.font-weight-bold { + font-weight: bold; +} + +/* Common transition */ +.transition { + transition: all 0.2s ease; +} + +/* ===== BASE STYLES ===== */ body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; margin: 0; @@ -17,19 +148,19 @@ body { overflow: hidden; } -/* Header Styles */ +/* ===== HEADER STYLES ===== */ .header { - background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 20px; text-align: center; } +.header { + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); +} + .header.simple { background: #2c3e50; - color: white; - padding: 20px; - text-align: center; } .header h1 { @@ -44,21 +175,30 @@ body { font-size: 14px; } -/* Navigation Header Styles */ +/* ===== NAVIGATION STYLES ===== */ .nav-header { background: linear-gradient(135deg, #2c3e50 0%, #34495e 100%); color: white; padding: 0; box-shadow: 0 2px 4px rgba(0,0,0,0.1); + border-radius: 12px 12px 0 0; + margin: 0; } .nav-container { - max-width: 1200px; - margin: 0 auto; + width: 100%; display: flex; - justify-content: space-between; align-items: center; - padding: 0 20px; + padding: 0 15px; + flex-wrap: nowrap; + gap: 20px; + box-sizing: border-box; +} + +.nav-brand { + flex: 1; + text-align: center; + order: 2; } .nav-brand h1 { @@ -71,6 +211,9 @@ body { .nav-menu { display: flex; gap: 0; + order: 3; + flex-shrink: 0; + white-space: nowrap; } .nav-link { @@ -101,12 +244,137 @@ body { font-size: 16px; } -/* Content Styles */ +/* ===== DATE PICKER STYLES ===== */ +.nav-controls { + display: flex; + align-items: center; + gap: 15px; + order: 1; + 
margin: 0; + padding: 0; + flex-shrink: 0; +} + +.date-picker-container { + display: flex; + align-items: center; + gap: 8px; + background: rgba(255, 255, 255, 0.1); + padding: 8px 12px; + border-radius: 6px; + border: 1px solid rgba(255, 255, 255, 0.2); +} + +.date-picker-label { + color: rgba(255, 255, 255, 0.9); + font-size: 14px; + font-weight: 500; + margin: 0; + white-space: nowrap; +} + +.date-picker { + background: white; + border: 1px solid #ddd; + border-radius: 4px; + padding: 6px 8px; + font-size: 14px; + color: #333; + min-width: 140px; +} + +.date-picker:focus { + outline: none; + border-color: #3498db; + box-shadow: 0 0 0 2px rgba(52, 152, 219, 0.2); +} + +.btn { + display: inline-block; + padding: 6px 12px; + font-size: 14px; + font-weight: 500; + text-align: center; + text-decoration: none; + border: 1px solid transparent; + border-radius: 4px; + cursor: pointer; + transition: all 0.2s ease; + user-select: none; +} + +.btn-sm { + padding: 4px 8px; + font-size: 12px; +} + +.btn-outline { + color: rgba(255, 255, 255, 0.9); + background: transparent; + border-color: rgba(255, 255, 255, 0.3); +} + +.btn-outline:hover { + color: white; + background: rgba(255, 255, 255, 0.1); + border-color: rgba(255, 255, 255, 0.5); +} + +.btn-outline:active { + background: rgba(255, 255, 255, 0.2); +} + +/* ===== DATE DISPLAY STYLES ===== */ +.date-display { + display: flex; + align-items: center; + gap: 10px; +} + + +.current-date { + font-size: 16px; + font-weight: 500; + color: #2c3e50; + background: #f8f9fa; + padding: 8px 16px; + border-radius: 6px; + border: 1px solid #e1e5e9; +} + +.refresh-btn { + background: #3498db; + color: white; + border: none; + padding: 8px 16px; + border-radius: 6px; + font-size: 14px; + font-weight: 500; + cursor: pointer; + transition: background-color 0.2s ease; +} + +.refresh-btn:hover { + background: #2980b9; +} + +/* ===== CONTENT STYLES ===== */ .content { padding: 30px; } -/* Info Grid Styles */ +.container { + padding-top: 
20px; +} + +.full-width { + max-width: 100%; + margin: 0; + padding-left: 10px; + padding-right: 10px; +} + +/* ===== INFO GRID STYLES ===== */ .info-grid { display: grid; grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); @@ -154,7 +422,7 @@ body { font-size: 14px; } -/* Cache Section Styles */ +/* ===== CACHE SECTION STYLES ===== */ .cache-section { background: #f8f9fa; border: 1px solid #e1e5e9; @@ -172,7 +440,7 @@ body { padding-bottom: 8px; } -/* Table Styles */ +/* ===== TABLE STYLES ===== */ .table-container { overflow-x: auto; } @@ -229,28 +497,35 @@ tr:hover { background-color: #e3f2fd; } -/* Cell Styles */ -.id-cell { +/* ===== CELL STYLES ===== */ +.id-cell, .time-cell, .info-cell, .meta-cell, .size-cell { font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; font-size: 13px; color: #6c757d; +} + +.id-cell, .info-cell, .meta-cell { cursor: pointer; position: relative; user-select: text; } -.id-cell:hover { +.id-cell:hover, .info-cell:hover, .meta-cell:hover { background-color: #f8f9fa; } -.id-cell.truncated { - max-width: 100px; +.id-cell.truncated, .info-cell.truncated, .meta-cell.truncated { + max-width: 200px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; } -.id-cell.expanded { +.id-cell.truncated { + max-width: 100px; +} + +.id-cell.expanded, .info-cell.expanded, .meta-cell.expanded { max-width: none; white-space: normal; word-break: break-all; @@ -261,7 +536,11 @@ tr:hover { margin: 2px; } -.id-cell.copyable { +.info-cell.expanded, .meta-cell.expanded { + word-wrap: break-word; +} + +.id-cell.copyable, .info-cell.copyable, .meta-cell.copyable { position: relative; } @@ -279,103 +558,16 @@ tr:hover { font-weight: 500; } -.result-complete { - color: #28a745; -} - -.result-error { - color: #dc3545; -} - -.result-alert { - color: #fd7e14; -} - -.result-warn { - color: #ffc107; -} - -.result-running { - color: #6c757d; -} +.result-complete { color: #28a745; } +.result-error { color: #dc3545; } +.result-alert { color: 
#fd7e14; } +.result-warn { color: #ffc107; } +.result-running { color: #6c757d; } .time-cell { - font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; - font-size: 13px; - color: #6c757d; white-space: nowrap; } -.info-cell { - font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; - font-size: 13px; - color: #6c757d; - cursor: pointer; - line-height: 1.4; - user-select: text; -} - -.info-cell:hover { - background-color: #f8f9fa; -} - -.info-cell.truncated { - max-width: 200px; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.info-cell.expanded { - max-width: none; - white-space: normal; - word-wrap: break-word; - background-color: #f8f9fa; - border: 1px solid #dee2e6; - border-radius: 4px; - padding: 8px; - margin: 2px; -} - -.info-cell.copyable { - position: relative; -} - -.meta-cell { - font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; - font-size: 12px; - color: #6c757d; - cursor: pointer; - line-height: 1.4; - user-select: text; -} - -.meta-cell:hover { - background-color: #f8f9fa; -} - -.meta-cell.truncated { - max-width: 200px; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -.meta-cell.expanded { - max-width: none; - white-space: normal; - word-wrap: break-word; - background-color: #f8f9fa; - border: 1px solid #dee2e6; - border-radius: 4px; - padding: 8px; - margin: 2px; -} - -.meta-cell.copyable { - position: relative; -} - .message-cell { max-width: 300px; word-wrap: break-word; @@ -392,26 +584,24 @@ tr:hover { position: relative; } -.size-cell { - font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; - font-size: 13px; - color: #6c757d; -} - .percentage-cell { font-weight: 500; color: #495057; } -/* Button Styles */ -.refresh-btn { - background: #3498db; - color: white; +/* ===== BUTTON STYLES ===== */ +.refresh-btn, .btn { border: none; - padding: 10px 20px; border-radius: 4px; cursor: pointer; font-size: 14px; + transition: all 0.2s ease; +} + +.refresh-btn { + background: 
#3498db; + color: white; + padding: 10px 20px; margin-bottom: 20px; } @@ -421,11 +611,6 @@ tr:hover { .btn { padding: 8px 16px; - border: none; - border-radius: 4px; - cursor: pointer; - font-size: 14px; - transition: all 0.2s ease; } .btn-primary { @@ -446,7 +631,7 @@ tr:hover { background: #545b62; } -/* Date Navigation Styles */ +/* ===== DATE NAVIGATION STYLES ===== */ .date-nav { display: flex; align-items: center; @@ -474,7 +659,7 @@ tr:hover { font-size: 16px; } -/* Date Selector Styles */ +/* ===== DATE SELECTOR STYLES ===== */ .date-selector { display: flex; align-items: center; @@ -526,19 +711,13 @@ tr:hover { border-color: rgba(255, 255, 255, 0.5); } -/* Summary Section Styles */ +/* ===== SUMMARY STYLES ===== */ .summary-section { padding: 20px; background: #f8f9fa; border-bottom: 1px solid #e1e5e9; } -.summary { - background: #ecf0f1; - padding: 15px 20px; - border-bottom: 1px solid #bdc3c7; -} - .summary-section h3 { margin: 0 0 16px 0; color: #495057; @@ -546,6 +725,12 @@ tr:hover { font-weight: 600; } +.summary { + background: #ecf0f1; + padding: 15px 20px; + border-bottom: 1px solid #bdc3c7; +} + .summary-grid { display: grid; grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); @@ -587,19 +772,12 @@ tr:hover { font-weight: 500; } -.summary-time { +.summary-time, .summary-details { color: #6c757d; font-size: 14px; font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; } -.summary-details { - color: #6c757d; - font-size: 14px; - font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace; -} - -/* Summary Stats Styles */ .summary-stats { display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); @@ -633,7 +811,7 @@ tr:hover { text-align: center; } -/* Files List Styles */ +/* ===== FILES LIST STYLES ===== */ .files-list { padding: 0; } @@ -723,7 +901,7 @@ tr:hover { margin-left: 4px; } -/* Filter Styles */ +/* ===== FILTER STYLES ===== */ .filters { background: #f8f9fa; padding: 20px; @@ -776,7 +954,7 @@ tr:hover { 
margin-left: auto; } -/* Stats Styles */ +/* ===== STATS STYLES ===== */ .stats { padding: 16px 20px; background-color: #f8f9fa; @@ -785,7 +963,7 @@ tr:hover { color: #6c757d; } -/* No Data Styles */ +/* ===== NO DATA STYLES ===== */ .no-tasks, .no-files { text-align: center; padding: 40px 20px; @@ -803,7 +981,7 @@ tr:hover { font-size: 12px; } -/* Context Menu Styles */ +/* ===== CONTEXT MENU STYLES ===== */ .context-menu { position: absolute; background: white; @@ -833,7 +1011,7 @@ tr:hover { background-color: #e9ecef; } -/* Copy Feedback Styles */ +/* ===== COPY FEEDBACK STYLES ===== */ .copy-feedback { position: absolute; top: -25px; @@ -857,11 +1035,11 @@ tr:hover { 100% { opacity: 0; transform: translateX(-50%) translateY(-10px); } } -/* Error and Success States */ +/* ===== ERROR AND SUCCESS STATES ===== */ .error { color: #dc3545; } .success { color: #28a745; -} +} \ No newline at end of file diff --git a/apps/flowlord/handler/task.tmpl b/apps/flowlord/handler/task.tmpl index 86d2482..d042bc4 100644 --- a/apps/flowlord/handler/task.tmpl +++ b/apps/flowlord/handler/task.tmpl @@ -8,15 +8,7 @@ {{template "header" .}} -
- +
@@ -91,13 +83,13 @@ ID Type Job + Message Result Info Meta Created Task Time Queue Time - Message @@ -114,6 +106,15 @@ {{.Type}} {{if .Job}}{{.Job}}{{else}}{{getJobFromMeta .Meta}}{{end}} + + {{if ge (len .Msg) 80}}{{slice .Msg 0 80}}...{{else}}{{.Msg}}{{end}} + {{if .Result}}{{.Result}}{{else}}Running{{end}} @@ -138,12 +139,6 @@ {{if .Created}}{{.Created}}{{else}}N/A{{end}} {{if .TaskTime}}{{.TaskTime}}{{else}}N/A{{end}} {{if .QueueTime}}{{.QueueTime}}{{else}}N/A{{end}} - - {{.Msg}} - {{end}} diff --git a/apps/flowlord/handler_test.go b/apps/flowlord/handler_test.go index 95dd5bc..692577d 100644 --- a/apps/flowlord/handler_test.go +++ b/apps/flowlord/handler_test.go @@ -353,7 +353,7 @@ func TestMeta_UnmarshalJSON(t *testing.T) { // TestWebAlertPreview generates an HTML preview of the alert template for visual inspection // this provides an html file -func TestWebAlertPreview(t *testing.T) { +func TestAlertHTML(t *testing.T) { // Create sample alert data to showcase the templating sampleAlerts := []cache.AlertRecord{ { @@ -399,10 +399,10 @@ func TestWebAlertPreview(t *testing.T) { } // Generate HTML using the alertHTML function - htmlContent := alertHTML(sampleAlerts) + htmlContent := alertHTML(sampleAlerts, trial.TimeDay("2024-01-15")) // Write HTML to a file for easy viewing - outputFile := "alert_preview.html" + outputFile := "handler/alert_preview.html" err := os.WriteFile(outputFile, htmlContent, 0644) if err != nil { t.Fatalf("Failed to write HTML file: %v", err) @@ -448,7 +448,7 @@ func TestFilesHTML(t *testing.T) { html := filesHTML(files, date) // Write HTML to a file for easy viewing - outputFile := "files_preview.html" + outputFile := "handler/files_preview.html" err := os.WriteFile(outputFile, html, 0644) if err != nil { t.Fatalf("Failed to write HTML file: %v", err) @@ -478,7 +478,7 @@ func TestTaskHTML(t *testing.T) { html := taskHTML(testTasks, date, "", "", "") // Write HTML to a file for easy viewing - outputFile := "task_preview.html" + outputFile 
:= "handler/task_preview.html" err = os.WriteFile(outputFile, html, 0644) if err != nil { t.Fatalf("Failed to write HTML file: %v", err) @@ -513,7 +513,7 @@ func TestAboutHTML(t *testing.T) { html := tm.aboutHTML() // Write HTML to a file for easy viewing - outputFile := "about_preview.html" + outputFile := "handler/about_preview.html" err = os.WriteFile(outputFile, html, 0644) if err != nil { t.Fatalf("Failed to write HTML file: %v", err) diff --git a/apps/flowlord/sqlite_progress.md b/apps/flowlord/sqlite_progress.md deleted file mode 100644 index de7df4b..0000000 --- a/apps/flowlord/sqlite_progress.md +++ /dev/null @@ -1,99 +0,0 @@ -# SQLite Migration Progress Tracker - -## Current State Analysis -- ✅ Basic SQLite task caching exists (`events`, `task_log` tables) -- ✅ Memory-based workflow management via `workflow.Cache` -- ✅ File pattern matching with in-memory rules -- ✅ Alert system using channels and Slack notifications -- ❌ No persistent storage for workflows, file patterns, or alert history - -## Implementation Status - -### ✅ Completed: Enhanced Specification Document -**Goal**: Design complete SQLite schema for all data requirements -- Created comprehensive technical specification -- Defined all required tables and relationships -- Documented implementation approach - -### ⏳ Pending: Alert Records System -**Goal**: Replace current channel-based alerts with persistent storage -- Store alert events with timestamps, task references, and severity -- Link alerts to time-based dashboard views -- Track alert frequency and backoff history -- Replace current channel-based system with persistent storage - -### ⏳ Pending: Workflow Phase Storage -**Goal**: Replace in-memory workflow maps with SQLite tables -- Store workflow files, phases, and their configurations -- Enable dependency mapping and validation queries -- Support dynamic workflow updates without application restarts -- Provide configuration issue detection - -### ⏳ Pending: File Topic Message History 
-**Goal**: Log all file topic messages with matching results -- Log all file topic messages with metadata -- Track pattern matching results and associated phases -- Store file processing timestamps and outcomes -- Enable file processing analytics and debugging - -### ⏳ Pending: Task.Done Message Recording -**Goal**: Enhanced tracking of task.Done message processing -- Enhanced tracking of task.Done message processing -- Record phase matches and triggered child tasks -- Maintain task relationship chains for debugging -- Link to workflow execution flow - -### ⏳ Pending: Task Record Optimization -**Goal**: Redesign task storage for optimal querying and deduplication -- Redesign task storage for optimal querying -- Implement task deduplication logic -- Optimize for tracking by type, job, creation, result, uniqueID -- Consolidate to single table design with proper indexing - -### ⏳ Pending: Backup and Restoration System -**Goal**: Automated database backup and recovery -- Automated GCS backup during application shutdown -- Periodic backup scheduling (hourly/daily configurable) -- Restoration logic comparing local vs GCS timestamps -- Database schema migration support -- Backup verification and integrity checks - -### ⏳ Pending: Retention and Size Management -**Goal**: Database maintenance and optimization -- Configurable retention periods per table type -- Automated cleanup jobs for expired data -- Table size monitoring and alerting -- REST API endpoints for database metrics -- Storage optimization strategies - -### ⏳ Pending: REST API Development -**Goal**: Create API endpoints for UI and external access -- `/api/metrics` - Database size and performance metrics -- `/api/tasks` - Task search and filtering -- `/api/alerts` - Alert history and management -- `/api/files` - File processing history -- `/api/workflows` - Workflow configuration and status -- `/api/summary` - Dashboard summary data - -### ⏳ Pending: Web UI Dashboard Components -**Goal**: Build comprehensive web 
interface - -**Components**: -- Summary Status - Task breakdowns by type/job with statistics -- Alert Management - Alert timeline with filtering and analysis -- File Processing Dashboard - Searchable file processing history -- Workflow Visualization - Interactive dependency graphs -- Task Search and Management - Advanced task search and filtering - -## Next Actions -1. **Start with Alert Records System** - Most foundational component -2. **Design alert_records table schema** -3. **Modify alert handling in taskmaster.go** -4. **Add persistence to notification system** -5. **Create basic API endpoints for alert data** - -## Development Notes -- Maintain backward compatibility during migration -- Use prepared statements and proper indexing -- Test each milestone thoroughly before proceeding -- Update this progress document after each completed milestone From 46745b7a2d89610462b730c957ce0fbd8d83b89b Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Thu, 2 Oct 2025 15:36:33 -0600 Subject: [PATCH 14/40] workflow spec --- apps/flowlord/sqlite_spec.md | 339 ++++++++++++++++++++++++++++++++--- 1 file changed, 319 insertions(+), 20 deletions(-) diff --git a/apps/flowlord/sqlite_spec.md b/apps/flowlord/sqlite_spec.md index 57d2965..16b0657 100644 --- a/apps/flowlord/sqlite_spec.md +++ b/apps/flowlord/sqlite_spec.md @@ -307,25 +307,323 @@ WHERE created BETWEEN ? AND ?; - Log conflicts on task creation (unexpected duplicates) for monitoring - Maintain existing Cache interface for backward compatibility -### Task Relationships and Dependencies -Track task.Done message processing and child task triggering. +### Workflow and Phase +This replaces the in-memory workflow.Cache system with a persistent SQLite-based approach while maintaining the exact same interface and naming conventions. 
+ +#### Updated Schema Design ```sql -CREATE TABLE task_relationships ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - parent_task_id TEXT NOT NULL, - child_task_id TEXT NOT NULL, - relationship_type TEXT DEFAULT 'triggered', -- 'triggered', 'retry', 'failed_retry' - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - workflow_phase_id INTEGER, - FOREIGN KEY (parent_task_id) REFERENCES events(id), - FOREIGN KEY (child_task_id) REFERENCES events(id), - FOREIGN KEY (workflow_phase_id) REFERENCES workflow_phases(id) +-- Workflow file tracking (replaces in-memory workflow file cache) +CREATE TABLE workflow_files ( + file_path TEXT PRIMARY KEY, + file_hash TEXT NOT NULL, + loaded_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + last_modified TIMESTAMP, + is_active BOOLEAN DEFAULT TRUE ); -CREATE INDEX idx_task_relationships_parent ON task_relationships (parent_task_id); -CREATE INDEX idx_task_relationships_child ON task_relationships (child_task_id); -``` +-- Workflow phases (matches Phase struct exactly) +CREATE TABLE workflow_phases ( + workflow_file_path TEXT NOT NULL, + task TEXT NOT NULL, -- topic:job format (e.g., "data-load:hourly") + depends_on TEXT, + rule TEXT, -- URI query parameters (e.g., "cron=0 0 * * *&offset=1h") + template TEXT, + retry INTEGER DEFAULT 0, -- threshold of times to retry + status TEXT, -- phase status info (warnings, errors, validation messages) + PRIMARY KEY (workflow_file_path, task) +); + +-- Task relationships are generated dynamically from workflow_phases +-- No separate table needed - relationships are derived from depends_on field +-- This approach is simpler, more maintainable, and always up-to-date + +-- Indexes for performance +CREATE INDEX idx_workflow_phases_task ON workflow_phases (task); +CREATE INDEX idx_workflow_phases_depends_on ON workflow_phases (depends_on); +CREATE INDEX idx_workflow_phases_status ON workflow_phases (status); +``` + +#### Benefits of Human-Readable Keys + +**1. 
Direct File Path References** +- `workflow_files.file_path` is the primary key (e.g., "workflows/data-load.toml") +- No need to join tables to see which file a phase belongs to +- Easy to identify and debug workflow issues + +**2. Composite Primary Keys for Phases** +- `(workflow_file_path, task)` uniquely identifies each phase, with `task` in topic:job format +- Directly readable: `("workflows/data-load.toml", "data-load:hourly")` +- No surrogate IDs to remember or map + +**3. Dynamic Task Relationships** +- Task relationships are generated from `depends_on` field in workflow_phases +- No separate table to maintain or keep in sync +- Always up-to-date with current workflow configuration +- Simpler schema with fewer tables and foreign keys + +**4. Phase Status Tracking** +- `status` field stores validation messages, warnings, and errors for each phase +- Replaces console logging with persistent database storage +- Enables querying and filtering phases by status +- Provides better debugging and monitoring capabilities + +**Status Field Usage Examples:** +- `"invalid phase: rule and dependsOn are blank"` +- `"no valid rule found: cron=invalid"` +- `"parent task not found: data-load"` +- `"ignored rule: cron=0 0 * * *"` +- `"warning: retry count exceeds recommended limit"` +- `""` (empty string for phases with no issues) + +**Example Queries (Much More Readable):** + +```sql +-- Find all phases in a specific workflow file +SELECT task, depends_on, rule, status +FROM workflow_phases +WHERE workflow_file_path = 'workflows/data-load.toml'; + +-- Find phases that depend on a specific task type +SELECT workflow_file_path, task, rule, status +FROM workflow_phases +WHERE depends_on = 'data-load'; + +-- Find phases by topic (using LIKE for topic:job matching) +SELECT workflow_file_path, task, depends_on, rule, status +FROM workflow_phases +WHERE task LIKE 'data-load:%'; + +-- Find phases with warnings or errors +SELECT workflow_file_path, task, status +FROM workflow_phases +WHERE status IS 
NOT NULL AND status != ''; + +-- Find phases with specific status messages +SELECT workflow_file_path, task, status +FROM workflow_phases +WHERE status LIKE '%warning%' OR status LIKE '%error%'; + +-- Generate task relationships dynamically (parent -> child) +SELECT + parent.depends_on as parent_task, + parent.task as child_task, + parent.workflow_file_path, + parent.rule as child_rule, + parent.status as child_status +FROM workflow_phases parent +WHERE parent.depends_on IS NOT NULL AND parent.depends_on != ''; + +-- Find all children of a specific task +SELECT + child.task as child_task, + child.workflow_file_path, + child.rule as child_rule, + child.status as child_status +FROM workflow_phases child +WHERE child.depends_on = 'data-load'; + +-- Find all parents of a specific task +SELECT + parent.depends_on as parent_task, + parent.workflow_file_path, + parent.rule as parent_rule, + parent.status as parent_status +FROM workflow_phases parent +WHERE parent.task = 'data-load:hourly'; + +-- Get workflow file info with phase count and status summary +SELECT + wf.file_path, + wf.file_hash, + wf.loaded_at, + COUNT(wp.task) as phase_count, + COUNT(CASE WHEN wp.status IS NOT NULL AND wp.status != '' THEN 1 END) as phases_with_status +FROM workflow_files wf +LEFT JOIN workflow_phases wp ON wf.file_path = wp.workflow_file_path +GROUP BY wf.file_path; +``` + +#### Maintained Interface Design +The new SQLite-based implementation will maintain the exact same interface as the current `workflow.Cache`: + +```go +// Keep existing workflow.Cache interface unchanged +type Cache interface { + // Existing methods remain exactly the same + Search(task, job string) (path string, ph Phase) + Get(t task.Task) Phase + Children(t task.Task) []Phase + Refresh() (changedFiles []string, err error) + IsDir() bool + Close() error +} + +// Keep existing Phase struct unchanged +type Phase struct { + Task string // Should use Topic() and Job() for access + Rule string + DependsOn string // Task 
that the previous workflow depends on + Retry int + Template string // template used to create the task +} + +// Keep existing Workflow struct unchanged +type Workflow struct { + Checksum string // md5 hash for the file to check for changes + Phases []Phase `toml:"phase"` +} +``` + +#### Implementation Strategy +- **Same Package**: Keep everything in `workflow` package +- **Same Structs**: Maintain `Phase` and `Workflow` structs exactly as they are +- **Same Methods**: All existing methods return the same types and behavior +- **SQLite Backend**: Replace in-memory storage with SQLite persistence +- **Zero Breaking Changes**: All existing unit tests continue to work unchanged + +#### Key Benefits +1. **Persistence**: Workflow configurations survive restarts +2. **Historical Tracking**: Full audit trail of task relationships +3. **Performance**: Indexed queries for fast dependency resolution +4. **Scalability**: No memory limitations for large workflow sets +5. **Debugging**: Rich querying capabilities for troubleshooting +6. **Simplified Architecture**: Single SQLite instance replaces in-memory cache + +#### Implementation Plan + +**Phase 1: Update Workflow Package** +1. Modify existing `workflow.Cache` to use SQLite backend +2. Keep all existing interfaces, structs, and method signatures unchanged +3. Add SQLite persistence to workflow file loading +4. Implement task relationship tracking within existing structure + +**Phase 2: Update Flowlord Integration** +1. No changes needed to `taskmaster.go` - same interface +2. Update workflow loading to use SQLite persistence +3. Add task relationship recording to existing task processing +4. Maintain all existing method calls and behavior + +**Phase 3: Handler Updates** +1. Update handlers to query SQLite for workflow data +2. Add task relationship queries to existing endpoints +3. Enhance alert system with SQLite-based data +4. Maintain existing response formats + +**Phase 4: Testing and Validation** +1. 
All existing unit tests continue to work unchanged +2. Add SQLite-specific integration tests +3. Performance testing for SQLite queries +4. Migration testing from existing workflow files + +#### Migration Strategy + +**Seamless Replacement Approach:** +- Keep existing `workflow.Cache` interface and structs +- Replace in-memory storage with SQLite persistence +- Zero breaking changes to existing code +- All unit tests continue to work without modification + +**Key Implementation Details:** + +```go +// Keep existing Cache struct, add SQLite backend +type Cache struct { + db *sql.DB + path string + isDir bool + fOpts file.Options + mutex sync.RWMutex + // Remove: Workflows map[string]Workflow +} + +// Keep existing methods with SQLite implementation using simplified schema +func (c *Cache) Search(task, job string) (path string, ph Phase) { + // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status + // FROM workflow_phases WHERE task = ? OR task LIKE ? + // (where ? is either exact match or topic:job format) + // Return same results as before, with status info available +} + +func (c *Cache) Get(t task.Task) Phase { + // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status + // FROM workflow_phases WHERE task = ? OR task LIKE ? + // (where ? is either exact match or topic:job format) + // Return same Phase struct with status info +} + +func (c *Cache) Children(t task.Task) []Phase { + // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status + // FROM workflow_phases WHERE depends_on = ? OR depends_on LIKE ? + // (where ? 
matches the task type or topic:job format) + // Return same []Phase slice with status info +} + +func (c *Cache) Refresh() (changedFiles []string, err error) { + // Check file hashes against workflow_files table using file_path as key + // Load changed files into SQLite using file_path as primary key + // Return same changedFiles list +} + +// Dynamic task relationship queries (no separate table needed) +func (c *Cache) GetTaskRelationships(parentTask string) ([]Phase, error) { + // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry + // FROM workflow_phases WHERE depends_on = ? + // Returns all phases that depend on the parent task +} + +func (c *Cache) GetTaskDependencies(childTask string) ([]Phase, error) { + // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry + // FROM workflow_phases WHERE task = ? + // Returns the phase that defines the child task and its dependencies +} +``` + +**Database Schema Migration:** +- Add new tables alongside existing ones +- Migrate existing workflow data if needed +- Remove old tables after successful migration +- Maintain data integrity throughout process + +#### Required Code Changes + +**1. Package Imports** +```go +// Keep existing import - no changes needed +import "github.com/pcelvng/task-tools/workflow" +``` + +**2. TaskMaster Struct Update** +```go +// Keep existing struct - no changes needed +type taskMaster struct { + // ... other fields + *workflow.Cache // Same as before + // ... other fields +} +``` + +**3. Workflow Loading Changes** +```go +// Keep existing code - no changes needed +if tm.Cache, err = workflow.New(tm.path, tm.fOpts); err != nil { + return fmt.Errorf("workflow setup %w", err) +} +``` + +**4. Dependency Resolution Updates** +```go +// Keep existing code - no changes needed +phase := tm.Cache.Get(task) +children := tm.Cache.Children(task) +``` + +**5. 
Handler Updates** +```go +// Keep existing code - no changes needed +// All existing method calls work the same +// Only internal implementation changes to use SQLite +``` ## Database Maintenance @@ -569,11 +867,12 @@ GET /api/alerts?date=YYYY-MM-DD&format=json - JSON output for debugging/scripts - Batch operations for high-throughput scenarios - Foreign key constraints for data integrity -### Backward Compatibility Strategy -- Maintain existing Cache interface during migration -- Gradual replacement of memory storage with SQLite -- Preserve current API contracts and behavior -- Seamless transition without downtime +### Workflow Architecture Strategy +- Complete replacement of in-memory storage with SQLite persistence +- Maintain exact same workflow.Cache interface and behavior +- Simplified architecture with single SQLite instance +- Enhanced functionality with persistent task relationship tracking +- Zero breaking changes - all existing code continues to work ### Performance Considerations - Connection pooling and prepared statement caching From b7b60ec313fd485f32cc6b4bfe440e47061d0f36 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Fri, 3 Oct 2025 11:50:27 -0600 Subject: [PATCH 15/40] css cleanup, interactive stat cards --- apps/flowlord/cache/workflow.go | 71 +++++ apps/flowlord/handler/about.tmpl | 2 +- apps/flowlord/handler/alert.tmpl | 2 +- apps/flowlord/handler/files.tmpl | 73 ++++- apps/flowlord/handler/static/style.css | 406 +++++-------------------- apps/flowlord/handler/task.tmpl | 112 ++++++- 6 files changed, 315 insertions(+), 351 deletions(-) create mode 100644 apps/flowlord/cache/workflow.go diff --git a/apps/flowlord/cache/workflow.go b/apps/flowlord/cache/workflow.go new file mode 100644 index 0000000..d7fbee0 --- /dev/null +++ b/apps/flowlord/cache/workflow.go @@ -0,0 +1,71 @@ +package cache + +import ( + "github.com/pcelvng/task" + + "github.com/pcelvng/task-tools/file" +) + +// Phase is same as workflow.Phase +type Phase struct { + Task 
string // Should use Topic() and Job() for access + Rule string + DependsOn string // Task that the previous workflow depends on + Retry int + Template string // template used to create the task +} + +func (p Phase) IsEmpty() bool { return false } +func (p Phase) Job() string { return "" } +func (p Phase) Topic() string { return "" } + +// Workflow is a list of phases with a checksum for the file +type Workflow struct { + Checksum string // md5 hash for the file to check for changes + Phases []Phase `toml:"phase"` +} + +type Cache struct{} // replace with sqlite db + +// newWorkflow read the workflow file or directory and updates the underlying db cache +func newWorkflow(path, opts *file.Options) *Cache { return nil } + +// Search +// Do we still need to return the path if its stored in a DB? +// should list return a list of matching phases rather than the first match? +func (c *Cache) Search(task, job string) (path string, ph Phase) { return "", Phase{} } + +// Get Phase associated with task based on Task.Topic and Task.Job and workflow file +func (c *Cache) Get(t task.Task) Phase { + return Phase{} +} + +// Children of the given task t, a child phase is one that dependsOn another task +// Empty slice will be returned if no children are found. +// A task without a type or metadata containing the workflow info +// will result in an error +func (c *Cache) Children(t task.Task) []Phase { return nil } + +// Refresh checks the cache and reloads any files if the checksum has changed. +func (c *Cache) Refresh() (changedFiles []string, err error) { return nil, nil } + +// listAllFiles recursively lists all files in a folder and sub-folders +// Keep as is? +func listAllFiles(p string, opts *file.Options) ([]string, error) { return nil, nil } + +// loadFile checks a files checksum and updates map if required +// loaded file name is returned +// Keep as is? 
+func (c *Cache) loadFile(path string, opts *file.Options) (f string, err error) { return "", nil } + +// filePath returns a filePath consist of all unique part +// after the path set in the cache +// may not be needed if we store in a DB +func (c *Cache) filePath(p string) (s string) { return "" } + +// Close the cache +// the chanel is used to force a wait until all routines are done +func (c *Cache) Close() error { + // close(c.done) + return nil +} diff --git a/apps/flowlord/handler/about.tmpl b/apps/flowlord/handler/about.tmpl index 4891e88..b992eae 100644 --- a/apps/flowlord/handler/about.tmpl +++ b/apps/flowlord/handler/about.tmpl @@ -3,7 +3,7 @@ - Flowlord About - System Information + Flowlord: About diff --git a/apps/flowlord/handler/alert.tmpl b/apps/flowlord/handler/alert.tmpl index dc1d0e7..59958ff 100644 --- a/apps/flowlord/handler/alert.tmpl +++ b/apps/flowlord/handler/alert.tmpl @@ -2,7 +2,7 @@ - Task Status Report + Flowlord: Alerts diff --git a/apps/flowlord/handler/files.tmpl b/apps/flowlord/handler/files.tmpl index 3279de9..1dc50c2 100644 --- a/apps/flowlord/handler/files.tmpl +++ b/apps/flowlord/handler/files.tmpl @@ -3,7 +3,7 @@ - Flowlord File Messages - {{.Date}} + Flowlord: Files @@ -13,19 +13,19 @@ {{if .Files}}
-
+
{{.TotalFiles}}
Total Files
-
+
{{.MatchedFiles}}
With Tasks
-
+
{{.UnmatchedFiles}}
No Matches
-
+
{{.TotalTasks}}
Tasks Created
@@ -34,7 +34,7 @@
{{range .Files}} -
+
{{.Path}}
Size: {{.Size | formatBytes}} @@ -80,6 +80,67 @@ setTimeout(function() { location.reload(); }, 30000); + + // Filter files by type using stat-cards + function filterFilesByType(filterType) { + // Update stat-card active states + updateFileStatCardActiveStates(filterType); + + // Filter file items + const fileItems = document.querySelectorAll('.file-item'); + + fileItems.forEach(item => { + const fileType = item.getAttribute('data-file-type'); + const hasTasks = item.getAttribute('data-has-tasks') === 'true'; + let shouldShow = false; + + switch(filterType) { + case 'all': + shouldShow = true; + break; + case 'matched': + shouldShow = fileType === 'matched'; + break; + case 'unmatched': + shouldShow = fileType === 'unmatched'; + break; + case 'tasks': + shouldShow = hasTasks; + break; + default: + shouldShow = true; + } + + item.style.display = shouldShow ? '' : 'none'; + }); + + // Update file count display + updateFileCount(); + } + + // Update stat-card active states for files + function updateFileStatCardActiveStates(activeFilter) { + const statCards = document.querySelectorAll('.stat-card'); + statCards.forEach(card => { + const filter = card.getAttribute('data-filter'); + if (filter === activeFilter) { + card.classList.add('active'); + } else { + card.classList.remove('active'); + } + }); + } + + // Update file count display + function updateFileCount() { + const visibleItems = document.querySelectorAll('.file-item[style*="display: none"], .file-item:not([style*="display: none"])'); + const visibleCount = Array.from(visibleItems).filter(item => + !item.style.display || item.style.display !== 'none' + ).length; + + // Update the stat numbers if needed (optional) + console.log(`Showing ${visibleCount} files`); + } diff --git a/apps/flowlord/handler/static/style.css b/apps/flowlord/handler/static/style.css index c740d28..75ec5ed 100644 --- a/apps/flowlord/handler/static/style.css +++ b/apps/flowlord/handler/static/style.css @@ -6,130 +6,19 @@ font-family: 
'Monaco', 'Menlo', 'Ubuntu Mono', monospace; } -/* Common colors */ -.text-muted { - color: #6c757d; -} - -.text-primary { - color: #495057; -} - -.text-secondary { - color: #2c3e50; -} - -.bg-light { - background-color: #f8f9fa; -} - -.bg-white { - background: white; -} - -/* Common spacing */ -.p-0 { padding: 0; } -.p-1 { padding: 4px; } -.p-2 { padding: 8px; } -.p-3 { padding: 12px; } -.p-4 { padding: 16px; } -.p-5 { padding: 20px; } - -.m-0 { margin: 0; } -.mb-1 { margin-bottom: 4px; } -.mb-2 { margin-bottom: 8px; } -.mb-3 { margin-bottom: 12px; } -.mb-4 { margin-bottom: 16px; } -.mb-5 { margin-bottom: 20px; } - -/* Common border styles */ -.border { - border: 1px solid #e1e5e9; -} - -.border-light { - border: 1px solid #e9ecef; -} - -.rounded { - border-radius: 4px; -} - -.rounded-lg { - border-radius: 8px; -} - -/* Common shadow styles */ -.shadow-sm { - box-shadow: 0 1px 3px rgba(0,0,0,0.1); -} +/* Common colors - removed unused classes */ -.shadow { - box-shadow: 0 2px 10px rgba(0,0,0,0.1); -} - -.shadow-lg { - box-shadow: 0 4px 12px rgba(0,0,0,0.15); -} - -/* Common display styles */ -.flex { - display: flex; -} - -.flex-center { - display: flex; - align-items: center; - justify-content: center; -} - -.flex-between { - display: flex; - justify-content: space-between; - align-items: center; -} +/* Common spacing - removed unused classes */ -.grid { - display: grid; -} +/* Common border styles - removed unused classes */ -.grid-auto { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); - gap: 16px; -} +/* Common shadow styles - removed unused classes */ -.grid-stats { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); - gap: 16px; -} +/* Common display styles - removed unused classes */ -/* Common text styles */ -.text-center { - text-align: center; -} +/* Common text styles - removed unused classes */ -.text-uppercase { - text-transform: uppercase; -} - -.font-weight-500 { - font-weight: 500; -} - 
-.font-weight-600 { - font-weight: 600; -} - -.font-weight-bold { - font-weight: bold; -} - -/* Common transition */ -.transition { - transition: all 0.2s ease; -} +/* Common transition - removed unused classes */ /* ===== BASE STYLES ===== */ body { @@ -148,32 +37,7 @@ body { overflow: hidden; } -/* ===== HEADER STYLES ===== */ -.header { - color: white; - padding: 20px; - text-align: center; -} - -.header { - background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); -} - -.header.simple { - background: #2c3e50; -} - -.header h1 { - margin: 0 0 15px 0; - font-size: 24px; - font-weight: 600; -} - -.header .date { - margin-top: 5px; - opacity: 0.8; - font-size: 14px; -} +/* ===== HEADER STYLES - removed unused classes ===== */ /* ===== NAVIGATION STYLES ===== */ .nav-header { @@ -324,39 +188,7 @@ body { background: rgba(255, 255, 255, 0.2); } -/* ===== DATE DISPLAY STYLES ===== */ -.date-display { - display: flex; - align-items: center; - gap: 10px; -} - - -.current-date { - font-size: 16px; - font-weight: 500; - color: #2c3e50; - background: #f8f9fa; - padding: 8px 16px; - border-radius: 6px; - border: 1px solid #e1e5e9; -} - -.refresh-btn { - background: #3498db; - color: white; - border: none; - padding: 8px 16px; - border-radius: 6px; - font-size: 14px; - font-weight: 500; - cursor: pointer; - transition: background-color 0.2s ease; -} - -.refresh-btn:hover { - background: #2980b9; -} +/* ===== DATE DISPLAY STYLES - removed unused classes ===== */ /* ===== CONTENT STYLES ===== */ .content { @@ -562,7 +394,6 @@ tr:hover { .result-error { color: #dc3545; } .result-alert { color: #fd7e14; } .result-warn { color: #ffc107; } -.result-running { color: #6c757d; } .time-cell { white-space: nowrap; @@ -613,14 +444,7 @@ tr:hover { padding: 8px 16px; } -.btn-primary { - background: #007bff; - color: white; -} - -.btn-primary:hover { - background: #0056b3; -} +/* Removed unused btn-primary styles */ .btn-secondary { background: #6c757d; @@ -631,91 +455,67 @@ tr:hover { 
background: #545b62; } -/* ===== DATE NAVIGATION STYLES ===== */ -.date-nav { - display: flex; - align-items: center; - gap: 12px; - flex-wrap: wrap; - justify-content: center; -} +/* ===== DATE NAVIGATION STYLES - removed unused classes ===== */ -.date-nav a { - color: rgba(255, 255, 255, 0.9); - text-decoration: none; - padding: 8px 16px; - border: 1px solid rgba(255, 255, 255, 0.3); - border-radius: 6px; - transition: all 0.2s ease; -} +/* ===== DATE SELECTOR STYLES - removed unused classes ===== */ -.date-nav a:hover { - background: rgba(255, 255, 255, 0.2); - border-color: rgba(255, 255, 255, 0.5); +/* ===== SUMMARY STYLES ===== */ +.summary-section { + padding: 20px; + background: #f8f9fa; + border-bottom: 1px solid #e1e5e9; } -.current-date { - font-weight: 600; - font-size: 16px; +/* ===== COLLAPSIBLE SECTION STYLES ===== */ +.collapsible-section { + margin-bottom: 20px; } -/* ===== DATE SELECTOR STYLES ===== */ -.date-selector { +.collapsible-header { display: flex; - align-items: center; - gap: 12px; - flex-wrap: wrap; - justify-content: center; -} - -.date-selector label { - font-size: 14px; - font-weight: 500; - color: rgba(255, 255, 255, 0.9); + align-items: baseline; + gap: 6px; + margin: 0 0 16px 0; + cursor: pointer; + transition: all 0.2s ease; + user-select: none; } -.date-selector input[type="date"] { - padding: 8px 12px; - border: 1px solid rgba(255, 255, 255, 0.3); - border-radius: 6px; - background: rgba(255, 255, 255, 0.1); - color: white; - font-size: 14px; - outline: none; - transition: all 0.2s ease; +.collapsible-header:hover h3 { + color: #3498db; } -.date-selector input[type="date"]:focus { - border-color: rgba(255, 255, 255, 0.6); - background: rgba(255, 255, 255, 0.2); +.collapsible-header h3 { + margin: 0; + font-size: 18px; + font-weight: 600; + color: #495057; + transition: color 0.2s ease; + line-height: 1; } -.date-selector input[type="date"]::-webkit-calendar-picker-indicator { - filter: invert(1); - cursor: pointer; 
+.collapsible-toggle { + font-size: 12px; + color: #6c757d; + transition: transform 0.2s ease; + line-height: 1; + display: inline-block; + vertical-align: baseline; + margin-top: 2px; } -.date-selector button { - padding: 8px 16px; - background: rgba(255, 255, 255, 0.2); - border: 1px solid rgba(255, 255, 255, 0.3); - border-radius: 6px; - color: white; - font-size: 14px; - cursor: pointer; - transition: all 0.2s ease; +.collapsible-toggle.expanded { + transform: rotate(180deg); } -.date-selector button:hover { - background: rgba(255, 255, 255, 0.3); - border-color: rgba(255, 255, 255, 0.5); +.collapsible-content { + transition: all 0.3s ease; + overflow: hidden; } -/* ===== SUMMARY STYLES ===== */ -.summary-section { - padding: 20px; - background: #f8f9fa; - border-bottom: 1px solid #e1e5e9; +.collapsible-content.collapsed { + max-height: 0; + opacity: 0; } .summary-section h3 { @@ -792,6 +592,29 @@ tr:hover { padding: 16px; text-align: center; box-shadow: 0 1px 3px rgba(0,0,0,0.1); + cursor: pointer; + transition: all 0.2s ease; +} + +.stat-card:hover { + background: #f8f9fa; + border-color: #3498db; + box-shadow: 0 2px 8px rgba(0,0,0,0.15); +} + +.stat-card.active { + background: #3498db; + border-color: #2980b9; + color: white; + box-shadow: 0 2px 8px rgba(52, 152, 219, 0.3); +} + +.stat-card.active .stat-number { + color: white; +} + +.stat-card.active .stat-label { + color: rgba(255, 255, 255, 0.9); } .stat-number { @@ -807,9 +630,7 @@ tr:hover { text-transform: uppercase; } -.stat { - text-align: center; -} +/* Removed unused .stat class - now using .stat-card */ /* ===== FILES LIST STYLES ===== */ .files-list { @@ -872,12 +693,6 @@ tr:hover { color: #2c3e50; } -.task-tags { - display: flex; - gap: 8px; - flex-wrap: wrap; -} - .task-tag { background: #3498db; color: white; @@ -895,12 +710,6 @@ tr:hover { background: #27ae60; } -.task-name { - color: #bdc3c7; - font-weight: normal; - margin-left: 4px; -} - /* ===== FILTER STYLES ===== */ .filters { 
background: #f8f9fa; @@ -981,65 +790,8 @@ tr:hover { font-size: 12px; } -/* ===== CONTEXT MENU STYLES ===== */ -.context-menu { - position: absolute; - background: white; - border: 1px solid #dee2e6; - border-radius: 4px; - box-shadow: 0 4px 12px rgba(0,0,0,0.15); - z-index: 1000; - min-width: 120px; - padding: 4px 0; -} +/* ===== CONTEXT MENU STYLES - removed unused classes ===== */ -.context-menu-item { - padding: 8px 16px; - cursor: pointer; - font-size: 14px; - color: #495057; - display: flex; - align-items: center; - gap: 8px; -} - -.context-menu-item:hover { - background-color: #f8f9fa; -} - -.context-menu-item:active { - background-color: #e9ecef; -} - -/* ===== COPY FEEDBACK STYLES ===== */ -.copy-feedback { - position: absolute; - top: -25px; - left: 50%; - transform: translateX(-50%); - background: #28a745; - color: white; - padding: 4px 8px; - border-radius: 4px; - font-size: 12px; - font-weight: 500; - z-index: 1001; - pointer-events: none; - animation: copyFeedback 2s ease-out forwards; -} - -@keyframes copyFeedback { - 0% { opacity: 0; transform: translateX(-50%) translateY(0); } - 20% { opacity: 1; transform: translateX(-50%) translateY(-5px); } - 80% { opacity: 1; transform: translateX(-50%) translateY(-5px); } - 100% { opacity: 0; transform: translateX(-50%) translateY(-10px); } -} - -/* ===== ERROR AND SUCCESS STATES ===== */ -.error { - color: #dc3545; -} +/* ===== COPY FEEDBACK STYLES - removed unused classes ===== */ -.success { - color: #28a745; -} \ No newline at end of file +/* ===== ERROR AND SUCCESS STATES - removed unused classes ===== */ \ No newline at end of file diff --git a/apps/flowlord/handler/task.tmpl b/apps/flowlord/handler/task.tmpl index d042bc4..7be5cc4 100644 --- a/apps/flowlord/handler/task.tmpl +++ b/apps/flowlord/handler/task.tmpl @@ -3,7 +3,7 @@ - Flowlord Task Dashboard - {{.Date}} + Flowlord: Tasks @@ -14,43 +14,50 @@

Task Summary

-
+
{{.TotalTasks}}
Total Tasks
-
+
{{.CompletedTasks}}
Completed
-
+
{{.ErrorTasks}}
Errors
-
+
{{.AlertTasks}}
Alerts
-
+
{{.WarnTasks}}
Warnings
-
+
{{.RunningTasks}}
Running
-

Task Type Summary

-
- {{range $key, $stat := .Summary}} -
-
- {{$key}} - {{add $stat.CompletedCount $stat.ErrorCount}} tasks +
+
+

Task Type Summary

+ +
+
+
+ {{range $key, $stat := .Summary}} +
+
+ {{$key}} + {{add $stat.CompletedCount $stat.ErrorCount}} tasks +
+
{{$stat.String}}
+
+ {{end}}
-
{{$stat.String}}
- {{end}}
@@ -428,9 +435,82 @@ jobOptions.forEach(option => option.remove()); } + // Clear stat-card active states and reset to "all" + clearStatCardActiveStates(); + updateStatCardActiveStates('all'); + filterTable(); } + // Filter by result type using stat-cards + function filterByResult(resultType) { + // Update stat-card active states + updateStatCardActiveStates(resultType); + + // Update the existing filter logic to include result filtering + const table = document.getElementById('taskTable'); + if (!table) return; + + const rows = Array.from(table.querySelectorAll('tbody tr')); + + rows.forEach(row => { + const resultCell = row.cells[4]; // Result column (0-indexed) + + if (resultCell) { + const result = resultCell.textContent.trim().toLowerCase(); + let shouldShow = false; + + if (resultType === 'all') { + shouldShow = true; + } else if (resultType === 'running') { + shouldShow = result === '' || result === 'running'; + } else { + shouldShow = result === resultType; + } + + row.style.display = shouldShow ? 
'' : 'none'; + } + }); + + // Update task count + updateTaskCount(); + } + + // Update stat-card active states + function updateStatCardActiveStates(activeFilter) { + const statCards = document.querySelectorAll('.stat-card'); + statCards.forEach(card => { + const filter = card.getAttribute('data-filter'); + if (filter === activeFilter) { + card.classList.add('active'); + } else { + card.classList.remove('active'); + } + }); + } + + // Clear all stat-card active states + function clearStatCardActiveStates() { + const statCards = document.querySelectorAll('.stat-card'); + statCards.forEach(card => { + card.classList.remove('active'); + }); + } + + // Toggle collapsible section + function toggleCollapsible(sectionId) { + const content = document.getElementById(sectionId + '-content'); + const toggle = document.getElementById(sectionId + '-toggle'); + + if (content.classList.contains('collapsed')) { + content.classList.remove('collapsed'); + toggle.classList.add('expanded'); + } else { + content.classList.add('collapsed'); + toggle.classList.remove('expanded'); + } + } + // Toggle field expansion/collapse functionality function toggleField(element, fullText) { // Prevent event bubbling to avoid conflicts with sorting From 0fff37ab2ed8e5e15b8c126548a26712662d0e86 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Fri, 3 Oct 2025 13:30:26 -0600 Subject: [PATCH 16/40] fix style.css --- apps/flowlord/handler.go | 15 ++++++++++++--- apps/flowlord/handler/about.tmpl | 2 +- apps/flowlord/handler/alert.tmpl | 2 +- apps/flowlord/handler/files.tmpl | 2 +- apps/flowlord/handler/task.tmpl | 2 +- 5 files changed, 16 insertions(+), 7 deletions(-) diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index 5cd4c16..66d986f 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -2,12 +2,13 @@ package main import ( "bytes" - _ "embed" + "embed" "encoding/json" "errors" "fmt" "html/template" "io" + "io/fs" "log" "net/http" "net/url" @@ -45,11 +46,19 @@ var 
HeaderTemplate string //go:embed handler/about.tmpl var AboutTemplate string +//go:embed handler/static/* +var StaticFiles embed.FS + func (tm *taskMaster) StartHandler() { router := chi.NewRouter() - // Static file serving - router.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.Dir("handler/static")))) + // Static file serving - serve embedded static files + // Create a sub-filesystem that strips the "handler/" prefix + staticFS, err := fs.Sub(StaticFiles, "handler/static") + if err != nil { + log.Fatal("Failed to create static filesystem:", err) + } + router.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.FS(staticFS)))) router.Get("/", tm.Info) router.Get("/info", tm.Info) diff --git a/apps/flowlord/handler/about.tmpl b/apps/flowlord/handler/about.tmpl index b992eae..5eacafa 100644 --- a/apps/flowlord/handler/about.tmpl +++ b/apps/flowlord/handler/about.tmpl @@ -4,7 +4,7 @@ Flowlord: About - + {{template "header" .}} diff --git a/apps/flowlord/handler/alert.tmpl b/apps/flowlord/handler/alert.tmpl index 59958ff..531681a 100644 --- a/apps/flowlord/handler/alert.tmpl +++ b/apps/flowlord/handler/alert.tmpl @@ -3,7 +3,7 @@ Flowlord: Alerts - + {{template "header" .}} diff --git a/apps/flowlord/handler/files.tmpl b/apps/flowlord/handler/files.tmpl index 1dc50c2..641e998 100644 --- a/apps/flowlord/handler/files.tmpl +++ b/apps/flowlord/handler/files.tmpl @@ -4,7 +4,7 @@ Flowlord: Files - + {{template "header" .}} diff --git a/apps/flowlord/handler/task.tmpl b/apps/flowlord/handler/task.tmpl index 7be5cc4..b6fd29a 100644 --- a/apps/flowlord/handler/task.tmpl +++ b/apps/flowlord/handler/task.tmpl @@ -4,7 +4,7 @@ Flowlord: Tasks - + {{template "header" .}} From 5fa19a7dcac77df5ef1710300cbeabf229a17e45 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Fri, 3 Oct 2025 14:00:09 -0600 Subject: [PATCH 17/40] resolve static file hosting vs local running --- apps/flowlord/handler.go | 6 ++++++ apps/flowlord/handler/about.tmpl | 2 +- 
apps/flowlord/handler/alert.tmpl | 2 +- apps/flowlord/handler/files.tmpl | 2 +- apps/flowlord/handler/task.tmpl | 2 +- apps/flowlord/handler_test.go | 7 +++++++ 6 files changed, 17 insertions(+), 4 deletions(-) diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index 66d986f..c9e0671 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -49,6 +49,8 @@ var AboutTemplate string //go:embed handler/static/* var StaticFiles embed.FS +var staticPath = "/static" + func (tm *taskMaster) StartHandler() { router := chi.NewRouter() @@ -420,6 +422,7 @@ func filesHTML(files []cache.FileMessage, date time.Time) []byte { "TotalTasks": totalTasks, "CurrentPage": "files", "PageTitle": "File Messages", + "staticPath": staticPath, } // Template functions @@ -572,6 +575,7 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri "CurrentResult": result, "CurrentPage": "task", "PageTitle": "Task Dashboard", + "staticPath": staticPath, } // Template functions @@ -666,6 +670,7 @@ func (tm *taskMaster) aboutHTML() []byte { "CurrentPage": "about", "DateValue": "", // About page doesn't need date "PageTitle": "System Information", + "staticPath": staticPath, } // Parse and execute template @@ -701,6 +706,7 @@ func alertHTML(tasks []cache.AlertRecord, date time.Time) []byte { "DateValue": date.Format("2006-01-02"), "Date": date.Format("Monday, January 2, 2006"), "PageTitle": "Task Alerts", + "staticPath": staticPath, } tmpl, err := template.New("alert").Parse(HeaderTemplate + AlertTemplate) diff --git a/apps/flowlord/handler/about.tmpl b/apps/flowlord/handler/about.tmpl index 5eacafa..c3e513b 100644 --- a/apps/flowlord/handler/about.tmpl +++ b/apps/flowlord/handler/about.tmpl @@ -4,7 +4,7 @@ Flowlord: About - + {{template "header" .}} diff --git a/apps/flowlord/handler/alert.tmpl b/apps/flowlord/handler/alert.tmpl index 531681a..ee2ace7 100644 --- a/apps/flowlord/handler/alert.tmpl +++ b/apps/flowlord/handler/alert.tmpl @@ -3,7 +3,7 
@@ Flowlord: Alerts - + {{template "header" .}} diff --git a/apps/flowlord/handler/files.tmpl b/apps/flowlord/handler/files.tmpl index 641e998..c423b1e 100644 --- a/apps/flowlord/handler/files.tmpl +++ b/apps/flowlord/handler/files.tmpl @@ -4,7 +4,7 @@ Flowlord: Files - + {{template "header" .}} diff --git a/apps/flowlord/handler/task.tmpl b/apps/flowlord/handler/task.tmpl index b6fd29a..0de9fe2 100644 --- a/apps/flowlord/handler/task.tmpl +++ b/apps/flowlord/handler/task.tmpl @@ -4,7 +4,7 @@ Flowlord: Tasks - + {{template "header" .}} diff --git a/apps/flowlord/handler_test.go b/apps/flowlord/handler_test.go index 692577d..e7e0181 100644 --- a/apps/flowlord/handler_test.go +++ b/apps/flowlord/handler_test.go @@ -17,6 +17,12 @@ import ( const testPath = "../../internal/test" +func TestMain(t *testing.M) { + staticPath = "./static" + t.Run() +} + + // loadTaskViewData loads TaskView data from a JSON file func loadTaskViewData(filename string) ([]cache.TaskView, error) { data, err := os.ReadFile(filename) @@ -354,6 +360,7 @@ func TestMeta_UnmarshalJSON(t *testing.T) { // TestWebAlertPreview generates an HTML preview of the alert template for visual inspection // this provides an html file func TestAlertHTML(t *testing.T) { + // Create sample alert data to showcase the templating sampleAlerts := []cache.AlertRecord{ { From b731ffbf562cadefa984f1758ff69e5c27994c23 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Wed, 8 Oct 2025 15:20:34 -0600 Subject: [PATCH 18/40] backup sqlite db on close --- apps/flowlord/Workflow_plan.md | 363 ++++++++++++++ apps/flowlord/cache/sqlite.go | 163 +++++-- apps/flowlord/cache/workflow.go | 29 +- apps/flowlord/handler.go | 8 +- apps/flowlord/handler/about.tmpl | 2 +- apps/flowlord/main.go | 17 +- apps/flowlord/sqlite_spec.md | 812 ++++++++----------------------- apps/flowlord/taskmaster.go | 12 +- apps/utils/fz/main.go | 38 +- 9 files changed, 736 insertions(+), 708 deletions(-) create mode 100644 apps/flowlord/Workflow_plan.md diff 
--git a/apps/flowlord/Workflow_plan.md b/apps/flowlord/Workflow_plan.md new file mode 100644 index 0000000..bace238 --- /dev/null +++ b/apps/flowlord/Workflow_plan.md @@ -0,0 +1,363 @@ +# Workflow SQLite Persistence Plan +Purpose: Detailed specification for converting the in-memory workflow.Cache system to use SQLite persistence while maintaining exact interface compatibility. + +## Current Status +**❌ NOT IMPLEMENTED** - The workflow system currently uses in-memory `workflow.Cache` implementation. + +## Overview +This plan outlines the conversion of the existing in-memory workflow cache to a SQLite-based persistent system while maintaining 100% interface compatibility. The goal is zero breaking changes - all existing code continues to work unchanged. + +## Database Schema Design + +### Workflow File Tracking +```sql +-- Workflow file tracking (replaces in-memory workflow file cache) +CREATE TABLE workflow_files ( + file_path TEXT PRIMARY KEY, + file_hash TEXT NOT NULL, + loaded_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + last_modified TIMESTAMP, + is_active BOOLEAN DEFAULT TRUE +); + +-- Workflow phases (matches Phase struct exactly) +CREATE TABLE workflow_phases ( + workflow_file_path TEXT NOT NULL, + task TEXT NOT NULL, -- topic:job format (e.g., "data-load:hourly") + depends_on TEXT, + rule TEXT, -- URI query parameters (e.g., "cron=0 0 * * *&offset=1h") + template TEXT, + retry INTEGER DEFAULT 0, -- threshold of times to retry + status TEXT, -- phase status info (warnings, errors, validation messages) + PRIMARY KEY (workflow_file_path, task) +); + +-- Task relationships are generated dynamically from workflow_phases +-- No separate table needed - relationships are derived from depends_on field +-- This approach is simpler, more maintainable, and always up-to-date + +-- Indexes for performance +CREATE INDEX idx_workflow_phases_task ON workflow_phases (task); +CREATE INDEX idx_workflow_phases_depends_on ON workflow_phases (depends_on); +CREATE INDEX 
idx_workflow_phases_status ON workflow_phases (status); +``` + +## Benefits of Human-Readable Keys + +### 1. Direct File Path References +- `workflow_files.file_path` is the primary key (e.g., "workflows/data-load.toml") +- No need to join tables to see which file a phase belongs to +- Easy to identify and debug workflow issues + +### 2. Composite Primary Keys for Phases +- `(workflow_file_path, task_name, job_name)` uniquely identifies each phase +- Directly readable: `("workflows/data-load.toml", "data-load", "hourly")` +- No surrogate IDs to remember or map + +### 3. Dynamic Task Relationships +- Task relationships are generated from `depends_on` field in workflow_phases +- No separate table to maintain or keep in sync +- Always up-to-date with current workflow configuration +- Simpler schema with fewer tables and foreign keys + +### 4. Phase Status Tracking +- `status` field stores validation messages, warnings, and errors for each phase +- Replaces console logging with persistent database storage +- Enables querying and filtering phases by status +- Provides better debugging and monitoring capabilities + +**Status Field Usage Examples:** +- `"invalid phase: rule and dependsOn are blank"` +- `"no valid rule found: cron=invalid"` +- `"parent task not found: data-load"` +- `"ignored rule: cron=0 0 * * *"` +- `"warning: retry count exceeds recommended limit"` +- `""` (empty string for phases with no issues) + +## Example Queries + +```sql +-- Find all phases in a specific workflow file +SELECT task, depends_on, rule, status +FROM workflow_phases +WHERE workflow_file_path = 'workflows/data-load.toml'; + +-- Find phases that depend on a specific task type +SELECT workflow_file_path, task, rule, status +FROM workflow_phases +WHERE depends_on = 'data-load'; + +-- Find phases by topic (using LIKE for topic:job matching) +SELECT workflow_file_path, task, depends_on, rule, status +FROM workflow_phases +WHERE task LIKE 'data-load:%'; + +-- Find phases with warnings or 
errors +SELECT workflow_file_path, task, status +FROM workflow_phases +WHERE status IS NOT NULL AND status != ''; + +-- Find phases with specific status messages +SELECT workflow_file_path, task, status +FROM workflow_phases +WHERE status LIKE '%warning%' OR status LIKE '%error%'; + +-- Generate task relationships dynamically (parent -> child) +SELECT + parent.depends_on as parent_task, + parent.task as child_task, + parent.workflow_file_path, + parent.rule as child_rule, + parent.status as child_status +FROM workflow_phases parent +WHERE parent.depends_on IS NOT NULL AND parent.depends_on != ''; + +-- Find all children of a specific task +SELECT + child.task as child_task, + child.workflow_file_path, + child.rule as child_rule, + child.status as child_status +FROM workflow_phases child +WHERE child.depends_on = 'data-load'; + +-- Find all parents of a specific task +SELECT + parent.depends_on as parent_task, + parent.workflow_file_path, + parent.rule as parent_rule, + parent.status as parent_status +FROM workflow_phases parent +WHERE parent.task = 'data-load:hourly'; + +-- Get workflow file info with phase count and status summary +SELECT + wf.file_path, + wf.file_hash, + wf.loaded_at, + COUNT(wp.task) as phase_count, + COUNT(CASE WHEN wp.status IS NOT NULL AND wp.status != '' THEN 1 END) as phases_with_status +FROM workflow_files wf +LEFT JOIN workflow_phases wp ON wf.file_path = wp.workflow_file_path +GROUP BY wf.file_path; +``` + +## Maintained Interface Design + +The new SQLite-based implementation will maintain the exact same interface as the current `workflow.Cache`: + +```go +// Keep existing workflow.Cache interface unchanged +type Cache interface { + // Existing methods remain exactly the same + Search(task, job string) (path string, ph Phase) + Get(t task.Task) Phase + Children(t task.Task) []Phase + Refresh() (changedFiles []string, err error) + IsDir() bool + Close() error +} + +// Keep existing Phase struct unchanged +type Phase struct { + Task string 
// Should use Topic() and Job() for access + Rule string + DependsOn string // Task that the previous workflow depends on + Retry int + Template string // template used to create the task +} + +// Keep existing Workflow struct unchanged +type Workflow struct { + Checksum string // md5 hash for the file to check for changes + Phases []Phase `toml:"phase"` +} +``` + +## Implementation Strategy + +- **Same Package**: Keep everything in `workflow` package +- **Same Structs**: Maintain `Phase` and `Workflow` structs exactly as they are +- **Same Methods**: All existing methods return the same types and behavior +- **SQLite Backend**: Replace in-memory storage with SQLite persistence +- **Zero Breaking Changes**: All existing unit tests continue to work unchanged + +## Key Benefits + +1. **Persistence**: Workflow configurations survive restarts +2. **Historical Tracking**: Full audit trail of task relationships +3. **Performance**: Indexed queries for fast dependency resolution +4. **Scalability**: No memory limitations for large workflow sets +5. **Debugging**: Rich querying capabilities for troubleshooting +6. **Simplified Architecture**: Single SQLite instance replaces in-memory cache + +## Implementation Plan + +### Phase 1: Update Workflow Package +1. Modify existing `workflow.Cache` to use SQLite backend +2. Keep all existing interfaces, structs, and method signatures unchanged +3. Add SQLite persistence to workflow file loading +4. Implement task relationship tracking within existing structure + +### Phase 2: Update Flowlord Integration +1. No changes needed to `taskmaster.go` - same interface +2. Update workflow loading to use SQLite persistence +3. Add task relationship recording to existing task processing +4. Maintain all existing method calls and behavior + +### Phase 3: Handler Updates +1. Update handlers to query SQLite for workflow data +2. Add task relationship queries to existing endpoints +3. Enhance alert system with SQLite-based data +4. 
Maintain existing response formats + +### Phase 4: Testing and Validation +1. All existing unit tests continue to work unchanged +2. Add SQLite-specific integration tests +3. Performance testing for SQLite queries +4. Migration testing from existing workflow files + +## Migration Strategy + +### Seamless Replacement Approach +- Keep existing `workflow.Cache` interface and structs +- Replace in-memory storage with SQLite persistence +- Zero breaking changes to existing code +- All unit tests continue to work without modification + +### Key Implementation Details + +```go +// Keep existing Cache struct, add SQLite backend +type Cache struct { + db *sql.DB + path string + isDir bool + fOpts file.Options + mutex sync.RWMutex + // Remove: Workflows map[string]Workflow +} + +// Keep existing methods with SQLite implementation using simplified schema +func (c *Cache) Search(task, job string) (path string, ph Phase) { + // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status + // FROM workflow_phases WHERE task = ? OR task LIKE ? + // (where ? is either exact match or topic:job format) + // Return same results as before, with status info available +} + +func (c *Cache) Get(t task.Task) Phase { + // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status + // FROM workflow_phases WHERE task = ? OR task LIKE ? + // (where ? is either exact match or topic:job format) + // Return same Phase struct with status info +} + +func (c *Cache) Children(t task.Task) []Phase { + // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status + // FROM workflow_phases WHERE depends_on = ? OR depends_on LIKE ? + // (where ? 
matches the task type or topic:job format) + // Return same []Phase slice with status info +} + +func (c *Cache) Refresh() (changedFiles []string, err error) { + // Check file hashes against workflow_files table using file_path as key + // Load changed files into SQLite using file_path as primary key + // Return same changedFiles list +} + +// Dynamic task relationship queries (no separate table needed) +func (c *Cache) GetTaskRelationships(parentTask string) ([]Phase, error) { + // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry + // FROM workflow_phases WHERE depends_on = ? + // Returns all phases that depend on the parent task +} + +func (c *Cache) GetTaskDependencies(childTask string) ([]Phase, error) { + // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry + // FROM workflow_phases WHERE task = ? + // Returns the phase that defines the child task and its dependencies +} +``` + +### Database Schema Migration +- Add new tables alongside existing ones +- Migrate existing workflow data if needed +- Remove old tables after successful migration +- Maintain data integrity throughout process + +## Required Code Changes + +### 1. Package Imports +```go +// Keep existing import - no changes needed +import "github.com/pcelvng/task-tools/workflow" +``` + +### 2. TaskMaster Struct Update +```go +// Keep existing struct - no changes needed +type taskMaster struct { + // ... other fields + *workflow.Cache // Same as before + // ... other fields +} +``` + +### 3. Workflow Loading Changes +```go +// Keep existing code - no changes needed +if tm.Cache, err = workflow.New(tm.path, tm.fOpts); err != nil { + return fmt.Errorf("workflow setup %w", err) +} +``` + +### 4. Dependency Resolution Updates +```go +// Keep existing code - no changes needed +phase := tm.Cache.Get(task) +children := tm.Cache.Children(task) +``` + +### 5. 
Handler Updates +```go +// Keep existing code - no changes needed +// All existing method calls work the same +// Only internal implementation changes to use SQLite +``` + +## Technical Considerations + +### Performance +- Indexed queries for fast dependency resolution +- Prepared statements for security and performance +- Connection pooling for concurrent access +- WAL mode for better concurrent access + +### Data Integrity +- Foreign key constraints for data integrity +- Transaction support for atomic operations +- Proper error handling and rollback +- Data validation on insert/update + +### Monitoring +- Query performance tracking +- Database size monitoring +- Connection pool metrics +- Error rate tracking + +## Future Enhancements + +### Advanced Features +- Workflow versioning and history +- Phase execution statistics +- Dependency visualization +- Workflow validation and testing +- Hot reloading of workflow changes + +### Integration +- REST API endpoints for workflow management +- Web UI for workflow editing +- Workflow import/export functionality +- Integration with external workflow tools + diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index 1853142..9a52e36 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -5,6 +5,7 @@ import ( _ "embed" "encoding/json" "fmt" + "io" "log" "net/url" "sort" @@ -12,10 +13,12 @@ import ( "sync" "time" + "github.com/jbsmith7741/go-tools/appenderr" "github.com/pcelvng/task" "github.com/pcelvng/task/bus" _ "modernc.org/sqlite" + "github.com/pcelvng/task-tools/file" "github.com/pcelvng/task-tools/file/stat" "github.com/pcelvng/task-tools/tmpl" ) @@ -24,32 +27,89 @@ import ( var schema string type SQLite struct { - db *sql.DB - ttl time.Duration - mu sync.Mutex + LocalPath string + BackupPath string + + TaskTTL time.Duration `toml:"task-ttl" comment:"time that tasks are expected to have completed in. 
This values tells the cache how long to keep track of items and alerts if items haven't completed when the cache is cleared"` + Retention time.Duration // 90 days + + db *sql.DB + fOpts file.Options + //ttl time.Duration + mu sync.Mutex } -func NewSQLite(ttl time.Duration, dbPath string) (*SQLite, error) { - if ttl < time.Hour { - ttl = time.Hour +// Open the sqlite DB. If localPath doesn't exist then check if BackupPath exists and copy it to localPath +// ?: should this open the workflow file and load that into the database as well? +func (o *SQLite) Open(workflowPath string, fOpts file.Options) error { + o.fOpts = fOpts + if o.TaskTTL < time.Hour { + o.TaskTTL = time.Hour + } + + backupSts, _ := file.Stat(o.BackupPath, &fOpts) + localSts, _ := file.Stat(o.LocalPath, &fOpts) + + if localSts.Size == 0 && backupSts.Size > 0 { + log.Printf("Restoring local DB from backup %s", o.BackupPath) + // no local file but backup exists so copy it down + if err := copyFiles(o.BackupPath, o.LocalPath, fOpts); err != nil { + log.Println(err) // TODO: should this be fatal? 
+ } } // Open the database - db, err := sql.Open("sqlite", dbPath) + db, err := sql.Open("sqlite", o.LocalPath) if err != nil { - return nil, err + return err } + o.db = db - // Execute the schema + // Execute the schema if the migration version is not the same as the current schema version + //TODO: version the schema and migrate if needed _, err = db.Exec(schema) if err != nil { - return nil, err + return err } - return &SQLite{ - db: db, - ttl: ttl, - }, nil + //TODO: load workflow file into the database + + return nil +} + +func copyFiles(src, dst string, fOpts file.Options) error { + r, err := file.NewReader(src, &fOpts) + if err != nil { + return fmt.Errorf("init reader err: %w", err) + } + w, err := file.NewWriter(dst, &fOpts) + if err != nil { + return fmt.Errorf("init writer err: %w", err) + } + _, err = io.Copy(w, r) + if err != nil { + return fmt.Errorf("copy err: %w", err) + } + if err := w.Close(); err != nil { + return fmt.Errorf("close writer err: %w", err) + } + return r.Close() +} + +// Close the DB connection and copy the current file to the backup location +func (o *SQLite) Close() error { + errs := appenderr.New() + errs.Add(o.db.Close()) + if o.BackupPath != "" { + log.Printf("Backing up DB to %s", o.BackupPath) + errs.Add(o.Sync()) + } + return errs.ErrOrNil() +} + +// Sync the local DB to the backup location +func (o *SQLite) Sync() error { + return copyFiles(o.LocalPath, o.BackupPath, o.fOpts) } func (s *SQLite) Add(t task.Task) { @@ -64,7 +124,7 @@ func (s *SQLite) Add(t task.Task) { result, err := s.db.Exec(` INSERT INTO task_records (id, type, job, info, result, meta, msg, created, started, ended) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
- ON CONFLICT (type, job, id, created) + ON CONFLICT (TYPE, job, id, created) DO UPDATE SET result = excluded.result, meta = excluded.meta, @@ -86,12 +146,12 @@ func (s *SQLite) Add(t task.Task) { if rowsAffected == 0 { // This indicates a conflict occurred and the record was updated // Log this as it's unexpected for new task creation - log.Printf("WARNING: Task creation conflict detected - task %s:%s:%s at %s was updated instead of inserted", + log.Printf("WARNING: Task creation conflict detected - task %s:%s:%s at %s was updated instead of inserted", t.Type, t.Job, t.ID, t.Created) } } -func (s *SQLite) Get(id string) TaskJob { +func (s *SQLite) GetTask(id string) TaskJob { s.mu.Lock() defer s.mu.Unlock() @@ -179,7 +239,7 @@ func (s *SQLite) Recycle() Stat { created, started, ended FROM task_records WHERE created < ? - `, t.Add(-s.ttl)) + `, t.Add(-s.TaskTTL)) if err != nil { return Stat{} } @@ -257,7 +317,7 @@ func (s *SQLite) CheckIncompleteTasks() Stat { WHERE tr.created < ? AND tr.result = '' AND ar.id IS NULL - `, t.Add(-s.ttl)) + `, t.Add(-s.TaskTTL)) if err != nil { return Stat{} } @@ -276,7 +336,7 @@ func (s *SQLite) CheckIncompleteTasks() Stat { // Add incomplete task to alert list tasks = append(tasks, task) - + // Add alert directly (no need to check for duplicates since JOIN already filtered them) taskID := task.ID if taskID == "" { @@ -289,7 +349,7 @@ func (s *SQLite) CheckIncompleteTasks() Stat { INSERT INTO alert_records (task_id, task_time, task_type, job, msg) VALUES (?, ?, ?, ?, ?) 
`, taskID, taskTime, task.Type, task.Job, "INCOMPLETE: unfinished task detected") - + if err == nil { alertsAdded++ } @@ -356,10 +416,6 @@ func (s *SQLite) SendFunc(p bus.Producer) func(string, *task.Task) error { } } -func (s *SQLite) Close() error { - return s.db.Close() -} - // AddAlert stores an alert record in the database func (s *SQLite) AddAlert(t task.Task, message string) error { s.mu.Lock() @@ -385,7 +441,6 @@ func (s *SQLite) AddAlert(t task.Task, message string) error { return err } - // extractJobFromTask is a helper function to get job from task func extractJobFromTask(t task.Task) string { job := t.Job @@ -645,7 +700,7 @@ func (s *SQLite) GetFileMessages(limit int, offset int) ([]FileMessage, error) { defer s.mu.Unlock() query := ` - SELECT id, path, size, last_modified, received_at, task_time, task_ids, task_names + SELECT id, path, SIZE, last_modified, received_at, task_time, task_ids, task_names FROM file_messages ORDER BY received_at DESC LIMIT ? OFFSET ? @@ -691,7 +746,7 @@ func (s *SQLite) GetFileMessagesByDate(date time.Time) ([]FileMessage, error) { dateStr := date.Format("2006-01-02") query := ` - SELECT id, path, size, last_modified, received_at, task_time, task_ids, task_names + SELECT id, path, SIZE, last_modified, received_at, task_time, task_ids, task_names FROM file_messages WHERE DATE(received_at) = ? 
ORDER BY received_at DESC @@ -738,15 +793,15 @@ func (s *SQLite) GetFileMessagesWithTasks(limit int, offset int) ([]FileMessageW query := ` SELECT fm.id, fm.path, fm.task_time, fm.received_at, - json_extract(t.value, '$') as task_id, - tl.type as task_type, - tl.job as task_job, - tl.result as task_result, - tl.created as task_created, - tl.started as task_started, - tl.ended as task_ended + json_extract(t.value, '$') AS task_id, + tl.type AS task_type, + tl.job AS task_job, + tl.result AS task_result, + tl.created AS task_created, + tl.started AS task_started, + tl.ended AS task_ended FROM file_messages fm, - json_each(fm.task_ids) as t + json_each(fm.task_ids) AS t JOIN task_log tl ON json_extract(t.value, '$') = tl.id WHERE fm.task_ids IS NOT NULL ORDER BY fm.received_at DESC, fm.id @@ -814,30 +869,30 @@ func (s *SQLite) GetTasksByDate(date time.Time, taskType, job, result string) ([ defer s.mu.Unlock() dateStr := date.Format("2006-01-02") - + // Build query with optional filters using the tasks view query := `SELECT id, type, job, info, result, meta, msg, task_seconds, task_time, queue_seconds, queue_time, created, started, ended FROM tasks WHERE DATE(created) = ?` args := []interface{}{dateStr} - + if taskType != "" { query += " AND type = ?" args = append(args, taskType) } - + if job != "" { query += " AND job = ?" args = append(args, job) } - + if result != "" { query += " AND result = ?" args = append(args, result) } - + query += " ORDER BY created DESC" - + rows, err := s.db.Query(query, args...) if err != nil { return nil, err @@ -867,12 +922,12 @@ func (s *SQLite) GetTaskSummaryByDate(date time.Time) (map[string]*Stats, error) defer s.mu.Unlock() dateStr := date.Format("2006-01-02") - + query := `SELECT id, type, job, info, result, meta, msg, created, started, ended FROM task_records WHERE DATE(created) = ? 
ORDER BY created` - + rows, err := s.db.Query(query, dateStr) if err != nil { return nil, err @@ -928,19 +983,19 @@ type FileMessageWithTasks struct { // DBSizeInfo contains database size information type DBSizeInfo struct { - TotalSize string `json:"total_size"` - PageCount int64 `json:"page_count"` - PageSize int64 `json:"page_size"` - DBPath string `json:"db_path"` + TotalSize string `json:"total_size"` + PageCount int64 `json:"page_count"` + PageSize int64 `json:"page_size"` + DBPath string `json:"db_path"` } // TableStat contains information about a database table type TableStat struct { - Name string `json:"name"` - RowCount int64 `json:"row_count"` - SizeBytes int64 `json:"size_bytes"` - SizeHuman string `json:"size_human"` - Percentage float64 `json:"percentage"` + Name string `json:"name"` + RowCount int64 `json:"row_count"` + SizeBytes int64 `json:"size_bytes"` + SizeHuman string `json:"size_human"` + Percentage float64 `json:"percentage"` } // GetDBSize returns database size information @@ -954,7 +1009,7 @@ func (s *SQLite) GetDBSize() (*DBSizeInfo, error) { if err != nil { return nil, err } - + err = s.db.QueryRow("PRAGMA page_size").Scan(&pageSize) if err != nil { return nil, err diff --git a/apps/flowlord/cache/workflow.go b/apps/flowlord/cache/workflow.go index d7fbee0..49b5a05 100644 --- a/apps/flowlord/cache/workflow.go +++ b/apps/flowlord/cache/workflow.go @@ -19,24 +19,24 @@ func (p Phase) IsEmpty() bool { return false } func (p Phase) Job() string { return "" } func (p Phase) Topic() string { return "" } +/* // Workflow is a list of phases with a checksum for the file type Workflow struct { Checksum string // md5 hash for the file to check for changes Phases []Phase `toml:"phase"` } - -type Cache struct{} // replace with sqlite db +*/ // newWorkflow read the workflow file or directory and updates the underlying db cache -func newWorkflow(path, opts *file.Options) *Cache { return nil } +func newWorkflow(path, opts *file.Options) *SQLite { return 
nil } // Search -// Do we still need to return the path if its stored in a DB? +// Do we still need to return the path if it's stored in a DB? // should list return a list of matching phases rather than the first match? -func (c *Cache) Search(task, job string) (path string, ph Phase) { return "", Phase{} } +func (c *SQLite) Search(task, job string) (path string, ph Phase) { return "", Phase{} } -// Get Phase associated with task based on Task.Topic and Task.Job and workflow file -func (c *Cache) Get(t task.Task) Phase { +// GetPhase associated with task based on Task.Topic and Task.Job and workflow file +func (c *SQLite) GetPhase(t task.Task) Phase { return Phase{} } @@ -44,10 +44,10 @@ func (c *Cache) Get(t task.Task) Phase { // Empty slice will be returned if no children are found. // A task without a type or metadata containing the workflow info // will result in an error -func (c *Cache) Children(t task.Task) []Phase { return nil } +func (c *SQLite) Children(t task.Task) []Phase { return nil } // Refresh checks the cache and reloads any files if the checksum has changed. -func (c *Cache) Refresh() (changedFiles []string, err error) { return nil, nil } +func (c *SQLite) Refresh() (changedFiles []string, err error) { return nil, nil } // listAllFiles recursively lists all files in a folder and sub-folders // Keep as is? @@ -56,16 +56,9 @@ func listAllFiles(p string, opts *file.Options) ([]string, error) { return nil, // loadFile checks a files checksum and updates map if required // loaded file name is returned // Keep as is? 
-func (c *Cache) loadFile(path string, opts *file.Options) (f string, err error) { return "", nil } +func (c *SQLite) loadFile(path string, opts *file.Options) (f string, err error) { return "", nil } // filePath returns a filePath consist of all unique part // after the path set in the cache // may not be needed if we store in a DB -func (c *Cache) filePath(p string) (s string) { return "" } - -// Close the cache -// the chanel is used to force a wait until all routines are done -func (c *Cache) Close() error { - // close(c.done) - return nil -} +func (c *SQLite) filePath(p string) (s string) { return "" } diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index c9e0671..8dbe82b 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -53,7 +53,7 @@ var staticPath = "/static" func (tm *taskMaster) StartHandler() { router := chi.NewRouter() - + // Static file serving - serve embedded static files // Create a sub-filesystem that strips the "handler/" prefix staticFS, err := fs.Sub(StaticFiles, "handler/static") @@ -61,7 +61,7 @@ func (tm *taskMaster) StartHandler() { log.Fatal("Failed to create static filesystem:", err) } router.Handle("/static/*", http.StripPrefix("/static/", http.FileServer(http.FS(staticFS)))) - + router.Get("/", tm.Info) router.Get("/info", tm.Info) router.Get("/refresh", tm.refreshHandler) @@ -202,7 +202,7 @@ func (tm *taskMaster) Info(w http.ResponseWriter, r *http.Request) { } children := tm.getAllChildren(v.Topic(), f, v.Job()) - // todo: remove children from Cache + // todo: remove children from SQLite if _, found := sts.Workflow[f]; !found { sts.Workflow[f] = make(map[string]cEntry) } @@ -244,7 +244,7 @@ func (tm *taskMaster) refreshHandler(w http.ResponseWriter, _ *http.Request) { func (tm *taskMaster) taskHandler(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") - v := tm.taskCache.Get(id) + v := tm.taskCache.GetTask(id) b, _ := json.Marshal(v) w.Header().Add("Content-Type", 
"application/json") w.Write(b) diff --git a/apps/flowlord/handler/about.tmpl b/apps/flowlord/handler/about.tmpl index c3e513b..bb7d397 100644 --- a/apps/flowlord/handler/about.tmpl +++ b/apps/flowlord/handler/about.tmpl @@ -81,7 +81,7 @@ {{.RowCount}} {{.SizeBytes}} {{.SizeHuman}} - {{.Percentage}}% + {{printf "%.1f" .Percentage}}% {{end}} diff --git a/apps/flowlord/main.go b/apps/flowlord/main.go index ba7a64d..8355b61 100644 --- a/apps/flowlord/main.go +++ b/apps/flowlord/main.go @@ -13,6 +13,7 @@ import ( "github.com/pcelvng/task/bus" tools "github.com/pcelvng/task-tools" + "github.com/pcelvng/task-tools/apps/flowlord/cache" "github.com/pcelvng/task-tools/file" ) @@ -32,10 +33,10 @@ Field | Field name | Allowed values | Allowed special characters ) type options struct { - Workflow string `toml:"workflow" comment:"path to workflow file or directory"` - Refresh time.Duration `toml:"refresh" comment:"the workflow changes refresh duration value default is 15 min"` - TaskTTL time.Duration `toml:"task-ttl" comment:"time that tasks are expected to have completed in. This values tells the cache how long to keep track of items and alerts if items haven't completed when the cache is cleared"` - DBPath string `toml:"db-path" comment:"path to the sqlite DB file"` + Workflow string `toml:"workflow" comment:"path to workflow file or directory"` + Refresh time.Duration `toml:"refresh" comment:"the workflow changes refresh duration value default is 15 min"` + //TaskTTL time.Duration `toml:"task-ttl" comment:"time that tasks are expected to have completed in. 
This values tells the cache how long to keep track of items and alerts if items haven't completed when the cache is cleared"` + //DBPath string `toml:"db-path" comment:"path to the sqlite DB file"` DoneTopic string `toml:"done_topic" comment:"default is done"` FileTopic string `toml:"file_topic" comment:"file topic for file watching"` FailedTopic string `toml:"failed_topic" comment:"all retry failures published to this topic default is retry-failed, disable with '-'"` @@ -44,6 +45,8 @@ type options struct { Slack *Notification `toml:"slack"` Bus bus.Options `toml:"bus"` File *file.Options `toml:"file"` + + DB *cache.SQLite `toml:"sqlite"` } func main() { @@ -51,13 +54,15 @@ func main() { opts := &options{ Refresh: time.Minute * 15, - TaskTTL: 4 * time.Hour, DoneTopic: "done", Host: "localhost", - DBPath: "./tasks.db", FailedTopic: "retry-failed", File: file.NewOptions(), Slack: &Notification{}, + DB: &cache.SQLite{ + TaskTTL: 4 * time.Hour, + LocalPath: "./tasks.db", + }, } config.New(opts).Version(tools.String()).Description(description).LoadOrDie() diff --git a/apps/flowlord/sqlite_spec.md b/apps/flowlord/sqlite_spec.md index 16b0657..aa2b7c5 100644 --- a/apps/flowlord/sqlite_spec.md +++ b/apps/flowlord/sqlite_spec.md @@ -1,97 +1,57 @@ # SQLite Technical Specification Purpose: Technical specification for converting flowlord to fully utilize SQLite for troubleshooting, historical records and configuration management. 
+## Current Implementation Status + +**✅ COMPLETED:** +- Task Records: Single table with composite primary key implemented +- Alert Records: Simplified schema implemented and integrated +- File Messages: File processing history tracking implemented +- Web Dashboard: Alert, Files, and Task dashboards implemented +- Cache Integration: SQLite cache fully integrated into taskmaster + +**❌ NOT IMPLEMENTED:** +- Database Maintenance: Simple configuration-driven backup and retention system + ## Database Schema Design ### Alert Records Store individual alert records immediately when tasks are sent to the alert channel. Replace file-based reporting with database storage. +**CURRENT IMPLEMENTATION:** ```sql -CREATE TABLE alert_records ( +CREATE TABLE IF NOT EXISTS alert_records ( id INTEGER PRIMARY KEY AUTOINCREMENT, - task_id TEXT NOT NULL, - alert_type TEXT NOT NULL, -- 'retry_failed', 'alerted', 'unfinished', 'job_send_failed' + task_id TEXT, -- task ID (can be empty for job send failures) + task_time TIMESTAMP, -- task time (can be empty) task_type TEXT NOT NULL, -- task type for quick filtering job TEXT, -- task job for quick filtering - msg TEXT, -- alert message (can be task_msg or custom alert message) - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - task_created TIMESTAMP -- keep for alert timeline context + msg TEXT NOT NULL, -- alert message (contains alert context) + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ); -CREATE INDEX idx_alert_records_created_at ON alert_records (created_at); - --- Example queries for alert management - --- Startup check and day-based dashboard - get today's alerts -SELECT * FROM alert_records -WHERE date(created_at) = date('now') -ORDER BY created_at DESC; - --- Get alerts for specific date (for dashboard) -SELECT * FROM alert_records -WHERE date(created_at) = ? 
-ORDER BY created_at DESC; - --- Get full task details for an alert (when needed) -SELECT ar.*, t.* FROM alert_records ar -JOIN task_log t ON ar.task_id = t.id -WHERE ar.id = ?; - --- Raw data for compact summary (grouping done in Go) --- This provides the data for: "task.file-check: 3 2025/09/19T11-2025/09/19T13" -SELECT task_type, job, task_created, created_at -FROM alert_records -WHERE date(created_at) = date('now') -ORDER BY task_type, job, created_at; +CREATE INDEX IF NOT EXISTS idx_alert_records_created_at ON alert_records (created_at); ``` -### Workflow Phase Storage -Store loaded workflow files and phase configurations for dependency mapping and validation. - +**ORIGINAL SPECIFICATION (NOT IMPLEMENTED):** ```sql -CREATE TABLE workflow_files ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - file_path TEXT UNIQUE NOT NULL, - file_hash TEXT NOT NULL, - loaded_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - last_modified TIMESTAMP, - is_active BOOLEAN DEFAULT TRUE -); - -CREATE TABLE workflow_phases ( +CREATE TABLE alert_records ( id INTEGER PRIMARY KEY AUTOINCREMENT, - workflow_file_id INTEGER NOT NULL, - task_name TEXT NOT NULL, - job_name TEXT, - depends_on TEXT, - rule TEXT, - template TEXT, - retry_count INTEGER DEFAULT 0, + task_id TEXT NOT NULL, + alert_type TEXT NOT NULL, -- 'retry_failed', 'alerted', 'unfinished', 'job_send_failed' + task_type TEXT NOT NULL, -- task type for quick filtering + job TEXT, -- task job for quick filtering + msg TEXT, -- alert message (can be task_msg or custom alert message) created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - FOREIGN KEY (workflow_file_id) REFERENCES workflow_files(id), - UNIQUE(workflow_file_id, task_name, job_name) -); - -CREATE TABLE workflow_dependencies ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - parent_phase_id INTEGER NOT NULL, - child_phase_id INTEGER NOT NULL, - dependency_type TEXT DEFAULT 'direct', -- 'direct', 'conditional' - FOREIGN KEY (parent_phase_id) REFERENCES workflow_phases(id), - FOREIGN KEY 
(child_phase_id) REFERENCES workflow_phases(id), - UNIQUE(parent_phase_id, child_phase_id) + task_created TIMESTAMP -- keep for alert timeline context ); - -CREATE INDEX idx_workflow_phases_task ON workflow_phases (task_name, job_name); -CREATE INDEX idx_workflow_dependencies_parent ON workflow_dependencies (parent_phase_id); -CREATE INDEX idx_workflow_dependencies_child ON workflow_dependencies (child_phase_id); ``` ### File Topic Message History -Log every message that comes through the files topic with pattern matching results. +**✅ IMPLEMENTED** - File processing history tracking with pattern matching results. ```sql -CREATE TABLE file_messages ( +CREATE TABLE IF NOT EXISTS file_messages ( id INTEGER PRIMARY KEY AUTOINCREMENT, path TEXT NOT NULL, -- File path (e.g., "gs://bucket/path/file.json") size INTEGER, -- File size in bytes @@ -103,8 +63,8 @@ CREATE TABLE file_messages ( ); -- Indexes for efficient querying -CREATE INDEX idx_file_messages_path ON file_messages (path); -CREATE INDEX idx_file_messages_received ON file_messages (received_at); +CREATE INDEX IF NOT EXISTS idx_file_messages_path ON file_messages (path); +CREATE INDEX IF NOT EXISTS idx_file_messages_received ON file_messages (received_at); -- Example queries for file message history @@ -223,15 +183,11 @@ ORDER BY file_count DESC; ``` ### Enhanced Task Recording -Replace dual-table system with single table for simplified task tracking. +**✅ IMPLEMENTED** - Single table system for simplified task tracking. 
```sql --- Remove existing tables -DROP TABLE IF EXISTS events; -DROP TABLE IF EXISTS task_log; - -- Single table for all task records -CREATE TABLE task_records ( +CREATE TABLE IF NOT EXISTS task_records ( id TEXT, type TEXT, job TEXT, @@ -246,11 +202,11 @@ CREATE TABLE task_records ( ); -- Indexes for efficient querying -CREATE INDEX idx_task_records_type ON task_records (type); -CREATE INDEX idx_task_records_job ON task_records (job); -CREATE INDEX idx_task_records_created ON task_records (created); -CREATE INDEX idx_task_records_type_job ON task_records (type, job); -CREATE INDEX idx_task_records_date_range ON task_records (created, ended); +CREATE INDEX IF NOT EXISTS idx_task_records_type ON task_records (type); +CREATE INDEX IF NOT EXISTS idx_task_records_job ON task_records (job); +CREATE INDEX IF NOT EXISTS idx_task_records_created ON task_records (created); +CREATE INDEX IF NOT EXISTS idx_task_records_type_job ON task_records (type, job); +CREATE INDEX IF NOT EXISTS idx_task_records_date_range ON task_records (created, ended); -- Create a view that calculates task and queue times CREATE VIEW IF NOT EXISTS tasks AS @@ -307,432 +263,204 @@ WHERE created BETWEEN ? AND ?; - Log conflicts on task creation (unexpected duplicates) for monitoring - Maintain existing Cache interface for backward compatibility -### Workflow and Phase -This replaces the in-memory workflow.Cache system with a persistent SQLite-based approach while maintaining the exact same interface and naming conventions. 
- -#### Updated Schema Design -```sql --- Workflow file tracking (replaces in-memory workflow file cache) -CREATE TABLE workflow_files ( - file_path TEXT PRIMARY KEY, - file_hash TEXT NOT NULL, - loaded_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - last_modified TIMESTAMP, - is_active BOOLEAN DEFAULT TRUE -); - --- Workflow phases (matches Phase struct exactly) -CREATE TABLE workflow_phases ( - workflow_file_path TEXT NOT NULL, - task TEXT NOT NULL, -- topic:job format (e.g., "data-load:hourly") - depends_on TEXT, - rule TEXT, -- URI query parameters (e.g., "cron=0 0 * * *&offset=1h") - template TEXT, - retry INTEGER DEFAULT 0, -- threshold of times to retry - status TEXT, -- phase status info (warnings, errors, validation messages) - PRIMARY KEY (workflow_file_path, task), -); - --- Task relationships are generated dynamically from workflow_phases --- No separate table needed - relationships are derived from depends_on field --- This approach is simpler, more maintainable, and always up-to-date - --- Indexes for performance -CREATE INDEX idx_workflow_phases_task ON workflow_phases (task); -CREATE INDEX idx_workflow_phases_depends_on ON workflow_phases (depends_on); -CREATE INDEX idx_workflow_phases_status ON workflow_phases (status); -``` - -#### Benefits of Human-Readable Keys +## Database Maintenance Plan -**1. Direct File Path References** -- `workflow_files.file_path` is the primary key (e.g., "workflows/data-load.toml") -- No need to join tables to see which file a phase belongs to -- Easy to identify and debug workflow issues +### Simple Configuration-Driven Approach -**2. Composite Primary Keys for Phases** -- `(workflow_file_path, task_name, job_name)` uniquely identifies each phase -- Directly readable: `("workflows/data-load.toml", "data-load", "hourly")` -- No surrogate IDs to remember or map +The database maintenance system will be kept as simple as possible with all configuration defined in the Options struct at startup. 
No configuration values will be stored in the database except for the active migration script version. -**3. Dynamic Task Relationships** -- Task relationships are generated from `depends_on` field in workflow_phases -- No separate table to maintain or keep in sync -- Always up-to-date with current workflow configuration -- Simpler schema with fewer tables and foreign keys +**Important Note**: Database maintenance is non-critical to the application's core functionality. The system is designed to handle complete database loss gracefully and can start fresh without any issues. The database serves as a convenience for troubleshooting and historical records, but the application will continue to function normally even if the entire database is deleted and recreated. -**4. Phase Status Tracking** -- `status` field stores validation messages, warnings, and errors for each phase -- Replaces console logging with persistent database storage -- Enables querying and filtering phases by status -- Provides better debugging and monitoring capabilities - -**Status Field Usage Examples:** -- `"invalid phase: rule and dependsOn are blank"` -- `"no valid rule found: cron=invalid"` -- `"parent task not found: data-load"` -- `"ignored rule: cron=0 0 * * *"` -- `"warning: retry count exceeds recommended limit"` -- `""` (empty string for phases with no issues) - -**Example Queries (Much More Readable):** - -```sql --- Find all phases in a specific workflow file -SELECT task, depends_on, rule, status -FROM workflow_phases -WHERE workflow_file_path = 'workflows/data-load.toml'; - --- Find phases that depend on a specific task type -SELECT workflow_file_path, task, rule, status -FROM workflow_phases -WHERE depends_on = 'data-load'; - --- Find phases by topic (using LIKE for topic:job matching) -SELECT workflow_file_path, task, depends_on, rule, status -FROM workflow_phases -WHERE task LIKE 'data-load:%'; - --- Find phases with warnings or errors -SELECT workflow_file_path, task, status 
-FROM workflow_phases -WHERE status IS NOT NULL AND status != ''; - --- Find phases with specific status messages -SELECT workflow_file_path, task, status -FROM workflow_phases -WHERE status LIKE '%warning%' OR status LIKE '%error%'; - --- Generate task relationships dynamically (parent -> child) -SELECT - parent.depends_on as parent_task, - parent.task as child_task, - parent.workflow_file_path, - parent.rule as child_rule, - parent.status as child_status -FROM workflow_phases parent -WHERE parent.depends_on IS NOT NULL AND parent.depends_on != ''; - --- Find all children of a specific task -SELECT - child.task as child_task, - child.workflow_file_path, - child.rule as child_rule, - child.status as child_status -FROM workflow_phases child -WHERE child.depends_on = 'data-load'; - --- Find all parents of a specific task -SELECT - parent.depends_on as parent_task, - parent.workflow_file_path, - parent.rule as parent_rule, - parent.status as parent_status -FROM workflow_phases parent -WHERE parent.task = 'data-load:hourly'; - --- Get workflow file info with phase count and status summary -SELECT - wf.file_path, - wf.file_hash, - wf.loaded_at, - COUNT(wp.task) as phase_count, - COUNT(CASE WHEN wp.status IS NOT NULL AND wp.status != '' THEN 1 END) as phases_with_status -FROM workflow_files wf -LEFT JOIN workflow_phases wp ON wf.file_path = wp.workflow_file_path -GROUP BY wf.file_path; -``` - -#### Maintained Interface Design -The new SQLite-based implementation will maintain the exact same interface as the current `workflow.Cache`: +### Options Struct Configuration ```go -// Keep existing workflow.Cache interface unchanged -type Cache interface { - // Existing methods remain exactly the same - Search(task, job string) (path string, ph Phase) - Get(t task.Task) Phase - Children(t task.Task) []Phase - Refresh() (changedFiles []string, err error) - IsDir() bool - Close() error -} - -// Keep existing Phase struct unchanged -type Phase struct { - Task string // Should use 
Topic() and Job() for access - Rule string - DependsOn string // Task that the previous workflow depends on - Retry int - Template string // template used to create the task -} - -// Keep existing Workflow struct unchanged -type Workflow struct { - Checksum string // md5 hash for the file to check for changes - Phases []Phase `toml:"phase"` -} -``` - -#### Implementation Strategy -- **Same Package**: Keep everything in `workflow` package -- **Same Structs**: Maintain `Phase` and `Workflow` structs exactly as they are -- **Same Methods**: All existing methods return the same types and behavior -- **SQLite Backend**: Replace in-memory storage with SQLite persistence -- **Zero Breaking Changes**: All existing unit tests continue to work unchanged - -#### Key Benefits -1. **Persistence**: Workflow configurations survive restarts -2. **Historical Tracking**: Full audit trail of task relationships -3. **Performance**: Indexed queries for fast dependency resolution -4. **Scalability**: No memory limitations for large workflow sets -5. **Debugging**: Rich querying capabilities for troubleshooting -6. **Simplified Architecture**: Single SQLite instance replaces in-memory cache - -#### Implementation Plan - -**Phase 1: Update Workflow Package** -1. Modify existing `workflow.Cache` to use SQLite backend -2. Keep all existing interfaces, structs, and method signatures unchanged -3. Add SQLite persistence to workflow file loading -4. Implement task relationship tracking within existing structure - -**Phase 2: Update Flowlord Integration** -1. No changes needed to `taskmaster.go` - same interface -2. Update workflow loading to use SQLite persistence -3. Add task relationship recording to existing task processing -4. Maintain all existing method calls and behavior - -**Phase 3: Handler Updates** -1. Update handlers to query SQLite for workflow data -2. Add task relationship queries to existing endpoints -3. Enhance alert system with SQLite-based data -4. 
Maintain existing response formats - -**Phase 4: Testing and Validation** -1. All existing unit tests continue to work unchanged -2. Add SQLite-specific integration tests -3. Performance testing for SQLite queries -4. Migration testing from existing workflow files - -#### Migration Strategy - -**Seamless Replacement Approach:** -- Keep existing `workflow.Cache` interface and structs -- Replace in-memory storage with SQLite persistence -- Zero breaking changes to existing code -- All unit tests continue to work without modification - -**Key Implementation Details:** - -```go -// Keep existing Cache struct, add SQLite backend -type Cache struct { - db *sql.DB - path string - isDir bool - fOpts file.Options - mutex sync.RWMutex - // Remove: Workflows map[string]Workflow -} - -// Keep existing methods with SQLite implementation using simplified schema -func (c *Cache) Search(task, job string) (path string, ph Phase) { - // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status - // FROM workflow_phases WHERE task = ? OR task LIKE ? - // (where ? is either exact match or topic:job format) - // Return same results as before, with status info available -} - -func (c *Cache) Get(t task.Task) Phase { - // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status - // FROM workflow_phases WHERE task = ? OR task LIKE ? - // (where ? is either exact match or topic:job format) - // Return same Phase struct with status info -} - -func (c *Cache) Children(t task.Task) []Phase { - // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status - // FROM workflow_phases WHERE depends_on = ? OR depends_on LIKE ? - // (where ? 
matches the task type or topic:job format) - // Return same []Phase slice with status info -} - -func (c *Cache) Refresh() (changedFiles []string, err error) { - // Check file hashes against workflow_files table using file_path as key - // Load changed files into SQLite using file_path as primary key - // Return same changedFiles list -} - -// Dynamic task relationship queries (no separate table needed) -func (c *Cache) GetTaskRelationships(parentTask string) ([]Phase, error) { - // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry - // FROM workflow_phases WHERE depends_on = ? - // Returns all phases that depend on the parent task -} - -func (c *Cache) GetTaskDependencies(childTask string) ([]Phase, error) { - // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry - // FROM workflow_phases WHERE task = ? - // Returns the phase that defines the child task and its dependencies +type Options struct { + LocalPath string + BackupPath string + Retention time.Duration // 90 days } ``` -**Database Schema Migration:** -- Add new tables alongside existing ones -- Migrate existing workflow data if needed -- Remove old tables after successful migration -- Maintain data integrity throughout process +### Database Initialization Logic -#### Required Code Changes +The SQLite database will be initialized using `sqlite.Options` with an `Open() error` method instead of `NewSQLite`. The initialization process will: -**1. Package Imports** -```go -// Keep existing import - no changes needed -import "github.com/pcelvng/task-tools/workflow" -``` +1. **Check for existing backup**: Use `file.Stat()` to check if a backup file exists at `BackupPath` +2. **Compare file dates**: Compare the modification dates of the local database file vs the backup file using `stat.Stats.ModTime` +3. **Use latest version**: Initialize with the most recent database file (local or backup) +4. 
**Fallback to local**: If no backup exists or backup is older, use the local database file -**2. TaskMaster Struct Update** ```go -// Keep existing struct - no changes needed -type taskMaster struct { - // ... other fields - *workflow.Cache // Same as before - // ... other fields +// Database initialization logic +func (opts *Options) Open() error { + // Check if backup exists using file.Stat + backupStats, backupErr := file.Stat(opts.BackupPath, nil) + localStats, localErr := file.Stat(opts.LocalPath, nil) + + // If backup exists and local doesn't, copy backup to local + if backupErr == nil && localErr != nil { + return opts.copyBackupToLocal() + } + + // If both exist, compare modification times + if backupErr == nil && localErr == nil { + if backupStats.ModTime.After(localStats.ModTime) { + // Backup is newer, copy backup to local + return opts.copyBackupToLocal() + } + } + + // Use local database (either no backup or local is newer) + return opts.openLocal() } -``` -**3. Workflow Loading Changes** -```go -// Keep existing code - no changes needed -if tm.Cache, err = workflow.New(tm.path, tm.fOpts); err != nil { - return fmt.Errorf("workflow setup %w", err) +// copyBackupToLocal copies the backup file to the local path +func (opts *Options) copyBackupToLocal() error { + // Open backup file for reading + backupReader, err := file.NewReader(opts.BackupPath, nil) + if err != nil { + return fmt.Errorf("failed to open backup file: %w", err) + } + defer backupReader.Close() + + // Create local file for writing + localWriter, err := file.NewWriter(opts.LocalPath, nil) + if err != nil { + return fmt.Errorf("failed to create local file: %w", err) + } + defer localWriter.Close() + + // Copy backup to local + _, err = io.Copy(localWriter, backupReader) + if err != nil { + return fmt.Errorf("failed to copy backup to local: %w", err) + } + + // Open the local database + return opts.openLocal() } ``` -**4. 
Dependency Resolution Updates** -```go -// Keep existing code - no changes needed -phase := tm.Cache.Get(task) -children := tm.Cache.Children(task) -``` +### Default Configuration -**5. Handler Updates** -```go -// Keep existing code - no changes needed -// All existing method calls work the same -// Only internal implementation changes to use SQLite -``` - -## Database Maintenance - -### Backup and Restoration -```sql --- Create metadata table for backup tracking -CREATE TABLE backup_metadata ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - backup_type TEXT NOT NULL, -- 'scheduled', 'shutdown', 'manual' - backup_path TEXT NOT NULL, - backup_size INTEGER, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - verified BOOLEAN DEFAULT FALSE, - gcs_path TEXT -); +```toml +# Database Maintenance Configuration +local_path = "./tasks.db" +backup_path = "./backups/tasks.db" +retention = "2160h" # 90 days ``` -**Implementation Requirements**: -- Automated GCS backup during application shutdown -- Periodic backup scheduling (configurable intervals) -- Restoration logic comparing local vs GCS timestamps -- Database schema migration support -- Backup verification and integrity checks +### Migration Version Tracking -### Retention and Size Management ```sql --- Create retention policies table -CREATE TABLE retention_policies ( - table_name TEXT PRIMARY KEY, - retention_days INTEGER NOT NULL, - max_records INTEGER, - cleanup_enabled BOOLEAN DEFAULT TRUE, - last_cleanup TIMESTAMP +-- Simple migration version tracking (only config value stored in DB) +CREATE TABLE IF NOT EXISTS schema_migrations ( + version INTEGER PRIMARY KEY, + applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ); - --- Default retention policies -INSERT INTO retention_policies (table_name, retention_days, max_records) VALUES -('alert_records', 90, 100000), -('file_messages', 30, 50000), -('file_pattern_matches', 30, 50000), -('task_execution_log', 7, 500000), -('task_relationships', 7, 100000); ``` -## API Endpoints 
Specification +### Implementation Features -### Required REST Endpoints +**Backup System:** +- Local backup creation at application start and stop +- Automatic copying of backup to local if backup is newer than local database +- Backup verification and integrity checks +- Simple file-based backup management + +**Data Retention:** +- Single retention period applies to all tables (90 days default) +- Automatic cleanup based on age (retention duration) +- Graceful handling of incomplete tasks during cleanup +- Startup maintenance checks and cleanup + +**Startup Maintenance:** +- Check for existing backup and compare file dates +- Copy backup to local if it's newer than local database +- Migration version checking and application +- Database integrity verification +- Cleanup of expired data based on retention period + +### Maintenance Operations + +**Startup Operations:** +1. Check for backup file existence using file.Stat +2. Compare local vs backup file modification dates +3. Copy backup to local if backup is newer +4. Check and apply any pending schema migrations +5. Verify database integrity +6. Run cleanup based on retention period +7. Create initial backup + +**Runtime Operations:** +1. Periodic cleanup based on retention period + +**Shutdown Operations:** +1. Create final backup before shutdown +2. 
Verify backup integrity + +### Benefits of This Approach + +- **Simple Configuration**: All settings in Options struct, no database config storage +- **Minimal Database Schema**: Only migration version tracking in database +- **Flexible**: Easy to adjust policies without database changes +- **Non-Critical**: Database loss is not catastrophic - application continues normally +- **Self-Healing**: System can start fresh and rebuild database as needed +- **Maintainable**: Clear separation of configuration and data +- **Scalable**: Configurable limits prevent unbounded growth + +## Current Web Dashboard Implementation + +**✅ IMPLEMENTED** - Basic web dashboards for monitoring and troubleshooting. + +### Available Endpoints ``` -GET /api/metrics - Database size and performance metrics -GET /api/tasks - Task search and filtering -POST /api/tasks/search - Advanced task search -GET /api/alerts - Alert history and management -GET /api/files - File processing history -GET /api/workflows - Workflow configuration and status -GET /api/summary - Dashboard summary data -DELETE /api/cleanup - Manual cleanup operations -POST /api/backup - Manual backup trigger +GET /web/alert?date=YYYY-MM-DD - Alert dashboard for specific date +GET /web/files?date=YYYY-MM-DD - File processing dashboard for specific date +GET /web/task?date=YYYY-MM-DD - Task summary dashboard for specific date +GET /web/about - About page with system information +GET /info - JSON status information +GET /refresh - Manual workflow refresh trigger ``` -### Response Formats -```json -// GET /api/summary response -{ - "time_period": "24h", - "task_summary": { - "total": 1500, - "completed": 1200, - "failed": 50, - "in_progress": 250 - }, - "by_type": { - "data-load": {"completed": 400, "failed": 10, "avg_duration": "2m30s"}, - "transform": {"completed": 800, "failed": 40, "avg_duration": "5m15s"} - } -} +### Dashboard Features +- **Alert Dashboard**: Shows alerts grouped by task type with time ranges +- **Files 
Dashboard**: Displays file processing history with pattern matching results +- **Task Dashboard**: Shows task execution summary with filtering by type, job, result +- **Date Navigation**: All dashboards support date parameter for historical viewing +- **Responsive Design**: Basic HTML/CSS with embedded static files -// GET /api/alerts response -{ - "alerts": [ - { - "id": 123, - "task_id": "abc-123", - "alert_type": "retry_exhausted", - "severity": "critical", - "message": "Task failed after 3 retries", - "created_at": "2023-12-01T10:30:00Z", - "dashboard_link": "/dashboard/alerts/2023-12-01" - } - ], - "pagination": {"page": 1, "total": 150} -} -``` -## UI Component Specifications +## Current UI Implementation -### Summary Status Dashboard -- Time-based filtering (hour/day/week/month) -- Task breakdown by type and job with completion statistics -- Average execution time calculations and trends -- Error rate visualization and alerting thresholds +**✅ IMPLEMENTED** - Basic web dashboards with HTML templates and embedded static files. ### Alert Management Interface -#### Alert Dashboard Design -Replace the current file-based alert reporting with a comprehensive web dashboard for alert management and analysis. +#### Current Alert Dashboard Implementation +**✅ IMPLEMENTED** - Web-based alert dashboard with compact summary view. -#### Dashboard Components +**Current Features:** +- **Alert Summary View**: Compact grouped display by task type with time ranges +- **Date Navigation**: URL parameter `?date=YYYY-MM-DD` for historical viewing +- **HTML Templates**: Server-side rendering using embedded Go templates +- **Static Assets**: Embedded CSS and JavaScript files + +**Current Implementation:** +```go +// GET /web/alert?date=YYYY-MM-DD +func (tm *taskMaster) htmlAlert(w http.ResponseWriter, r *http.Request) { + dt, _ := time.Parse("2006-01-02", r.URL.Query().Get("date")) + if dt.IsZero() { + dt = time.Now() + } + alerts, err := tm.taskCache.GetAlertsByDate(dt) + // ... 
render HTML template +} +``` -**1. Alert Summary View (Compact)** +**Current Alert Summary Format:** ``` Flowlord Alerts - Today (2025/09/19) ════════════════════════════════════════════════════════════ @@ -743,140 +471,22 @@ task.data-load: 1 2025/09/19T14 Total: 13 alerts across 3 task types ``` -**Implementation:** -- Use simple SQL: `SELECT * FROM alert_records WHERE date(created_at) = date('now')` -- Process in Go to create compact summary (replaces current Slack formatting logic) -- Group by `task_type:job`, count occurrences, calculate time ranges -- Display both count and time span (first alert → last alert) - -**2. Detailed Alert Timeline** -- Chronological list of all alerts for the selected day -- Expandable rows showing full task details (via JOIN with task_log) -- Click-through to individual task execution details -- Color coding by alert_type: retry_exhausted (red), alert_result (orange), unfinished (yellow) - -**3. Alert Filtering and Navigation** -- Date picker for viewing historical alerts -- Filter by alert_type, task_type, job -- Quick links: Today, Yesterday, Last 7 days -- Search by task_id or message content - -#### Dashboard Implementation - -**Simple HTML Rendering Approach:** -- Go queries SQLite directly and renders HTML via templates -- No JSON APIs needed for dashboard UI -- Vanilla JavaScript only for basic interactions (date picker, auto-refresh) -- Leverage existing `handler/alert.tmpl` pattern - -**Page Structure:** -```html -/alerts - Today's alert dashboard (default) -/alerts/2025-09-19 - Specific date dashboard -/alerts/summary - Compact summary view only -``` - -**Implementation Pattern:** -```go -func (tm *taskMaster) alertDashboard(w http.ResponseWriter, r *http.Request) { - date := chi.URLParam(r, "date") // or default to today - alerts := tm.taskCache.GetAlertsByDate(date) // single query - summary := buildCompactSummary(alerts) // process in memory, same logic as current Slack formatting - - data := struct { - Date string - 
Alerts []AlertRecord - Summary []SummaryLine // processed summary data - Total int - }{date, alerts, summary, len(alerts)} - - tmpl.Execute(w, data) // render HTML directly -} - -// Process alerts in memory to create compact summary -func buildCompactSummary(alerts []AlertRecord) []SummaryLine { - groups := make(map[string]*SummaryLine) - for _, alert := range alerts { - key := alert.TaskType + ":" + alert.Job - if summary, exists := groups[key]; exists { - summary.Count++ - summary.updateTimeRange(alert.TaskCreated) - } else { - groups[key] = &SummaryLine{ - Key: key, Count: 1, - FirstTime: alert.TaskCreated, - LastTime: alert.TaskCreated, - } - } - } - return mapToSlice(groups) // convert to sorted slice -} -``` - -**Optional API Endpoint (for troubleshooting only):** -``` -GET /api/alerts?date=YYYY-MM-DD&format=json - JSON output for debugging/scripts -``` +### File Processing Dashboard -**Features:** -- Server-side rendering for fast page loads -- Simple date navigation via URL parameters -- Auto-refresh via basic JavaScript setInterval -- Minimal client-side complexity +**✅ IMPLEMENTED** - File processing history with pattern matching results. -#### Startup Alert Integration +**Current Features:** +- **File History**: Shows all files processed with size, timestamps, and task matches +- **Pattern Matching**: Displays which tasks were created from each file +- **Date Filtering**: Historical file processing data by date +- **JSON Arrays**: Task IDs and names stored as JSON for efficient querying -**On Application Start:** -1. Query today's alerts: `SELECT * FROM alert_records WHERE date(created_at) = date('now')` -2. If alerts found, generate compact summary in existing Slack format -3. Send single startup notification: "Found X alerts from today" + summary + dashboard link -4. 
Dashboard link: `/alerts` (always points to today) +### Task Summary Dashboard -**Benefits:** -- Replaces file-based alert reports entirely -- Real-time alert visibility via web dashboard -- Maintains familiar compact summary format for Slack -- Historical alert analysis and trending -- Better debugging with full task context -- Mobile-friendly alert monitoring +**✅ IMPLEMENTED** - Task execution summary with filtering capabilities. -### File Processing Dashboard -- Searchable file processing history with pattern match results -- File processing timeline and status indicators -- Pattern rule debugging and performance metrics -- File processing success/failure analytics - -### Workflow Visualization -- Interactive dependency graphs showing phase relationships -- Phase configuration validation and issue highlighting -- Next scheduled execution times and cron schedules -- Workflow performance analytics and bottleneck identification - -### Task Search and Management -- Advanced search by type, job, result, uniqueID, time range -- Task lifecycle tracking and execution history -- Retry and failure analysis with root cause identification -- Bulk operations for task management - -## Technical Architecture - -### Database Design Principles -- Single SQLite file for simplicity and performance -- Optimized indexes for common query patterns -- Prepared statements for security and performance -- Batch operations for high-throughput scenarios -- Foreign key constraints for data integrity - -### Workflow Architecture Strategy -- Complete replacement of in-memory storage with SQLite persistence -- Maintain exact same workflow.Cache interface and behavior -- Simplified architecture with single SQLite instance -- Enhanced functionality with persistent task relationship tracking -- Zero breaking changes - all existing code continues to work - -### Performance Considerations -- Connection pooling and prepared statement caching -- Asynchronous operations for non-critical writes -- 
Query optimization with proper indexing strategy -- WAL mode for better concurrent access -- Vacuum and analyze operations for maintenance \ No newline at end of file +**Current Features:** +- **Task Filtering**: Filter by type, job, result status +- **Time Calculations**: Task duration and queue time calculations via SQL view +- **Date Range**: View tasks for specific dates +- **Execution History**: Complete task lifecycle tracking \ No newline at end of file diff --git a/apps/flowlord/taskmaster.go b/apps/flowlord/taskmaster.go index ab517fb..c652dca 100644 --- a/apps/flowlord/taskmaster.go +++ b/apps/flowlord/taskmaster.go @@ -98,15 +98,14 @@ func New(opts *options) *taskMaster { if opts.Slack.MaxFrequency <= opts.Slack.MinFrequency { opts.Slack.MaxFrequency = 16 * opts.Slack.MinFrequency } - db, err := cache.NewSQLite(opts.TaskTTL, opts.DBPath) - if err != nil { + if err := opts.DB.Open(*opts.File); err != nil { log.Fatal("db init", err) } opts.Slack.file = opts.File tm := &taskMaster{ initTime: time.Now(), - taskCache: db, + taskCache: opts.DB, path: opts.Workflow, doneTopic: opts.DoneTopic, failedTopic: opts.FailedTopic, @@ -206,8 +205,7 @@ func (tm *taskMaster) Run(ctx context.Context) (err error) { go tm.handleNotifications(tm.alerts, ctx) <-ctx.Done() log.Println("shutting down") - return nil - + return tm.taskCache.Close() } func validatePhase(p workflow.Phase) string { @@ -473,10 +471,10 @@ func (tm *taskMaster) handleNotifications(taskChan chan task.Task, ctx context.C dur := tm.slack.MinFrequency for ; ; time.Sleep(dur) { var err error - + // Check for incomplete tasks and add them to alerts tm.taskCache.CheckIncompleteTasks() - + // Get all alerts (including newly added incomplete task alerts) alerts, err = tm.taskCache.GetAlertsByDate(time.Now()) if err != nil { diff --git a/apps/utils/fz/main.go b/apps/utils/fz/main.go index 959d43d..ab89065 100644 --- a/apps/utils/fz/main.go +++ b/apps/utils/fz/main.go @@ -2,15 +2,17 @@ package main import ( "fmt" 
- "github.com/pcelvng/task-tools/slack" + "io" "log" "os" - "path/filepath" "strconv" "strings" "time" + "github.com/pcelvng/task-tools/slack" + "github.com/hydronica/go-config" + "github.com/pcelvng/task-tools/file" "github.com/pcelvng/task-tools/file/stat" ) @@ -42,6 +44,8 @@ func main() { err = ls(f1, &conf) case "cat": err = cat(f1, &conf) + case "stat": + err = stats(f1, &conf) case "cp": err = cp(f1, f2, &conf) case "slack": @@ -146,26 +150,26 @@ func cp(from, to string, opt *file.Options) error { if to == "" || from == "" { return fmt.Errorf(usage) } - sts, _ := file.Stat(to, opt) - if sts.IsDir { - _, fName := filepath.Split(from) - to = strings.TrimRight(to, "/") + "/" + fName - } r, err := file.NewReader(from, opt) if err != nil { - return fmt.Errorf("reader init for %s %w", from, err) + return fmt.Errorf("init reader err: %w", err) } w, err := file.NewWriter(to, opt) if err != nil { - return fmt.Errorf("writer init for %s %w", to, err) + return fmt.Errorf("init writer err: %w", err) } - - s := file.NewScanner(r) - for s.Scan() { - if err := w.WriteLine(s.Bytes()); err != nil { - w.Abort() - return fmt.Errorf("write error: %w", err) - } + _, err = io.Copy(w, r) + if err != nil { + return fmt.Errorf("copy err: %w", err) + } + if err := w.Close(); err != nil { + return fmt.Errorf("close writer err: %w", err) } - return w.Close() + return r.Close() +} + +func stats(path string, opt *file.Options) error { + sts, err := file.Stat(path, opt) + fmt.Println(sts.JSONString()) + return err } From 4b469e4f14c88598407f94a5a8fff543217a35f0 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Wed, 8 Oct 2025 16:22:38 -0600 Subject: [PATCH 19/40] 1st draft of workflows in sqlite --- apps/flowlord/Workflow_plan.md | 54 +-- apps/flowlord/cache/schema.sql | 28 +- apps/flowlord/cache/sqlite.go | 16 +- apps/flowlord/cache/workflow.go | 600 ++++++++++++++++++++++++++++++-- apps/flowlord/handler.go | 19 +- apps/flowlord/taskmaster.go | 43 ++- 6 files changed, 685 insertions(+), 
75 deletions(-) diff --git a/apps/flowlord/Workflow_plan.md b/apps/flowlord/Workflow_plan.md index bace238..c219c21 100644 --- a/apps/flowlord/Workflow_plan.md +++ b/apps/flowlord/Workflow_plan.md @@ -176,11 +176,12 @@ type Workflow struct { ## Implementation Strategy -- **Same Package**: Keep everything in `workflow` package +- **Same Package**: Keep everything in `cache` package (not workflow package) - **Same Structs**: Maintain `Phase` and `Workflow` structs exactly as they are - **Same Methods**: All existing methods return the same types and behavior -- **SQLite Backend**: Replace in-memory storage with SQLite persistence +- **SQLite Backend**: Add workflow methods directly to existing `SQLite` struct - **Zero Breaking Changes**: All existing unit tests continue to work unchanged +- **No Interface Switching**: Do not create new interfaces or compatibility layers ## Key Benefits @@ -228,55 +229,55 @@ type Workflow struct { ### Key Implementation Details ```go -// Keep existing Cache struct, add SQLite backend -type Cache struct { - db *sql.DB - path string - isDir bool +// Add workflow methods directly to existing SQLite struct +type SQLite struct { + LocalPath string + BackupPath string + TaskTTL time.Duration + Retention time.Duration + db *sql.DB fOpts file.Options - mutex sync.RWMutex - // Remove: Workflows map[string]Workflow + mu sync.Mutex + + // Add workflow-specific fields + workflowPath string + isDir bool } -// Keep existing methods with SQLite implementation using simplified schema -func (c *Cache) Search(task, job string) (path string, ph Phase) { +// Add workflow methods to existing SQLite struct +func (s *SQLite) Search(task, job string) (path string, ph Phase) { // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status // FROM workflow_phases WHERE task = ? OR task LIKE ? // (where ? 
is either exact match or topic:job format) // Return same results as before, with status info available } -func (c *Cache) Get(t task.Task) Phase { +func (s *SQLite) Get(t task.Task) Phase { // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status // FROM workflow_phases WHERE task = ? OR task LIKE ? // (where ? is either exact match or topic:job format) // Return same Phase struct with status info } -func (c *Cache) Children(t task.Task) []Phase { +func (s *SQLite) Children(t task.Task) []Phase { // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status // FROM workflow_phases WHERE depends_on = ? OR depends_on LIKE ? // (where ? matches the task type or topic:job format) // Return same []Phase slice with status info } -func (c *Cache) Refresh() (changedFiles []string, err error) { +func (s *SQLite) Refresh() (changedFiles []string, err error) { // Check file hashes against workflow_files table using file_path as key // Load changed files into SQLite using file_path as primary key // Return same changedFiles list } -// Dynamic task relationship queries (no separate table needed) -func (c *Cache) GetTaskRelationships(parentTask string) ([]Phase, error) { - // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry - // FROM workflow_phases WHERE depends_on = ? - // Returns all phases that depend on the parent task +func (s *SQLite) IsDir() bool { + return s.isDir } -func (c *Cache) GetTaskDependencies(childTask string) ([]Phase, error) { - // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry - // FROM workflow_phases WHERE task = ? - // Returns the phase that defines the child task and its dependencies +func (s *SQLite) Close() error { + // Existing close logic + any workflow cleanup } ``` @@ -306,10 +307,13 @@ type taskMaster struct { ### 3. 
Workflow Loading Changes ```go -// Keep existing code - no changes needed -if tm.Cache, err = workflow.New(tm.path, tm.fOpts); err != nil { +// Update to use SQLite struct directly instead of workflow.New() +// The SQLite struct will implement the workflow.Cache interface +if tm.Cache, err = tm.taskCache; err != nil { return fmt.Errorf("workflow setup %w", err) } +// Or assign directly since SQLite now implements the interface +tm.Cache = tm.taskCache ``` ### 4. Dependency Resolution Updates diff --git a/apps/flowlord/cache/schema.sql b/apps/flowlord/cache/schema.sql index 8a82d63..d8d25d4 100644 --- a/apps/flowlord/cache/schema.sql +++ b/apps/flowlord/cache/schema.sql @@ -80,4 +80,30 @@ CREATE TABLE IF NOT EXISTS file_messages ( -- Indexes for efficient querying CREATE INDEX IF NOT EXISTS idx_file_messages_path ON file_messages (path); -CREATE INDEX IF NOT EXISTS idx_file_messages_received ON file_messages (received_at); \ No newline at end of file +CREATE INDEX IF NOT EXISTS idx_file_messages_received ON file_messages (received_at); + +-- Workflow file tracking (replaces in-memory workflow file cache) +CREATE TABLE IF NOT EXISTS workflow_files ( + file_path TEXT PRIMARY KEY, + file_hash TEXT NOT NULL, + loaded_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + last_modified TIMESTAMP, + is_active BOOLEAN DEFAULT TRUE +); + +-- Workflow phases (matches Phase struct exactly) +CREATE TABLE IF NOT EXISTS workflow_phases ( + workflow_file_path TEXT NOT NULL, + task TEXT NOT NULL, -- topic:job format (e.g., "data-load:hourly") + depends_on TEXT, + rule TEXT, -- URI query parameters (e.g., "cron=0 0 * * *&offset=1h") + template TEXT, + retry INTEGER DEFAULT 0, -- threshold of times to retry + status TEXT, -- phase status info (warnings, errors, validation messages) + PRIMARY KEY (workflow_file_path, task) +); + +-- Indexes for performance +CREATE INDEX IF NOT EXISTS idx_workflow_phases_task ON workflow_phases (task); +CREATE INDEX IF NOT EXISTS idx_workflow_phases_depends_on ON 
workflow_phases (depends_on); +CREATE INDEX IF NOT EXISTS idx_workflow_phases_status ON workflow_phases (status); \ No newline at end of file diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index 9a52e36..c0f6140 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -37,12 +37,17 @@ type SQLite struct { fOpts file.Options //ttl time.Duration mu sync.Mutex + + // Workflow-specific fields + workflowPath string + isDir bool } // Open the sqlite DB. If localPath doesn't exist then check if BackupPath exists and copy it to localPath -// ?: should this open the workflow file and load that into the database as well? +// Also initializes workflow path and determines if it's a directory func (o *SQLite) Open(workflowPath string, fOpts file.Options) error { o.fOpts = fOpts + o.workflowPath = workflowPath if o.TaskTTL < time.Hour { o.TaskTTL = time.Hour } @@ -73,8 +78,15 @@ func (o *SQLite) Open(workflowPath string, fOpts file.Options) error { } //TODO: load workflow file into the database + // Determine if workflow path is a directory + sts, err := file.Stat(workflowPath, &fOpts) + if err != nil { + return fmt.Errorf("problem with workflow path %s %w", workflowPath, err) + } + o.isDir = sts.IsDir + _, err = o.loadFile(o.workflowPath, &o.fOpts) - return nil + return err } func copyFiles(src, dst string, fOpts file.Options) error { diff --git a/apps/flowlord/cache/workflow.go b/apps/flowlord/cache/workflow.go index 49b5a05..2efe90e 100644 --- a/apps/flowlord/cache/workflow.go +++ b/apps/flowlord/cache/workflow.go @@ -1,12 +1,22 @@ package cache import ( + "database/sql" + "fmt" + "io" + "net/url" + "path/filepath" + "strings" + + "github.com/hydronica/toml" + "github.com/jbsmith7741/go-tools/appenderr" "github.com/pcelvng/task" "github.com/pcelvng/task-tools/file" + "github.com/pcelvng/task-tools/workflow" ) -// Phase is same as workflow.Phase +// Phase represents a workflow phase (same as workflow.Phase) type Phase struct 
{ Task string // Should use Topic() and Job() for access Rule string @@ -15,28 +25,234 @@ type Phase struct { Template string // template used to create the task } -func (p Phase) IsEmpty() bool { return false } -func (p Phase) Job() string { return "" } -func (p Phase) Topic() string { return "" } +func (p Phase) IsEmpty() bool { + return p.Task == "" && p.Rule == "" && p.DependsOn == "" && p.Template == "" +} + +// Job portion of the Task +func (p Phase) Job() string { + s := strings.Split(p.Task, ":") + if len(s) > 1 { + return s[1] + } + + r, _ := url.ParseQuery(p.Rule) + if j := r.Get("job"); j != "" { + return j + } + return "" +} + +// Topic portion of the Task +func (p Phase) Topic() string { + s := strings.Split(p.Task, ":") + return s[0] +} -/* -// Workflow is a list of phases with a checksum for the file +// ToWorkflowPhase converts cache.Phase to workflow.Phase +func (p Phase) ToWorkflowPhase() workflow.Phase { + return workflow.Phase{ + Task: p.Task, + Rule: p.Rule, + DependsOn: p.DependsOn, + Retry: p.Retry, + Template: p.Template, + } +} + +// Workflow represents a workflow file with phases type Workflow struct { Checksum string // md5 hash for the file to check for changes Phases []Phase `toml:"phase"` } -*/ -// newWorkflow read the workflow file or directory and updates the underlying db cache -func newWorkflow(path, opts *file.Options) *SQLite { return nil } +// Workflow Cache Methods - implementing workflow.Cache interface + +// IsDir returns true if the original workflow path is a folder rather than a file +func (s *SQLite) IsDir() bool { + return s.isDir +} -// Search -// Do we still need to return the path if it's stored in a DB? -// should list return a list of matching phases rather than the first match? 
-func (c *SQLite) Search(task, job string) (path string, ph Phase) { return "", Phase{} } +// Search all workflows within the cache and return the first +// matching phase with the specific task and job (optional) +func (s *SQLite) Search(task, job string) (path string, ph Phase) { + if s == nil { + return "", Phase{} + } + + s.mu.Lock() + defer s.mu.Unlock() + + // Query for phases matching the task topic + query := ` + SELECT workflow_file_path, task, depends_on, rule, template, retry, status + FROM workflow_phases + WHERE task LIKE ? OR task = ? + ORDER BY workflow_file_path, task + LIMIT 1 + ` + + var rows *sql.Rows + var err error + + if job == "" { + // Search by topic only + rows, err = s.db.Query(query, task+":%", task) + } else { + // Search by topic:job + rows, err = s.db.Query(query, task+":"+job, task+":"+job) + } + + if err != nil { + return "", Phase{} + } + defer rows.Close() + + if rows.Next() { + var workflowPath, taskStr, dependsOn, rule, template, status string + var retry int + + err := rows.Scan(&workflowPath, &taskStr, &dependsOn, &rule, &template, &retry, &status) + if err != nil { + return "", Phase{} + } + + // Check if job matches if specified + if job != "" { + phase := Phase{ + Task: taskStr, + Rule: rule, + DependsOn: dependsOn, + Retry: retry, + Template: template, + } + if phase.Job() != job { + return "", Phase{} + } + } + + return workflowPath, Phase{ + Task: taskStr, + Rule: rule, + DependsOn: dependsOn, + Retry: retry, + Template: template, + } + } + + return "", Phase{} +} -// GetPhase associated with task based on Task.Topic and Task.Job and workflow file -func (c *SQLite) GetPhase(t task.Task) Phase { +// Get the Phase associated with the task +// looks for matching phases within a workflow defined in meta +// that matches the task Type and job. 
+func (s *SQLite) Get(t task.Task) Phase { + s.mu.Lock() + defer s.mu.Unlock() + + values, _ := url.ParseQuery(t.Meta) + key := values.Get("workflow") + job := t.Job + if job == "" { + job = values.Get("job") + } + + if key == "*" { // search all workflows for first match + query := ` + SELECT workflow_file_path, task, depends_on, rule, template, retry, status + FROM workflow_phases + WHERE task LIKE ? OR task = ? + ORDER BY workflow_file_path, task + LIMIT 1 + ` + + rows, err := s.db.Query(query, t.Type+":%", t.Type) + if err != nil { + return Phase{} + } + defer rows.Close() + + if rows.Next() { + var workflowPath, taskStr, dependsOn, rule, template, status string + var retry int + + err := rows.Scan(&workflowPath, &taskStr, &dependsOn, &rule, &template, &retry, &status) + if err != nil { + return Phase{} + } + + // Check if job matches if specified + if job != "" { + phase := Phase{ + Task: taskStr, + Rule: rule, + DependsOn: dependsOn, + Retry: retry, + Template: template, + } + if phase.Job() != job { + return Phase{} + } + } + + return Phase{ + Task: taskStr, + Rule: rule, + DependsOn: dependsOn, + Retry: retry, + Template: template, + } + } + return Phase{} + } + + // Search within specific workflow + query := ` + SELECT task, depends_on, rule, template, retry, status + FROM workflow_phases + WHERE workflow_file_path = ? AND (task LIKE ? OR task = ?) 
+ ORDER BY task + LIMIT 1 + ` + + rows, err := s.db.Query(query, key, t.Type+":%", t.Type) + if err != nil { + return Phase{} + } + defer rows.Close() + + if rows.Next() { + var taskStr, dependsOn, rule, template, status string + var retry int + + err := rows.Scan(&taskStr, &dependsOn, &rule, &template, &retry, &status) + if err != nil { + return Phase{} + } + + // Check if job matches if specified + if job != "" { + phase := Phase{ + Task: taskStr, + Rule: rule, + DependsOn: dependsOn, + Retry: retry, + Template: template, + } + if phase.Job() != job { + return Phase{} + } + } + + return Phase{ + Task: taskStr, + Rule: rule, + DependsOn: dependsOn, + Retry: retry, + Template: template, + } + } + return Phase{} } @@ -44,21 +260,357 @@ func (c *SQLite) GetPhase(t task.Task) Phase { // Empty slice will be returned if no children are found. // A task without a type or metadata containing the workflow info // will result in an error -func (c *SQLite) Children(t task.Task) []Phase { return nil } +func (s *SQLite) Children(t task.Task) []Phase { + s.mu.Lock() + defer s.mu.Unlock() + + if t.Type == "" { + return nil + } + + values, _ := url.ParseQuery(t.Meta) + key := values.Get("workflow") + job := t.Job + if job == "" { + job = values.Get("job") + } + + if key == "" { + return nil + } + + // Find phases that depend on this task + query := ` + SELECT task, depends_on, rule, template, retry, status + FROM workflow_phases + WHERE workflow_file_path = ? AND (depends_on LIKE ? OR depends_on = ?) 
+ ORDER BY task + ` + + rows, err := s.db.Query(query, key, t.Type+":%", t.Type) + if err != nil { + return nil + } + defer rows.Close() + + var result []Phase + for rows.Next() { + var taskStr, dependsOn, rule, template, status string + var retry int + + err := rows.Scan(&taskStr, &dependsOn, &rule, &template, &retry, &status) + if err != nil { + continue + } + + // Parse depends_on to check if it matches the task + v := strings.Split(dependsOn, ":") + depends := v[0] + var j string + if len(v) > 1 { + j = v[1] + } + + if depends == t.Type { + if j == "" || j == job { + result = append(result, Phase{ + Task: taskStr, + Rule: rule, + DependsOn: dependsOn, + Retry: retry, + Template: template, + }) + } + } + } + + return result +} // Refresh checks the cache and reloads any files if the checksum has changed. -func (c *SQLite) Refresh() (changedFiles []string, err error) { return nil, nil } +func (s *SQLite) Refresh() (changedFiles []string, err error) { + if !s.isDir { + f, err := s.loadFile(s.workflowPath, &s.fOpts) + if len(f) > 0 { + changedFiles = append(changedFiles, f) + } + return changedFiles, err + } + + // List and read all files + allFiles, err := listAllFiles(s.workflowPath, &s.fOpts) + if err != nil { + return changedFiles, err + } + + errs := appenderr.New() + for _, filePath := range allFiles { + f, err := s.loadFile(filePath, &s.fOpts) + if err != nil { + errs.Add(err) + } + if len(f) > 0 { + changedFiles = append(changedFiles, f) + } + } + + // Remove deleted workflows from database + for key := range s.GetWorkflowFiles() { + found := false + for _, v := range allFiles { + f := s.filePath(v) + if f == key { + found = true + break + } + } + if !found { + s.removeWorkflow(key) + changedFiles = append(changedFiles, "-"+key) + } + } + + return changedFiles, errs.ErrOrNil() +} + +// Helper methods for workflow operations // listAllFiles recursively lists all files in a folder and sub-folders -// Keep as is? 
-func listAllFiles(p string, opts *file.Options) ([]string, error) { return nil, nil } +func listAllFiles(p string, opts *file.Options) ([]string, error) { + files := make([]string, 0) + sts, err := file.List(p, opts) + if err != nil { + return nil, err + } + for _, f := range sts { + if f.IsDir { + s, err := listAllFiles(f.Path, opts) + if err != nil { + return nil, err + } + files = append(files, s...) + continue + } + files = append(files, f.Path) + } + return files, nil +} -// loadFile checks a files checksum and updates map if required +// loadFile checks a files checksum and updates database if required // loaded file name is returned -// Keep as is? -func (c *SQLite) loadFile(path string, opts *file.Options) (f string, err error) { return "", nil } +func (s *SQLite) loadFile(path string, opts *file.Options) (f string, err error) { + f = s.filePath(path) + sts, err := file.Stat(path, opts) + // permission issues + if err != nil { + return "", fmt.Errorf("stats %s %w", path, err) + } + // We can't process a directory here + if sts.IsDir { + return "", fmt.Errorf("can not read directory %s", path) + } + + // Check if file has changed by comparing checksum + existingHash := s.getFileHash(f) + if existingHash == sts.Checksum { + return "", nil // No changes + } + + // Read and parse the workflow file + r, err := file.NewReader(path, opts) + if err != nil { + return "", fmt.Errorf("new reader %s %w", path, err) + } + b, err := io.ReadAll(r) + if err != nil { + return "", fmt.Errorf("read-all: %s %w", path, err) + } + + var workflow Workflow + if _, err := toml.Decode(string(b), &workflow); err != nil { + return "", fmt.Errorf("decode: %s %w", string(b), err) + } + + // Update database with new workflow data + err = s.updateWorkflowInDB(f, sts.Checksum, workflow.Phases) + if err != nil { + return "", fmt.Errorf("update workflow in db: %w", err) + } + + return f, nil +} // filePath returns a filePath consist of all unique part // after the path set in the cache -// 
may not be needed if we store in a DB -func (c *SQLite) filePath(p string) (s string) { return "" } +func (s *SQLite) filePath(p string) (filePath string) { + path := strings.TrimLeft(s.workflowPath, ".") + if i := strings.LastIndex(p, path); i != -1 { + filePath = strings.TrimLeft(p[i+len(path):], "/") + } + if filePath == "" { + _, filePath = filepath.Split(p) + } + return filePath +} + +// getFileHash retrieves the current hash for a workflow file +func (s *SQLite) getFileHash(filePath string) string { + var hash string + err := s.db.QueryRow("SELECT file_hash FROM workflow_files WHERE file_path = ?", filePath).Scan(&hash) + if err != nil { + return "" + } + return hash +} + +// GetWorkflowFiles returns a map of all workflow files in the database +func (s *SQLite) GetWorkflowFiles() map[string]bool { + files := make(map[string]bool) + rows, err := s.db.Query("SELECT file_path FROM workflow_files") + if err != nil { + return files + } + defer rows.Close() + + for rows.Next() { + var filePath string + if err := rows.Scan(&filePath); err == nil { + files[filePath] = true + } + } + return files +} + +// GetPhasesForWorkflow returns all phases for a specific workflow file +func (s *SQLite) GetPhasesForWorkflow(filePath string) ([]Phase, error) { + rows, err := s.db.Query(` + SELECT task, depends_on, rule, template, retry, status + FROM workflow_phases + WHERE workflow_file_path = ? 
+ ORDER BY task + `, filePath) + if err != nil { + return nil, err + } + defer rows.Close() + + var phases []Phase + for rows.Next() { + var taskStr, dependsOn, rule, template, status string + var retry int + + err := rows.Scan(&taskStr, &dependsOn, &rule, &template, &retry, &status) + if err != nil { + continue + } + + phases = append(phases, Phase{ + Task: taskStr, + Rule: rule, + DependsOn: dependsOn, + Retry: retry, + Template: template, + }) + } + + return phases, nil +} + +// updateWorkflowInDB updates the workflow data in the database +func (s *SQLite) updateWorkflowInDB(filePath, checksum string, phases []Phase) error { + // Start transaction + tx, err := s.db.Begin() + if err != nil { + return err + } + defer tx.Rollback() + + // Update or insert workflow file record + _, err = tx.Exec(` + INSERT INTO workflow_files (file_path, file_hash, loaded_at, last_modified, is_active) + VALUES (?, ?, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP, TRUE) + ON CONFLICT (file_path) DO UPDATE SET + file_hash = excluded.file_hash, + loaded_at = CURRENT_TIMESTAMP, + last_modified = CURRENT_TIMESTAMP, + is_active = TRUE + `, filePath, checksum) + if err != nil { + return err + } + + // Remove existing phases for this workflow + _, err = tx.Exec("DELETE FROM workflow_phases WHERE workflow_file_path = ?", filePath) + if err != nil { + return err + } + + // Insert new phases + for _, phase := range phases { + status := s.validatePhase(phase) + _, err = tx.Exec(` + INSERT INTO workflow_phases (workflow_file_path, task, depends_on, rule, template, retry, status) + VALUES (?, ?, ?, ?, ?, ?, ?) 
+ `, filePath, phase.Task, phase.DependsOn, phase.Rule, phase.Template, phase.Retry, status) + if err != nil { + return err + } + } + + return tx.Commit() +} + +// removeWorkflow removes a workflow and its phases from the database +func (s *SQLite) removeWorkflow(filePath string) error { + // Start transaction + tx, err := s.db.Begin() + if err != nil { + return err + } + defer tx.Rollback() + + // Remove phases first + _, err = tx.Exec("DELETE FROM workflow_phases WHERE workflow_file_path = ?", filePath) + if err != nil { + return err + } + + // Remove workflow file record + _, err = tx.Exec("DELETE FROM workflow_files WHERE file_path = ?", filePath) + if err != nil { + return err + } + + return tx.Commit() +} + +// validatePhase validates a phase and returns status message +func (s *SQLite) validatePhase(phase Phase) string { + // Basic validation logic + if phase.Rule == "" && phase.DependsOn == "" { + return "invalid phase: rule and dependsOn are blank" + } + + // Check for valid cron rule + if phase.Rule != "" { + values, err := url.ParseQuery(phase.Rule) + if err != nil { + return fmt.Sprintf("invalid rule format: %s", phase.Rule) + } + + cron := values.Get("cron") + if cron != "" { + // Basic cron validation (could be enhanced) + if cron == "invalid" { + return fmt.Sprintf("no valid rule found: cron=%s", cron) + } + } + } + + // Check retry count + if phase.Retry > 10 { + return "warning: retry count exceeds recommended limit" + } + + return "" // No issues +} diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index 8dbe82b..ceaef05 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -110,12 +110,17 @@ func (tm *taskMaster) Info(w http.ResponseWriter, r *http.Request) { // create a copy of all workflows wCache := make(map[string]map[string]workflow.Phase) // [file][task:job]Phase - for key, w := range tm.Cache.Workflows { - phases := make(map[string]workflow.Phase) - for _, j := range w.Phases { - phases[pName(j.Topic(), 
j.Job())] = j + workflowFiles := tm.taskCache.GetWorkflowFiles() + for filePath := range workflowFiles { + phases, err := tm.taskCache.GetPhasesForWorkflow(filePath) + if err != nil { + continue + } + phaseMap := make(map[string]workflow.Phase) + for _, j := range phases { + phaseMap[pName(j.Topic(), j.Job())] = j.ToWorkflowPhase() } - wCache[key] = phases + wCache[filePath] = phaseMap } entries := tm.cron.Entries() for i := 0; i < len(entries); i++ { @@ -282,7 +287,7 @@ func (tm *taskMaster) workflowFiles(w http.ResponseWriter, r *http.Request) { } pth := tm.path // support directory and single file for workflow path lookup. - if tm.Cache.IsDir() { + if tm.taskCache.IsDir() { pth += "/" + fName } @@ -807,7 +812,7 @@ func (tm *taskMaster) backload(req request) response { end = at } - workflowPath, phase := tm.Cache.Search(req.Task, req.Job) + workflowPath, phase := tm.taskCache.Search(req.Task, req.Job) if workflowPath != "" { msg = append(msg, "phase found in "+workflowPath) req.Template = phase.Template diff --git a/apps/flowlord/taskmaster.go b/apps/flowlord/taskmaster.go index c652dca..1b188de 100644 --- a/apps/flowlord/taskmaster.go +++ b/apps/flowlord/taskmaster.go @@ -41,7 +41,6 @@ type taskMaster struct { doneTopic string failedTopic string taskCache *cache.SQLite - *workflow.Cache HostName string port int cron *cron.Cron @@ -98,7 +97,7 @@ func New(opts *options) *taskMaster { if opts.Slack.MaxFrequency <= opts.Slack.MinFrequency { opts.Slack.MaxFrequency = 16 * opts.Slack.MinFrequency } - if err := opts.DB.Open(*opts.File); err != nil { + if err := opts.DB.Open(opts.Workflow, *opts.File); err != nil { log.Fatal("db init", err) } @@ -142,7 +141,7 @@ func pName(topic, job string) string { } func (tm *taskMaster) getAllChildren(topic, workflow, job string) (s []string) { - for _, c := range tm.Children(task.Task{Type: topic, Meta: "workflow=" + workflow + "&job=" + job}) { + for _, c := range tm.taskCache.Children(task.Task{Type: topic, Meta: "workflow=" + 
workflow + "&job=" + job}) { job := strings.Trim(c.Topic()+":"+c.Job(), ":") if children := tm.getAllChildren(c.Task, workflow, c.Job()); len(children) > 0 { job += " ➞ " + strings.Join(children, " ➞ ") @@ -154,7 +153,7 @@ func (tm *taskMaster) getAllChildren(topic, workflow, job string) (s []string) { func (tm *taskMaster) refreshCache() ([]string, error) { // Reload workflow files - files, err := tm.Cache.Refresh() + files, err := tm.taskCache.Refresh() if err != nil { return nil, fmt.Errorf("error reloading workflow: %w", err) } @@ -176,9 +175,7 @@ func (tm *taskMaster) refreshCache() ([]string, error) { } func (tm *taskMaster) Run(ctx context.Context) (err error) { - if tm.Cache, err = workflow.New(tm.path, tm.fOpts); err != nil { - return fmt.Errorf("workflow setup %w", err) - } + // The SQLite struct now implements the workflow.Cache interface directly // check for alerts from today on startup // refresh the workflow if the file(s) have been changed _, err = tm.refreshCache() @@ -233,18 +230,30 @@ func validatePhase(p workflow.Phase) string { // schedule the tasks and refresh the schedule when updated func (tm *taskMaster) schedule() (err error) { errs := make([]error, 0) - if len(tm.Workflows) == 0 { + + // Get all workflow files from database + workflowFiles := tm.taskCache.GetWorkflowFiles() + + if len(workflowFiles) == 0 { return fmt.Errorf("no workflows found check path %s", tm.path) } - for path, workflow := range tm.Workflows { - for _, w := range workflow.Phases { + + // Get all phases for each workflow file + for filePath := range workflowFiles { + phases, err := tm.taskCache.GetPhasesForWorkflow(filePath) + if err != nil { + errs = append(errs, fmt.Errorf("error getting phases for %s: %w", filePath, err)) + continue + } + + for _, w := range phases { rules, _ := url.ParseQuery(w.Rule) cronSchedule := rules.Get("cron") if f := rules.Get("files"); f != "" { r := fileRule{ SrcPattern: f, - workflowFile: path, - Phase: w, + workflowFile: filePath, + 
Phase: w.ToWorkflowPhase(), CronCheck: cronSchedule, } r.CountCheck, _ = strconv.Atoi(rules.Get("count")) @@ -256,16 +265,18 @@ func (tm *taskMaster) schedule() (err error) { if cronSchedule == "" { log.Printf("no cron: task:%s, rule:%s", w.Task, w.Rule) + //TODO: update the phase table with this status message continue } - j, err := tm.NewJob(w, path) + j, err := tm.NewJob(w.ToWorkflowPhase(), filePath) if err != nil { errs = append(errs, fmt.Errorf("issue with %s %w", w.Task, err)) } if _, err = tm.cron.AddJob(cronSchedule, j); err != nil { - errs = append(errs, fmt.Errorf("invalid rule for %s:%s %s %w", path, w.Task, w.Rule, err)) + // TODO: update the phase table with this status message + errs = append(errs, fmt.Errorf("invalid rule for %s:%s %s %w", filePath, w.Task, w.Rule, err)) } } } @@ -287,7 +298,7 @@ func (tm *taskMaster) Process(t *task.Task) error { case task.AlertResult: tm.alerts <- *t case task.ErrResult: - p := tm.Get(*t) + p := tm.taskCache.Get(*t) rules, _ := url.ParseQuery(p.Rule) r := meta.Get("retry") @@ -341,7 +352,7 @@ func (tm *taskMaster) Process(t *task.Task) error { case task.CompleteResult: // start off any children tasks taskTime := tmpl.TaskTime(*t) - phases := tm.Children(*t) + phases := tm.taskCache.Children(*t) for _, p := range phases { if !isReady(p.Rule, t.Meta) { From 2e023e2598f5a40b2873e44a7d04963cb70bca3b Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Thu, 9 Oct 2025 14:16:19 -0600 Subject: [PATCH 20/40] workflow fixes and dashboard --- apps/flowlord/cache/cache_test.go | 2 + apps/flowlord/cache/schema.sql | 3 +- apps/flowlord/cache/sqlite.go | 124 +++++-- apps/flowlord/cache/workflow.go | 6 +- apps/flowlord/handler.go | 88 +++++ apps/flowlord/handler/about.tmpl | 10 +- apps/flowlord/handler/header.tmpl | 4 + apps/flowlord/handler/workflow.tmpl | 526 ++++++++++++++++++++++++++++ apps/flowlord/handler_test.go | 40 ++- apps/flowlord/taskmaster.go | 2 +- apps/flowlord/taskmaster_test.go | 20 +- 11 files changed, 768 
insertions(+), 57 deletions(-) create mode 100644 apps/flowlord/handler/workflow.tmpl diff --git a/apps/flowlord/cache/cache_test.go b/apps/flowlord/cache/cache_test.go index af22b53..09839df 100644 --- a/apps/flowlord/cache/cache_test.go +++ b/apps/flowlord/cache/cache_test.go @@ -1,5 +1,7 @@ package cache + + /* func TestAdd(t *testing.T) { fn := func(tasks []task.Task) (map[string]TaskJob, error) { diff --git a/apps/flowlord/cache/schema.sql b/apps/flowlord/cache/schema.sql index d8d25d4..b536735 100644 --- a/apps/flowlord/cache/schema.sql +++ b/apps/flowlord/cache/schema.sql @@ -99,8 +99,7 @@ CREATE TABLE IF NOT EXISTS workflow_phases ( rule TEXT, -- URI query parameters (e.g., "cron=0 0 * * *&offset=1h") template TEXT, retry INTEGER DEFAULT 0, -- threshold of times to retry - status TEXT, -- phase status info (warnings, errors, validation messages) - PRIMARY KEY (workflow_file_path, task) + status TEXT -- phase status info (warnings, errors, validation messages) ); -- Indexes for performance diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index c0f6140..60ae671 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -34,7 +34,7 @@ type SQLite struct { Retention time.Duration // 90 days db *sql.DB - fOpts file.Options + fOpts *file.Options //ttl time.Duration mu sync.Mutex @@ -45,15 +45,15 @@ type SQLite struct { // Open the sqlite DB. 
If localPath doesn't exist then check if BackupPath exists and copy it to localPath // Also initializes workflow path and determines if it's a directory -func (o *SQLite) Open(workflowPath string, fOpts file.Options) error { +func (o *SQLite) Open(workflowPath string, fOpts *file.Options) error { o.fOpts = fOpts o.workflowPath = workflowPath if o.TaskTTL < time.Hour { o.TaskTTL = time.Hour } - backupSts, _ := file.Stat(o.BackupPath, &fOpts) - localSts, _ := file.Stat(o.LocalPath, &fOpts) + backupSts, _ := file.Stat(o.BackupPath, fOpts) + localSts, _ := file.Stat(o.LocalPath, fOpts) if localSts.Size == 0 && backupSts.Size > 0 { log.Printf("Restoring local DB from backup %s", o.BackupPath) @@ -68,6 +68,19 @@ func (o *SQLite) Open(workflowPath string, fOpts file.Options) error { if err != nil { return err } + + // Set a smaller page size to reduce DB file size + _, err = db.Exec("PRAGMA page_size = 4096;") + if err != nil { + return fmt.Errorf("failed to set page size: %w", err) + } + + // Enable auto vacuum to reclaim space when records are deleted + _, err = db.Exec("PRAGMA auto_vacuum = INCREMENTAL;") + if err != nil { + return fmt.Errorf("failed to set auto vacuum: %w", err) + } + o.db = db // Execute the schema if the migration version is not the same as the current schema version @@ -79,22 +92,22 @@ func (o *SQLite) Open(workflowPath string, fOpts file.Options) error { //TODO: load workflow file into the database // Determine if workflow path is a directory - sts, err := file.Stat(workflowPath, &fOpts) + sts, err := file.Stat(workflowPath, fOpts) if err != nil { return fmt.Errorf("problem with workflow path %s %w", workflowPath, err) } o.isDir = sts.IsDir - _, err = o.loadFile(o.workflowPath, &o.fOpts) + _, err = o.Refresh() return err } -func copyFiles(src, dst string, fOpts file.Options) error { - r, err := file.NewReader(src, &fOpts) +func copyFiles(src, dst string, fOpts *file.Options) error { + r, err := file.NewReader(src, fOpts) if err != nil { return 
fmt.Errorf("init reader err: %w", err) } - w, err := file.NewWriter(dst, &fOpts) + w, err := file.NewWriter(dst, fOpts) if err != nil { return fmt.Errorf("init writer err: %w", err) } @@ -1003,11 +1016,15 @@ type DBSizeInfo struct { // TableStat contains information about a database table type TableStat struct { - Name string `json:"name"` - RowCount int64 `json:"row_count"` - SizeBytes int64 `json:"size_bytes"` - SizeHuman string `json:"size_human"` - Percentage float64 `json:"percentage"` + Name string `json:"name"` + RowCount int64 `json:"row_count"` + TableBytes int64 `json:"table_bytes"` + TableHuman string `json:"table_human"` + IndexBytes int64 `json:"index_bytes"` + IndexHuman string `json:"index_human"` + TotalBytes int64 `json:"total_bytes"` + TotalHuman string `json:"total_human"` + Percentage float64 `json:"percentage"` } // GetDBSize returns database size information @@ -1027,12 +1044,9 @@ func (s *SQLite) GetDBSize() (*DBSizeInfo, error) { return nil, err } - // Get database file path - var dbPath string - err = s.db.QueryRow("PRAGMA database_list").Scan(&dbPath, nil, nil) - if err != nil { - // If we can't get the path, use a default - dbPath = "unknown" + dbPath := s.LocalPath + if s.BackupPath != "" { + dbPath = s.BackupPath } totalSize := pageCount * pageSize @@ -1079,6 +1093,8 @@ func (s *SQLite) GetTableStats() ([]TableStat, error) { } var stats []TableStat + var totalTableSize int64 + for _, tableName := range tables { // Get row count var rowCount int64 @@ -1087,26 +1103,40 @@ func (s *SQLite) GetTableStats() ([]TableStat, error) { continue // Skip tables we can't read } - // Get table size using pragma table_info and estimate - // This is an approximation since SQLite doesn't provide exact table sizes - var sizeBytes int64 + // Calculate more accurate table size + var tableBytes int64 if rowCount > 0 { - // Estimate size based on row count and average row size - // This is a rough approximation - avgRowSize := int64(200) // Estimated average 
row size in bytes - sizeBytes = rowCount * avgRowSize + // Try to get actual table size using dbstat if available + var actualSize int64 + err := s.db.QueryRow(fmt.Sprintf(` + SELECT SUM(pgsize) FROM dbstat WHERE name = '%s' AND aggregate = 1 + `, tableName)).Scan(&actualSize) + + if err == nil && actualSize > 0 { + // Use actual size from dbstat + tableBytes = actualSize + } } + // Get index sizes for this table + indexBytes := s.getIndexSize(tableName) + totalBytes := tableBytes + indexBytes + totalTableSize += totalBytes + percentage := float64(0) if totalSize > 0 { - percentage = float64(sizeBytes) / float64(totalSize) * 100 + percentage = float64(totalBytes) / float64(totalSize) * 100 } stats = append(stats, TableStat{ Name: tableName, RowCount: rowCount, - SizeBytes: sizeBytes, - SizeHuman: formatBytes(sizeBytes), + TableBytes: tableBytes, + TableHuman: formatBytes(tableBytes), + IndexBytes: indexBytes, + IndexHuman: formatBytes(indexBytes), + TotalBytes: totalBytes, + TotalHuman: formatBytes(totalBytes), Percentage: percentage, }) } @@ -1114,6 +1144,40 @@ func (s *SQLite) GetTableStats() ([]TableStat, error) { return stats, nil } + +// getIndexSize calculates the total size of all indexes for a table +func (s *SQLite) getIndexSize(tableName string) int64 { + // Get all indexes for this table + rows, err := s.db.Query(fmt.Sprintf(` + SELECT name FROM sqlite_master + WHERE type='index' AND tbl_name='%s' AND name NOT LIKE 'sqlite_%%' + `, tableName)) + if err != nil { + return 0 + } + defer rows.Close() + + var totalIndexSize int64 + for rows.Next() { + var indexName string + if err := rows.Scan(&indexName); err != nil { + continue + } + + // Try to get actual index size using dbstat + var indexSize int64 + err := s.db.QueryRow(fmt.Sprintf(` + SELECT SUM(pgsize) FROM dbstat WHERE name = '%s' AND aggregate = 1 + `, indexName)).Scan(&indexSize) + + if err == nil && indexSize > 0 { + totalIndexSize += indexSize + } + } + + return totalIndexSize +} + // formatBytes 
converts bytes to human readable format func formatBytes(bytes int64) string { const unit = 1024 diff --git a/apps/flowlord/cache/workflow.go b/apps/flowlord/cache/workflow.go index 2efe90e..0c4d274 100644 --- a/apps/flowlord/cache/workflow.go +++ b/apps/flowlord/cache/workflow.go @@ -330,7 +330,7 @@ func (s *SQLite) Children(t task.Task) []Phase { // Refresh checks the cache and reloads any files if the checksum has changed. func (s *SQLite) Refresh() (changedFiles []string, err error) { if !s.isDir { - f, err := s.loadFile(s.workflowPath, &s.fOpts) + f, err := s.loadFile(s.workflowPath, s.fOpts) if len(f) > 0 { changedFiles = append(changedFiles, f) } @@ -338,14 +338,14 @@ func (s *SQLite) Refresh() (changedFiles []string, err error) { } // List and read all files - allFiles, err := listAllFiles(s.workflowPath, &s.fOpts) + allFiles, err := listAllFiles(s.workflowPath, s.fOpts) if err != nil { return changedFiles, err } errs := appenderr.New() for _, filePath := range allFiles { - f, err := s.loadFile(filePath, &s.fOpts) + f, err := s.loadFile(filePath, s.fOpts) if err != nil { errs.Add(err) } diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index ceaef05..3d35519 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -40,6 +40,9 @@ var FilesTemplate string //go:embed handler/task.tmpl var TaskTemplate string +//go:embed handler/workflow.tmpl +var WorkflowTemplate string + //go:embed handler/header.tmpl var HeaderTemplate string @@ -87,6 +90,7 @@ func (tm *taskMaster) StartHandler() { router.Get("/web/alert", tm.htmlAlert) router.Get("/web/files", tm.htmlFiles) router.Get("/web/task", tm.htmlTask) + router.Get("/web/workflow", tm.htmlWorkflow) router.Get("/web/about", tm.htmlAbout) if tm.port == 0 { @@ -388,6 +392,13 @@ func (tm *taskMaster) htmlTask(w http.ResponseWriter, r *http.Request) { w.Write(taskHTML(tasks, dt, taskType, job, result)) } +// htmlWorkflow handles GET /web/workflow - displays workflow phases from database 
+func (tm *taskMaster) htmlWorkflow(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "text/html") + w.Write(workflowHTML(tm.taskCache)) +} + // htmlAbout handles GET /web/about - displays system information and cache statistics func (tm *taskMaster) htmlAbout(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) @@ -637,6 +648,71 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri return buf.Bytes() } +// workflowHTML renders the workflow phases HTML page +func workflowHTML(cache *cache.SQLite) []byte { + // Get all workflow files and their phases + workflowFiles := cache.GetWorkflowFiles() + + var allPhases []WorkflowPhaseView + workflowFileSummary := make(map[string]int) + + for filePath := range workflowFiles { + phases, err := cache.GetPhasesForWorkflow(filePath) + if err != nil { + continue + } + + workflowFileSummary[filePath] = len(phases) + + for _, phase := range phases { + allPhases = append(allPhases, WorkflowPhaseView{ + WorkflowFile: filePath, + Task: phase.Topic(), + Job: phase.Job(), + Rule: phase.Rule, + DependsOn: phase.DependsOn, + Retry: phase.Retry, + Template: phase.Template, + Status: "", // TODO: Get status from database + }) + } + } + + data := map[string]interface{}{ + "Phases": allPhases, + "WorkflowFileSummary": workflowFileSummary, + "CurrentPage": "workflow", + "PageTitle": "Workflow Dashboard", + "staticPath": staticPath, + } + + // Template functions + funcMap := template.FuncMap{ + "slice": func(s string, start, end int) string { + if start >= len(s) { + return "" + } + if end > len(s) { + end = len(s) + } + return s[start:end] + }, + } + + // Parse and execute template + tmpl, err := template.New("workflow").Funcs(funcMap).Parse(HeaderTemplate + WorkflowTemplate) + if err != nil { + return []byte(err.Error()) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return []byte(err.Error()) + } + + 
return buf.Bytes() +} + // aboutHTML renders the about page HTML func (tm *taskMaster) aboutHTML() []byte { // Get basic system information @@ -785,6 +861,18 @@ type response struct { code int } +// WorkflowPhaseView represents a workflow phase for display in the web interface +type WorkflowPhaseView struct { + WorkflowFile string + Task string + Job string + Rule string + DependsOn string + Retry int + Template string + Status string +} + func (tm *taskMaster) backload(req request) response { // handle start and end date at := parseTime(req.At) diff --git a/apps/flowlord/handler/about.tmpl b/apps/flowlord/handler/about.tmpl index bb7d397..3a2ddaa 100644 --- a/apps/flowlord/handler/about.tmpl +++ b/apps/flowlord/handler/about.tmpl @@ -69,8 +69,9 @@ Table Name Row Count - Size (Bytes) - Size (Human) + Table Size + Index Size + Total Size Percentage @@ -79,8 +80,9 @@ {{.Name}} {{.RowCount}} - {{.SizeBytes}} - {{.SizeHuman}} + {{.TableHuman}} + {{.IndexHuman}} + {{.TotalHuman}} {{printf "%.1f" .Percentage}}% {{end}} diff --git a/apps/flowlord/handler/header.tmpl b/apps/flowlord/handler/header.tmpl index 38a377b..ac68d48 100644 --- a/apps/flowlord/handler/header.tmpl +++ b/apps/flowlord/handler/header.tmpl @@ -27,6 +27,10 @@ 📊 Tasks + + 🌳 + Workflow + ℹ️ About diff --git a/apps/flowlord/handler/workflow.tmpl b/apps/flowlord/handler/workflow.tmpl new file mode 100644 index 0000000..1427362 --- /dev/null +++ b/apps/flowlord/handler/workflow.tmpl @@ -0,0 +1,526 @@ + + + + + + Flowlord: Workflows + + + + {{template "header" .}} +
+ +
+

Workflow File Summary

+
+ {{range $filePath, $count := .WorkflowFileSummary}} +
+
+ {{$filePath}} + {{$count}} phases +
+
Workflow file with {{$count}} phase{{if ne $count 1}}s{{end}}
+
+ {{end}} +
+
+ +
+

Filters

+
+
+ + +
+
+ + +
+
+ + +
+
+ +
+
+
+ + {{if .Phases}} +
+ + + + + + + + + + + + + + + {{range .Phases}} + + + + + + + + + + + {{end}} + +
Workflow FileTaskJobRuleDepends OnRetryTemplateStatus
{{.WorkflowFile}}{{.Task}}{{.Job}} + {{if ge (len .Rule) 80}}{{slice .Rule 0 80}}...{{else}}{{.Rule}}{{end}} + {{.DependsOn}}{{.Retry}} + {{if ge (len .Template) 80}}{{slice .Template 0 80}}...{{else}}{{.Template}}{{end}} + + {{if .Status}}{{.Status}}{{else}}OK{{end}} +
+
+
+ Total Phases: {{len .Phases}} +
+ {{else}} +
+

No workflow phases found

+

No workflow phases were found in the database.

+
+ {{end}} +
+ + + + diff --git a/apps/flowlord/handler_test.go b/apps/flowlord/handler_test.go index e7e0181..7308b1f 100644 --- a/apps/flowlord/handler_test.go +++ b/apps/flowlord/handler_test.go @@ -12,7 +12,6 @@ import ( "github.com/pcelvng/task" "github.com/pcelvng/task-tools/apps/flowlord/cache" - "github.com/pcelvng/task-tools/workflow" ) const testPath = "../../internal/test" @@ -20,6 +19,7 @@ const testPath = "../../internal/test" func TestMain(t *testing.M) { staticPath = "./static" t.Run() + os.Remove(":memory") } @@ -40,14 +40,16 @@ func loadTaskViewData(filename string) ([]cache.TaskView, error) { } func TestBackloader(t *testing.T) { - cache, err := workflow.New(testPath+"/workflow/f3.toml", nil) + sqlDB := &cache.SQLite{LocalPath: ":memory"} + err := sqlDB.Open(testPath+"/workflow/f3.toml", nil ) + //cache, err := workflow.New(testPath+"/workflow/f3.toml", nil) today := time.Now().Format("2006-01-02") toHour := time.Now().Format(DateHour) if err != nil { t.Fatal(err) } tm := &taskMaster{ - Cache: cache, + taskCache: sqlDB, } fn := func(req request) (response, error) { @@ -500,11 +502,37 @@ func TestTaskHTML(t *testing.T) { } +func TestWorkflowHTML(t *testing.T) { + // Load workflow files + taskCache := &cache.SQLite{LocalPath: ":memory"} + if err := taskCache.Open(testPath+"/workflow/", nil); err != nil { + t.Fatalf("Failed to create test cache: %v", err) + } + + // Test with no filters - summary will be generated from tasks data + html := workflowHTML(taskCache) + + // Write HTML to a file for easy viewing + outputFile := "handler/workflow_preview.html" + err := os.WriteFile(outputFile, html, 0644) + if err != nil { + t.Fatalf("Failed to write HTML file: %v", err) + } + + t.Logf("Task preview generated and saved to: ./%s", outputFile) + + // Basic checks + if len(html) == 0 { + t.Error("Expected HTML output, got empty") + } + +} + func TestAboutHTML(t *testing.T) { // Create a real SQLite cache for testing - taskCache, err := cache.NewSQLite(time.Hour, 
":memory:") - if err != nil { + taskCache := &cache.SQLite{LocalPath: ":memory"} + if err := taskCache.Open(testPath+"/workflow/", nil); err != nil { t.Fatalf("Failed to create test cache: %v", err) } @@ -521,7 +549,7 @@ func TestAboutHTML(t *testing.T) { // Write HTML to a file for easy viewing outputFile := "handler/about_preview.html" - err = os.WriteFile(outputFile, html, 0644) + err := os.WriteFile(outputFile, html, 0644) if err != nil { t.Fatalf("Failed to write HTML file: %v", err) } diff --git a/apps/flowlord/taskmaster.go b/apps/flowlord/taskmaster.go index 1b188de..d3d630c 100644 --- a/apps/flowlord/taskmaster.go +++ b/apps/flowlord/taskmaster.go @@ -97,7 +97,7 @@ func New(opts *options) *taskMaster { if opts.Slack.MaxFrequency <= opts.Slack.MinFrequency { opts.Slack.MaxFrequency = 16 * opts.Slack.MinFrequency } - if err := opts.DB.Open(opts.Workflow, *opts.File); err != nil { + if err := opts.DB.Open(opts.Workflow, opts.File); err != nil { log.Fatal("db init", err) } diff --git a/apps/flowlord/taskmaster_test.go b/apps/flowlord/taskmaster_test.go index 9e91457..85e56e4 100644 --- a/apps/flowlord/taskmaster_test.go +++ b/apps/flowlord/taskmaster_test.go @@ -23,7 +23,9 @@ const base_test_path string = "../../internal/test/" func TestTaskMaster_Process(t *testing.T) { delayRegex := regexp.MustCompile(`delayed=(\d+.\d+)`) - workflowCache, fatalErr := workflow.New(base_test_path+"workflow", nil) + // Initialize taskCache for the test + taskCache := &cache.SQLite{LocalPath: ":memory"} + fatalErr := taskCache.Open(base_test_path+"workflow", nil) if fatalErr != nil { t.Fatal("cache init", fatalErr) } @@ -33,12 +35,8 @@ func TestTaskMaster_Process(t *testing.T) { } fn := func(tsk task.Task) ([]task.Task, error) { var alerts int64 - // Initialize taskCache for the test - taskCache, err := cache.NewSQLite(time.Hour, ":memory:") - if err != nil { - return nil, err - } - tm := taskMaster{doneConsumer: consumer, Cache: workflowCache, taskCache: taskCache, failedTopic: 
"failed-topic", alerts: make(chan task.Task), slack: &Notification{}} + + tm := taskMaster{doneConsumer: consumer, taskCache: taskCache, failedTopic: "failed-topic", alerts: make(chan task.Task), slack: &Notification{}} producer, _ := nop.NewProducer("") tm.producer = producer nop.FakeMsg = tsk.JSONBytes() @@ -277,12 +275,12 @@ func TestTaskMaster_Schedule(t *testing.T) { Files []fileRule } fn := func(in string) (expected, error) { - cache, err := workflow.New(base_test_path+in, nil) - if err != nil { + tm := taskMaster{ cron: cron.New()} + tm.taskCache = &cache.SQLite{LocalPath: ":memory"} + if err := tm.taskCache.Open(base_test_path+in, nil); err != nil { return expected{}, err } - tm := taskMaster{Cache: cache, cron: cron.New()} - err = tm.schedule() + err := tm.schedule() exp := expected{ Jobs: make([]Cronjob, 0), Files: tm.files, From 962f97527dc33b402640c7b8ae2f4ff7cbb87b0f Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Fri, 10 Oct 2025 15:07:23 -0600 Subject: [PATCH 21/40] unit test fixes --- apps/flowlord/Workflow_plan.md | 30 +-- apps/flowlord/cache/schema.sql | 7 +- apps/flowlord/cache/sqlite.go | 35 +-- apps/flowlord/cache/workflow.go | 334 ++++++++++++--------------- apps/flowlord/cache/workflow_test.go | 307 ++++++++++++++++++++++++ apps/flowlord/handler.go | 48 +--- apps/flowlord/handler/workflow.tmpl | 12 +- apps/flowlord/handler_test.go | 44 ++-- apps/flowlord/taskmaster.go | 28 +-- apps/flowlord/taskmaster_test.go | 10 +- 10 files changed, 555 insertions(+), 300 deletions(-) create mode 100644 apps/flowlord/cache/workflow_test.go diff --git a/apps/flowlord/Workflow_plan.md b/apps/flowlord/Workflow_plan.md index c219c21..9f16c91 100644 --- a/apps/flowlord/Workflow_plan.md +++ b/apps/flowlord/Workflow_plan.md @@ -22,14 +22,14 @@ CREATE TABLE workflow_files ( -- Workflow phases (matches Phase struct exactly) CREATE TABLE workflow_phases ( - workflow_file_path TEXT NOT NULL, + file_path TEXT NOT NULL, task TEXT NOT NULL, -- topic:job format (e.g., 
"data-load:hourly") depends_on TEXT, rule TEXT, -- URI query parameters (e.g., "cron=0 0 * * *&offset=1h") template TEXT, retry INTEGER DEFAULT 0, -- threshold of times to retry status TEXT, -- phase status info (warnings, errors, validation messages) - PRIMARY KEY (workflow_file_path, task) + PRIMARY KEY (file_path, task) ); -- Task relationships are generated dynamically from workflow_phases @@ -50,7 +50,7 @@ CREATE INDEX idx_workflow_phases_status ON workflow_phases (status); - Easy to identify and debug workflow issues ### 2. Composite Primary Keys for Phases -- `(workflow_file_path, task_name, job_name)` uniquely identifies each phase +- `(file_path, task_name, job_name)` uniquely identifies each phase - Directly readable: `("workflows/data-load.toml", "data-load", "hourly")` - No surrogate IDs to remember or map @@ -80,25 +80,25 @@ CREATE INDEX idx_workflow_phases_status ON workflow_phases (status); -- Find all phases in a specific workflow file SELECT task, depends_on, rule, status FROM workflow_phases -WHERE workflow_file_path = 'workflows/data-load.toml'; +WHERE file_path = 'workflows/data-load.toml'; -- Find phases that depend on a specific task type -SELECT workflow_file_path, task, rule, status +SELECT file_path, task, rule, status FROM workflow_phases WHERE depends_on = 'data-load'; -- Find phases by topic (using LIKE for topic:job matching) -SELECT workflow_file_path, task, depends_on, rule, status +SELECT file_path, task, depends_on, rule, status FROM workflow_phases WHERE task LIKE 'data-load:%'; -- Find phases with warnings or errors -SELECT workflow_file_path, task, status +SELECT file_path, task, status FROM workflow_phases WHERE status IS NOT NULL AND status != ''; -- Find phases with specific status messages -SELECT workflow_file_path, task, status +SELECT file_path, task, status FROM workflow_phases WHERE status LIKE '%warning%' OR status LIKE '%error%'; @@ -106,7 +106,7 @@ WHERE status LIKE '%warning%' OR status LIKE '%error%'; SELECT 
parent.depends_on as parent_task, parent.task as child_task, - parent.workflow_file_path, + parent.file_path, parent.rule as child_rule, parent.status as child_status FROM workflow_phases parent @@ -115,7 +115,7 @@ WHERE parent.depends_on IS NOT NULL AND parent.depends_on != ''; -- Find all children of a specific task SELECT child.task as child_task, - child.workflow_file_path, + child.file_path, child.rule as child_rule, child.status as child_status FROM workflow_phases child @@ -124,7 +124,7 @@ WHERE child.depends_on = 'data-load'; -- Find all parents of a specific task SELECT parent.depends_on as parent_task, - parent.workflow_file_path, + parent.file_path, parent.rule as parent_rule, parent.status as parent_status FROM workflow_phases parent @@ -138,7 +138,7 @@ SELECT COUNT(wp.task) as phase_count, COUNT(CASE WHEN wp.status IS NOT NULL AND wp.status != '' THEN 1 END) as phases_with_status FROM workflow_files wf -LEFT JOIN workflow_phases wp ON wf.file_path = wp.workflow_file_path +LEFT JOIN workflow_phases wp ON wf.file_path = wp.file_path GROUP BY wf.file_path; ``` @@ -246,21 +246,21 @@ type SQLite struct { // Add workflow methods to existing SQLite struct func (s *SQLite) Search(task, job string) (path string, ph Phase) { - // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status + // Query: SELECT file_path, task, depends_on, rule, template, retry, status // FROM workflow_phases WHERE task = ? OR task LIKE ? // (where ? is either exact match or topic:job format) // Return same results as before, with status info available } func (s *SQLite) Get(t task.Task) Phase { - // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status + // Query: SELECT file_path, task, depends_on, rule, template, retry, status // FROM workflow_phases WHERE task = ? OR task LIKE ? // (where ? 
is either exact match or topic:job format) // Return same Phase struct with status info } func (s *SQLite) Children(t task.Task) []Phase { - // Query: SELECT workflow_file_path, task, depends_on, rule, template, retry, status + // Query: SELECT file_path, task, depends_on, rule, template, retry, status // FROM workflow_phases WHERE depends_on = ? OR depends_on LIKE ? // (where ? matches the task type or topic:job format) // Return same []Phase slice with status info diff --git a/apps/flowlord/cache/schema.sql b/apps/flowlord/cache/schema.sql index b536735..760b29f 100644 --- a/apps/flowlord/cache/schema.sql +++ b/apps/flowlord/cache/schema.sql @@ -93,16 +93,15 @@ CREATE TABLE IF NOT EXISTS workflow_files ( -- Workflow phases (matches Phase struct exactly) CREATE TABLE IF NOT EXISTS workflow_phases ( - workflow_file_path TEXT NOT NULL, + file_path TEXT NOT NULL, task TEXT NOT NULL, -- topic:job format (e.g., "data-load:hourly") depends_on TEXT, rule TEXT, -- URI query parameters (e.g., "cron=0 0 * * *&offset=1h") template TEXT, - retry INTEGER DEFAULT 0, -- threshold of times to retry + retry integer DEFAULT 0, -- number of retries (default 0) status TEXT -- phase status info (warnings, errors, validation messages) ); -- Indexes for performance CREATE INDEX IF NOT EXISTS idx_workflow_phases_task ON workflow_phases (task); -CREATE INDEX IF NOT EXISTS idx_workflow_phases_depends_on ON workflow_phases (depends_on); -CREATE INDEX IF NOT EXISTS idx_workflow_phases_status ON workflow_phases (status); \ No newline at end of file +CREATE INDEX IF NOT EXISTS idx_workflow_phases_depends_on ON workflow_phases (depends_on); \ No newline at end of file diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index 60ae671..a57a066 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -51,14 +51,31 @@ func (o *SQLite) Open(workflowPath string, fOpts *file.Options) error { if o.TaskTTL < time.Hour { o.TaskTTL = time.Hour } + if o.db == 
nil { + if err := o.initDB(); err != nil { + return err + } +} + + // Determine if workflow path is a directory + sts, err := file.Stat(workflowPath, fOpts) + if err != nil { + return fmt.Errorf("problem with workflow path %s %w", workflowPath, err) + } + o.isDir = sts.IsDir + _, err = o.Refresh() - backupSts, _ := file.Stat(o.BackupPath, fOpts) - localSts, _ := file.Stat(o.LocalPath, fOpts) + return err +} + +func (o *SQLite) initDB() error { + backupSts, _ := file.Stat(o.BackupPath, o.fOpts) + localSts, _ := file.Stat(o.LocalPath, o.fOpts) if localSts.Size == 0 && backupSts.Size > 0 { log.Printf("Restoring local DB from backup %s", o.BackupPath) // no local file but backup exists so copy it down - if err := copyFiles(o.BackupPath, o.LocalPath, fOpts); err != nil { + if err := copyFiles(o.BackupPath, o.LocalPath, o.fOpts); err != nil { log.Println(err) // TODO: should this be fatal? } } @@ -89,17 +106,7 @@ func (o *SQLite) Open(workflowPath string, fOpts *file.Options) error { if err != nil { return err } - - //TODO: load workflow file into the database - // Determine if workflow path is a directory - sts, err := file.Stat(workflowPath, fOpts) - if err != nil { - return fmt.Errorf("problem with workflow path %s %w", workflowPath, err) - } - o.isDir = sts.IsDir - _, err = o.Refresh() - - return err + return nil } func copyFiles(src, dst string, fOpts *file.Options) error { diff --git a/apps/flowlord/cache/workflow.go b/apps/flowlord/cache/workflow.go index 0c4d274..e8ad986 100644 --- a/apps/flowlord/cache/workflow.go +++ b/apps/flowlord/cache/workflow.go @@ -11,6 +11,7 @@ import ( "github.com/hydronica/toml" "github.com/jbsmith7741/go-tools/appenderr" "github.com/pcelvng/task" + "github.com/robfig/cron/v3" "github.com/pcelvng/task-tools/file" "github.com/pcelvng/task-tools/workflow" @@ -25,6 +26,19 @@ type Phase struct { Template string // template used to create the task } +type PhaseDB struct { + Phase + FilePath string // workflow file path + Status string // 
status of the phase (e.g. valid, invalid, warning) +} + +func (p PhaseDB) Topic() string { + return p.Phase.Topic() +} +func (p PhaseDB) Job() string { + return p.Phase.Job() +} + func (p Phase) IsEmpty() bool { return p.Task == "" && p.Rule == "" && p.DependsOn == "" && p.Template == "" } @@ -49,6 +63,7 @@ func (p Phase) Topic() string { return s[0] } +// Deprecated: // ToWorkflowPhase converts cache.Phase to workflow.Phase func (p Phase) ToWorkflowPhase() workflow.Phase { return workflow.Phase{ @@ -79,22 +94,22 @@ func (s *SQLite) Search(task, job string) (path string, ph Phase) { if s == nil { return "", Phase{} } - + s.mu.Lock() defer s.mu.Unlock() - + // Query for phases matching the task topic query := ` - SELECT workflow_file_path, task, depends_on, rule, template, retry, status + SELECT file_path, task, depends_on, rule, template, retry, status FROM workflow_phases WHERE task LIKE ? OR task = ? - ORDER BY workflow_file_path, task + ORDER BY file_path, task LIMIT 1 ` - + var rows *sql.Rows var err error - + if job == "" { // Search by topic only rows, err = s.db.Query(query, task+":%", task) @@ -102,21 +117,21 @@ func (s *SQLite) Search(task, job string) (path string, ph Phase) { // Search by topic:job rows, err = s.db.Query(query, task+":"+job, task+":"+job) } - + if err != nil { return "", Phase{} } defer rows.Close() - + if rows.Next() { var workflowPath, taskStr, dependsOn, rule, template, status string var retry int - + err := rows.Scan(&workflowPath, &taskStr, &dependsOn, &rule, &template, &retry, &status) if err != nil { return "", Phase{} } - + // Check if job matches if specified if job != "" { phase := Phase{ @@ -130,7 +145,7 @@ func (s *SQLite) Search(task, job string) (path string, ph Phase) { return "", Phase{} } } - + return workflowPath, Phase{ Task: taskStr, Rule: rule, @@ -139,121 +154,76 @@ func (s *SQLite) Search(task, job string) (path string, ph Phase) { Template: template, } } - + return "", Phase{} } +// ResetWorkflow deletes all 
phases in the db files +// TODO: May not be needed if Refresh() is used properly. +func (s *SQLite) resetWorkflow() error { + if s == nil { + return fmt.Errorf("sqlite cache is nil") + } + + s.mu.Lock() + defer s.mu.Unlock() + + // Delete all phases for this workflow file + _, err := s.db.Exec("DELETE FROM workflow_phases ") + if err != nil { + return fmt.Errorf("failed to delete workflow phases: %w", err) + } + + // Delete the workflow file record + _, err = s.db.Exec("DELETE FROM workflow_files") + if err != nil { + return fmt.Errorf("failed to delete workflow file record: %w", err) + } + + return nil +} + // Get the Phase associated with the task // looks for matching phases within a workflow defined in meta // that matches the task Type and job. -func (s *SQLite) Get(t task.Task) Phase { +func (s *SQLite) Get(t task.Task) PhaseDB { s.mu.Lock() defer s.mu.Unlock() - + values, _ := url.ParseQuery(t.Meta) - key := values.Get("workflow") + //key := values.Get("workflow") job := t.Job if job == "" { job = values.Get("job") } - - if key == "*" { // search all workflows for first match - query := ` - SELECT workflow_file_path, task, depends_on, rule, template, retry, status + key := t.Type + if job != "" { + key += ":" + job + } + + query := ` + SELECT file_path, task, depends_on, rule, template, retry, status FROM workflow_phases - WHERE task LIKE ? OR task = ? - ORDER BY workflow_file_path, task + WHERE task = ? 
+ ORDER BY file_path, task LIMIT 1 ` - - rows, err := s.db.Query(query, t.Type+":%", t.Type) - if err != nil { - return Phase{} - } - defer rows.Close() - - if rows.Next() { - var workflowPath, taskStr, dependsOn, rule, template, status string - var retry int - - err := rows.Scan(&workflowPath, &taskStr, &dependsOn, &rule, &template, &retry, &status) - if err != nil { - return Phase{} - } - - // Check if job matches if specified - if job != "" { - phase := Phase{ - Task: taskStr, - Rule: rule, - DependsOn: dependsOn, - Retry: retry, - Template: template, - } - if phase.Job() != job { - return Phase{} - } - } - - return Phase{ - Task: taskStr, - Rule: rule, - DependsOn: dependsOn, - Retry: retry, - Template: template, - } - } - return Phase{} - } - - // Search within specific workflow - query := ` - SELECT task, depends_on, rule, template, retry, status - FROM workflow_phases - WHERE workflow_file_path = ? AND (task LIKE ? OR task = ?) - ORDER BY task - LIMIT 1 - ` - - rows, err := s.db.Query(query, key, t.Type+":%", t.Type) + rows, err := s.db.Query(query, key) if err != nil { - return Phase{} + return PhaseDB{Status: err.Error()} } defer rows.Close() - + if rows.Next() { - var taskStr, dependsOn, rule, template, status string - var retry int - - err := rows.Scan(&taskStr, &dependsOn, &rule, &template, &retry, &status) + ph := PhaseDB{} + + err := rows.Scan(&ph.FilePath, &ph.Task, &ph.DependsOn, &ph.Rule, &ph.Template, &ph.Retry, &ph.Status) if err != nil { - return Phase{} - } - - // Check if job matches if specified - if job != "" { - phase := Phase{ - Task: taskStr, - Rule: rule, - DependsOn: dependsOn, - Retry: retry, - Template: template, - } - if phase.Job() != job { - return Phase{} - } - } - - return Phase{ - Task: taskStr, - Rule: rule, - DependsOn: dependsOn, - Retry: retry, - Template: template, + return PhaseDB{Status: err.Error()} } + return ph } - - return Phase{} + return PhaseDB{Status: "not found"} } // Children of the given task t, a child phase 
is one that dependsOn another task @@ -263,46 +233,46 @@ func (s *SQLite) Get(t task.Task) Phase { func (s *SQLite) Children(t task.Task) []Phase { s.mu.Lock() defer s.mu.Unlock() - + if t.Type == "" { return nil } - + values, _ := url.ParseQuery(t.Meta) key := values.Get("workflow") job := t.Job if job == "" { job = values.Get("job") } - + if key == "" { return nil } - + // Find phases that depend on this task query := ` - SELECT task, depends_on, rule, template, retry, status + SELECT task, depends_on, rule, template, retry, status FROM workflow_phases - WHERE workflow_file_path = ? AND (depends_on LIKE ? OR depends_on = ?) + WHERE file_path = ? AND (depends_on LIKE ? OR depends_on = ?) ORDER BY task ` - + rows, err := s.db.Query(query, key, t.Type+":%", t.Type) if err != nil { return nil } defer rows.Close() - + var result []Phase for rows.Next() { var taskStr, dependsOn, rule, template, status string var retry int - + err := rows.Scan(&taskStr, &dependsOn, &rule, &template, &retry, &status) if err != nil { continue } - + // Parse depends_on to check if it matches the task v := strings.Split(dependsOn, ":") depends := v[0] @@ -310,7 +280,7 @@ func (s *SQLite) Children(t task.Task) []Phase { if len(v) > 1 { j = v[1] } - + if depends == t.Type { if j == "" || j == job { result = append(result, Phase{ @@ -323,7 +293,7 @@ func (s *SQLite) Children(t task.Task) []Phase { } } } - + return result } @@ -336,13 +306,13 @@ func (s *SQLite) Refresh() (changedFiles []string, err error) { } return changedFiles, err } - + // List and read all files allFiles, err := listAllFiles(s.workflowPath, s.fOpts) if err != nil { return changedFiles, err } - + errs := appenderr.New() for _, filePath := range allFiles { f, err := s.loadFile(filePath, s.fOpts) @@ -353,9 +323,9 @@ func (s *SQLite) Refresh() (changedFiles []string, err error) { changedFiles = append(changedFiles, f) } } - + // Remove deleted workflows from database - for key := range s.GetWorkflowFiles() { + for _, key := 
range s.GetWorkflowFiles() { found := false for _, v := range allFiles { f := s.filePath(v) @@ -365,11 +335,11 @@ func (s *SQLite) Refresh() (changedFiles []string, err error) { } } if !found { - s.removeWorkflow(key) + errs.Add(s.removeWorkflow(key)) changedFiles = append(changedFiles, "-"+key) } } - + return changedFiles, errs.ErrOrNil() } @@ -409,13 +379,13 @@ func (s *SQLite) loadFile(path string, opts *file.Options) (f string, err error) if sts.IsDir { return "", fmt.Errorf("can not read directory %s", path) } - + // Check if file has changed by comparing checksum existingHash := s.getFileHash(f) if existingHash == sts.Checksum { return "", nil // No changes } - + // Read and parse the workflow file r, err := file.NewReader(path, opts) if err != nil { @@ -425,18 +395,18 @@ func (s *SQLite) loadFile(path string, opts *file.Options) (f string, err error) if err != nil { return "", fmt.Errorf("read-all: %s %w", path, err) } - + var workflow Workflow if _, err := toml.Decode(string(b), &workflow); err != nil { return "", fmt.Errorf("decode: %s %w", string(b), err) } - + // Update database with new workflow data err = s.updateWorkflowInDB(f, sts.Checksum, workflow.Phases) if err != nil { return "", fmt.Errorf("update workflow in db: %w", err) } - + return f, nil } @@ -455,8 +425,9 @@ func (s *SQLite) filePath(p string) (filePath string) { // getFileHash retrieves the current hash for a workflow file func (s *SQLite) getFileHash(filePath string) string { + path := s.filePath(filePath) var hash string - err := s.db.QueryRow("SELECT file_hash FROM workflow_files WHERE file_path = ?", filePath).Scan(&hash) + err := s.db.QueryRow("SELECT file_hash FROM workflow_files WHERE file_path = ?", path).Scan(&hash) if err != nil { return "" } @@ -464,69 +435,58 @@ func (s *SQLite) getFileHash(filePath string) string { } // GetWorkflowFiles returns a map of all workflow files in the database -func (s *SQLite) GetWorkflowFiles() map[string]bool { - files := make(map[string]bool) 
+func (s *SQLite) GetWorkflowFiles() []string { + files := make([]string, 0) rows, err := s.db.Query("SELECT file_path FROM workflow_files") if err != nil { return files } defer rows.Close() - + for rows.Next() { var filePath string if err := rows.Scan(&filePath); err == nil { - files[filePath] = true + files = append(files, filePath) } } return files } // GetPhasesForWorkflow returns all phases for a specific workflow file -func (s *SQLite) GetPhasesForWorkflow(filePath string) ([]Phase, error) { +func (s *SQLite) GetPhasesForWorkflow(filePath string) ([]PhaseDB, error) { rows, err := s.db.Query(` - SELECT task, depends_on, rule, template, retry, status + SELECT file_path, task, depends_on, rule, template, retry, status FROM workflow_phases - WHERE workflow_file_path = ? + WHERE file_path = ? ORDER BY task `, filePath) if err != nil { return nil, err } defer rows.Close() - - var phases []Phase + + var phases []PhaseDB for rows.Next() { - var taskStr, dependsOn, rule, template, status string - var retry int - - err := rows.Scan(&taskStr, &dependsOn, &rule, &template, &retry, &status) + ph := PhaseDB{} + + err := rows.Scan(&ph.FilePath, &ph.Task, &ph.DependsOn, &ph.Rule, &ph.Template, &ph.Retry, &ph.Status) if err != nil { continue } - - phases = append(phases, Phase{ - Task: taskStr, - Rule: rule, - DependsOn: dependsOn, - Retry: retry, - Template: template, - }) + + phases = append(phases, ph) } - + return phases, nil } // updateWorkflowInDB updates the workflow data in the database func (s *SQLite) updateWorkflowInDB(filePath, checksum string, phases []Phase) error { - // Start transaction - tx, err := s.db.Begin() - if err != nil { - return err - } - defer tx.Rollback() - + s.mu.Lock() + defer s.mu.Unlock() + // Update or insert workflow file record - _, err = tx.Exec(` + _, err := s.db.Exec(` INSERT INTO workflow_files (file_path, file_hash, loaded_at, last_modified, is_active) VALUES (?, ?, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP, TRUE) ON CONFLICT (file_path) DO 
UPDATE SET @@ -538,26 +498,31 @@ func (s *SQLite) updateWorkflowInDB(filePath, checksum string, phases []Phase) e if err != nil { return err } - + // Remove existing phases for this workflow - _, err = tx.Exec("DELETE FROM workflow_phases WHERE workflow_file_path = ?", filePath) + _, err = s.db.Exec("DELETE FROM workflow_phases WHERE file_path = ?", filePath) if err != nil { return err } - + // Insert new phases for _, phase := range phases { + task := phase.Task + if !strings.Contains(task, ":") && phase.Job() != "" { + task = task + ":" + phase.Job() + } + phase.Task = task status := s.validatePhase(phase) - _, err = tx.Exec(` - INSERT INTO workflow_phases (workflow_file_path, task, depends_on, rule, template, retry, status) + _, err = s.db.Exec(` + INSERT INTO workflow_phases (file_path, task, depends_on, rule, template, retry, status) VALUES (?, ?, ?, ?, ?, ?, ?) `, filePath, phase.Task, phase.DependsOn, phase.Rule, phase.Template, phase.Retry, status) if err != nil { return err } } - - return tx.Commit() + + return nil } // removeWorkflow removes a workflow and its phases from the database @@ -568,49 +533,48 @@ func (s *SQLite) removeWorkflow(filePath string) error { return err } defer tx.Rollback() - + // Remove phases first - _, err = tx.Exec("DELETE FROM workflow_phases WHERE workflow_file_path = ?", filePath) + _, err = tx.Exec("DELETE FROM workflow_phases WHERE file_path = ?", filePath) if err != nil { return err } - + // Remove workflow file record _, err = tx.Exec("DELETE FROM workflow_files WHERE file_path = ?", filePath) if err != nil { return err } - + return tx.Commit() } // validatePhase validates a phase and returns status message func (s *SQLite) validatePhase(phase Phase) string { + + values, err := url.ParseQuery(phase.Rule) + if err != nil { + return fmt.Sprintf("invalid rule format: %s", phase.Rule) + } + // Basic validation logic - if phase.Rule == "" && phase.DependsOn == "" { - return "invalid phase: rule and dependsOn are blank" + if 
phase.DependsOn == "" && values.Get("cron") == "" && values.Get("files") == "" { + return "non-scheduled phase must have depends_on, cron or files rule" } - + // Check for valid cron rule - if phase.Rule != "" { - values, err := url.ParseQuery(phase.Rule) - if err != nil { - return fmt.Sprintf("invalid rule format: %s", phase.Rule) - } - - cron := values.Get("cron") - if cron != "" { - // Basic cron validation (could be enhanced) - if cron == "invalid" { - return fmt.Sprintf("no valid rule found: cron=%s", cron) - } + + if c := values.Get("cron"); c != "" { + if _, err := cron.ParseStandard(c); err != nil { + return fmt.Sprintf("invalid cron rule: %s", c) } + } - + // Check retry count if phase.Retry > 10 { return "warning: retry count exceeds recommended limit" } - + return "" // No issues } diff --git a/apps/flowlord/cache/workflow_test.go b/apps/flowlord/cache/workflow_test.go new file mode 100644 index 0000000..eb3c0f3 --- /dev/null +++ b/apps/flowlord/cache/workflow_test.go @@ -0,0 +1,307 @@ +package cache + +import ( + "errors" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hydronica/toml" + "github.com/hydronica/trial" + "github.com/pcelvng/task" +) + +func MockSQLite() *SQLite { + s := &SQLite{LocalPath: ":memory:"} + if err := s.initDB(); err != nil { + panic(err) + } + return s +} + +const testPath = "../../../internal/test/" + +func TestLoadFile(t *testing.T) { + fn := func(in string) (string, error) { + cache := MockSQLite() + _, err := cache.loadFile(in, nil) + return cache.getFileHash(in), err + } + cases := trial.Cases[string, string]{ + "read file": { + Input: testPath + "workflow/f1.toml", + Expected: "4422274d9c9f7e987c609687a7702651", // checksum of test file + }, + "stat error": { + Input: "nop://stat_err", + ExpectedErr: errors.New("nop stat error"), + }, + "dir error": { + Input: "nop://stat_dir", + ExpectedErr: errors.New("can not read directory"), + }, + "read err": { + Input: "nop://init_err", + ExpectedErr: errors.New("new 
reader"), + }, + "decode error": { + Input: testPath + "invalid.toml", + ExpectedErr: errors.New("decode"), + }, + } + trial.New(fn, cases).SubTest(t) +} + +// TestLoadPhase is used to validated that a phase is correctly loaded into the DB +func TestLoadPhase(t *testing.T) { + fn := func(ph Phase) (PhaseDB, error) { + cache := MockSQLite() + if err := cache.updateWorkflowInDB("test.toml", "NA", []Phase{ph}); err != nil { + return PhaseDB{}, err + } + return cache.Get(task.Task{Type: ph.Task, Meta: "workflow=test.toml"}), nil + } + cases := trial.Cases[Phase, PhaseDB]{} + trial.New(fn, cases).SubTest(t) +} + +func TestToml(t *testing.T) { + v := ` +[[phase]] +task = "task1" +rule = "cron=0 * * * *&offset=-4h&job=t2&retry_delay=10ms" +retry = 3 +template = "?date={yyyy}-{mm}-{dd}T{hh}" + +[[phase]] +task = "task2" +dependsOn = "task1" +rule = "" +retry = 3 +template = "{meta:file}?time={yyyy}-{mm}-{dd}" +` + w := &Workflow{} + + if _, err := toml.Decode(v, w); err != nil { + t.Fatalf(err.Error()) + } + if len(w.Phases) != 2 { + t.Errorf("Expected 2 phases got %d", len(w.Phases)) + t.Log(spew.Sdump(w.Phases)) + } + +} + +func TestRefresh(t *testing.T) { + type Cache struct { + path string + isDir bool + Workflows map[string]Workflow + } + + fn := func(c *Cache) (int, error) { + sqlite := MockSQLite() + for path, workflow := range c.Workflows { + if err := sqlite.updateWorkflowInDB(path, workflow.Checksum, workflow.Phases); err != nil { + return 0, err + } + } + sqlite.workflowPath = c.path + sqlite.isDir = c.isDir + _, err := sqlite.Refresh() + return len(sqlite.GetWorkflowFiles()), err + } + cases := trial.Cases[*Cache, int]{ + "single file": { + Input: &Cache{path: testPath + "workflow/f1.toml"}, + Expected: 1, // load 1 file + }, + "folder": { + Input: &Cache{path: testPath + "workflow", isDir: true}, + Expected: 4, // load folder with 2 files + }, + "sub-folder": { + Input: &Cache{path: testPath + "parent", isDir: true}, + Expected: 2, // load folder with 1 files 
and sub-folder with 1 file + }, + "error case": { + Input: &Cache{path: "nop://err", isDir: true}, + ShouldErr: true, + }, + "file removed": { + Input: &Cache{ + path: testPath + "workflow", + isDir: true, + Workflows: map[string]Workflow{ + "missing.toml": {}, + "f1.toml": {}, + "f2.toml": {}, + "f3.toml": {}, + }, + }, + Expected: 4, + }, + "keep loaded": { + Input: &Cache{ + path: testPath + "workflow", + isDir: true, + Workflows: map[string]Workflow{ + "f1.toml": { + Checksum: "34cf5142fbd029fa778ee657592d03ce", + }, + "f2.toml": { + Checksum: "eac7716a13d9dea0d630c5d8b1e6c6b1", + }, + }, + }, + Expected: 4, + }, + } + trial.New(fn, cases).SubTest(t) +} + +func TestGet(t *testing.T) { + cache := MockSQLite() + err := cache.updateWorkflowInDB("workflow.toml", "NA", []Phase{ + {Task: "task1"}, + {Task: "dup"}, + {Task: "task2", DependsOn: "task1"}, + {Task: "task3", DependsOn: "task2"}, + {Task: "task4", DependsOn: "task2"}, + }) + if err != nil { + t.Fatal(err) + } + err = cache.updateWorkflowInDB("w2job.toml", "NA", []Phase{ + {Task: "dup"}, + {Task: "t2", Rule: "job=j1"}, + {Task: "t2", Rule: "job=j2"}, + {Task: "t2:j3", Rule: ""}, + }) + if err != nil { + t.Fatal(err) + } + fn := func(t task.Task) (Phase, error) { + return cache.Get(t).Phase, nil + } + cases := trial.Cases[task.Task, Phase]{ + "no meta": { + Input: task.Task{Type: "task1"}, + Expected: Phase{Task: "task1"}, + }, + "blank task": { + Input: task.Task{Meta: "workflow=workflow.toml"}, + Expected: Phase{}, + }, + "not found": { + Input: task.Task{Type: "missing", Meta: "workflow=workflow.toml"}, + Expected: Phase{}, + }, + "task_job": { + Input: task.Task{Type: "t2", Job: "j2", Meta: "workflow=*"}, + Expected: Phase{Rule: "job=j2", Task: "t2:j2"}, + }, + "task2": { + Input: task.Task{Type: "task2", Meta: "workflow=workflow.toml"}, + Expected: Phase{Task: "task2", DependsOn: "task1"}, + }, + "task=t2 with job=j1": { + Input: task.Task{Type: "t2", Meta: "workflow=w2job.toml&job=j1"}, + Expected: 
Phase{Task: "t2:j1", Rule: "job=j1"}, + }, + "job does not exist": { + Input: task.Task{Type: "t2", Meta: "workflow=w2job.toml&job=invalid"}, + Expected: Phase{}, + }, + "wildcard search": { + Input: task.Task{Type: "t2", Meta: "workflow=*&job=j3"}, + Expected: Phase{Task: "t2:j3"}, + }, + "wildcard with same task in different files": { // picks first match, results will vary + Input: task.Task{Type: "dup", Meta: "workflow=*"}, + Expected: Phase{Task: "dup"}, + }, + } + trial.New(fn, cases).SubTest(t) +} + +func TestChildren(t *testing.T) { + cache := MockSQLite() + err := cache.updateWorkflowInDB("workflow.toml", "NA", []Phase{ + {Task: "task1"}, + {Task: "task2", DependsOn: "task1"}, + {Task: "task3", DependsOn: "task2"}, + {Task: "task4", DependsOn: "task2"}, + {Task: "task5", DependsOn: "task1:j4"}, + }) + if err != nil { + t.Fatal(err) + } + fn := func(t task.Task) ([]Phase, error) { + return cache.Children(t), nil + } + cases := trial.Cases[task.Task, []Phase]{ + "no meta": { + Input: task.Task{Type: "task1"}, + Expected: []Phase(nil), + }, + "blank task": { + Input: task.Task{Meta: "workflow=workflow.toml"}, + Expected: []Phase(nil), + }, + "task1": { + Input: task.Task{Type: "task1", Meta: "workflow=workflow.toml"}, + Expected: []Phase{{Task: "task2", DependsOn: "task1"}}, + }, + "task2": { + Input: task.Task{Type: "task2", Meta: "workflow=workflow.toml"}, + Expected: []Phase{{Task: "task3", DependsOn: "task2"}, {Task: "task4", DependsOn: "task2"}}, + }, + "task3": { + Input: task.Task{Type: "task3", Meta: "workflow=workflow.toml"}, + Expected: []Phase{}, + }, + "task4": { + Input: task.Task{Type: "task4", Meta: "workflow=workflow.toml"}, + Expected: []Phase{}, + }, + "task1:j4": { + Input: task.Task{Type: "task1", Meta: "workflow=workflow.toml&job=j4"}, + Expected: []Phase{ + {Task: "task2", DependsOn: "task1"}, + {Task: "task5", DependsOn: "task1:j4"}, + }, + }, + } + trial.New(fn, cases).SubTest(t) +} + +func TestCache_FilePath(t *testing.T) { + // TODO: 
remove receive struct as it is unneeded + fn := func(v trial.Input) (string, error) { + c := &SQLite{workflowPath: v.Slice(0).String()} + return c.filePath(v.Slice(1).String()), nil + } + cases := trial.Cases[trial.Input, string]{ + "single file": { + Input: trial.Args("./path", "./path/file.toml"), + Expected: "file.toml", + }, + "same name": { + Input: trial.Args("./path/file.toml", "./path/file.toml"), + Expected: "file.toml", + }, + "sub directory": { + Input: trial.Args("./path", "./path/sub/file.toml"), + Expected: "sub/file.toml", + }, + "embedded": { + Input: trial.Args("./path", "root/folder/path/file.toml"), + Expected: "file.toml", + }, + "embedded sub": { + Input: trial.Args("./path", "root/path/sub/file.toml"), + Expected: "sub/file.toml", + }, + } + trial.New(fn, cases).SubTest(t) +} diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index 3d35519..c7b74be 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -115,14 +115,14 @@ func (tm *taskMaster) Info(w http.ResponseWriter, r *http.Request) { // create a copy of all workflows wCache := make(map[string]map[string]workflow.Phase) // [file][task:job]Phase workflowFiles := tm.taskCache.GetWorkflowFiles() - for filePath := range workflowFiles { + for _, filePath := range workflowFiles { phases, err := tm.taskCache.GetPhasesForWorkflow(filePath) if err != nil { continue } phaseMap := make(map[string]workflow.Phase) for _, j := range phases { - phaseMap[pName(j.Topic(), j.Job())] = j.ToWorkflowPhase() + phaseMap[pName(j.Phase.Topic(), j.Phase.Job())] = j.ToWorkflowPhase() } wCache[filePath] = phaseMap } @@ -649,35 +649,23 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri } // workflowHTML renders the workflow phases HTML page -func workflowHTML(cache *cache.SQLite) []byte { +func workflowHTML(tCache *cache.SQLite) []byte { // Get all workflow files and their phases - workflowFiles := cache.GetWorkflowFiles() - - var allPhases 
[]WorkflowPhaseView + workflowFiles := tCache.GetWorkflowFiles() + workflowFileSummary := make(map[string]int) - - for filePath := range workflowFiles { - phases, err := cache.GetPhasesForWorkflow(filePath) + allPhases := make([]cache.PhaseDB, 0) + + for _, filePath := range workflowFiles { + phases, err := tCache.GetPhasesForWorkflow(filePath) if err != nil { continue } - + workflowFileSummary[filePath] = len(phases) - - for _, phase := range phases { - allPhases = append(allPhases, WorkflowPhaseView{ - WorkflowFile: filePath, - Task: phase.Topic(), - Job: phase.Job(), - Rule: phase.Rule, - DependsOn: phase.DependsOn, - Retry: phase.Retry, - Template: phase.Template, - Status: "", // TODO: Get status from database - }) - } + allPhases = append(allPhases, phases...) } - + data := map[string]interface{}{ "Phases": allPhases, "WorkflowFileSummary": workflowFileSummary, @@ -861,18 +849,6 @@ type response struct { code int } -// WorkflowPhaseView represents a workflow phase for display in the web interface -type WorkflowPhaseView struct { - WorkflowFile string - Task string - Job string - Rule string - DependsOn string - Retry int - Template string - Status string -} - func (tm *taskMaster) backload(req request) response { // handle start and end date at := parseTime(req.At) diff --git a/apps/flowlord/handler/workflow.tmpl b/apps/flowlord/handler/workflow.tmpl index 1427362..234d2db 100644 --- a/apps/flowlord/handler/workflow.tmpl +++ b/apps/flowlord/handler/workflow.tmpl @@ -58,7 +58,7 @@ Workflow File - Task + Topic Job Rule Depends On @@ -70,8 +70,8 @@ {{range .Phases}} - {{.WorkflowFile}} - {{.Task}} + {{.FilePath}} + {{.Topic}} {{.Job}} {{if ge (len .Template) 80}}{{slice .Template 0 80}}...{{else}}{{.Template}}{{end}} - - {{if .Status}}{{.Status}}{{else}}OK{{end}} + + + {{if .Status}}{{.Status}}{{else}}OK{{end}} + {{end}} diff --git a/apps/flowlord/handler_test.go b/apps/flowlord/handler_test.go index 7308b1f..17833a1 100644 --- a/apps/flowlord/handler_test.go 
+++ b/apps/flowlord/handler_test.go @@ -17,11 +17,10 @@ import ( const testPath = "../../internal/test" func TestMain(t *testing.M) { - staticPath = "./static" - t.Run() - os.Remove(":memory") -} + staticPath = "./static" + t.Run() +} // loadTaskViewData loads TaskView data from a JSON file func loadTaskViewData(filename string) ([]cache.TaskView, error) { @@ -40,8 +39,8 @@ func loadTaskViewData(filename string) ([]cache.TaskView, error) { } func TestBackloader(t *testing.T) { - sqlDB := &cache.SQLite{LocalPath: ":memory"} - err := sqlDB.Open(testPath+"/workflow/f3.toml", nil ) + sqlDB := &cache.SQLite{LocalPath: ":memory:"} + err := sqlDB.Open(testPath+"/workflow/f3.toml", nil) //cache, err := workflow.New(testPath+"/workflow/f3.toml", nil) today := time.Now().Format("2006-01-02") toHour := time.Now().Format(DateHour) @@ -426,7 +425,6 @@ func TestAlertHTML(t *testing.T) { } - // TestFilesHTML generate a html file based on the files.tmpl it is used for vision examination of the files func TestFilesHTML(t *testing.T) { // Create sample file messages @@ -456,15 +454,15 @@ func TestFilesHTML(t *testing.T) { date := time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC) html := filesHTML(files, date) - // Write HTML to a file for easy viewing - outputFile := "handler/files_preview.html" - err := os.WriteFile(outputFile, html, 0644) - if err != nil { - t.Fatalf("Failed to write HTML file: %v", err) - } - - t.Logf("Alert preview generated and saved to: ./%s", outputFile) - + // Write HTML to a file for easy viewing + outputFile := "handler/files_preview.html" + err := os.WriteFile(outputFile, html, 0644) + if err != nil { + t.Fatalf("Failed to write HTML file: %v", err) + } + + t.Logf("Alert preview generated and saved to: ./%s", outputFile) + // Basic checks if len(html) == 0 { t.Error("Expected HTML output, got empty") @@ -472,7 +470,6 @@ func TestFilesHTML(t *testing.T) { } - func TestTaskHTML(t *testing.T) { // Load TaskView data from JSON file testTasks, err := 
loadTaskViewData("test/tasks.json") @@ -504,8 +501,8 @@ func TestTaskHTML(t *testing.T) { func TestWorkflowHTML(t *testing.T) { // Load workflow files - taskCache := &cache.SQLite{LocalPath: ":memory"} - if err := taskCache.Open(testPath+"/workflow/", nil); err != nil { + taskCache := &cache.SQLite{LocalPath: ":memory:"} + if err := taskCache.Open(testPath+"/workflow/", nil); err != nil { t.Fatalf("Failed to create test cache: %v", err) } @@ -528,18 +525,17 @@ func TestWorkflowHTML(t *testing.T) { } - func TestAboutHTML(t *testing.T) { // Create a real SQLite cache for testing - taskCache := &cache.SQLite{LocalPath: ":memory"} - if err := taskCache.Open(testPath+"/workflow/", nil); err != nil { + taskCache := &cache.SQLite{LocalPath: ":memory:"} + if err := taskCache.Open(testPath+"/workflow/", nil); err != nil { t.Fatalf("Failed to create test cache: %v", err) } // Create a mock taskMaster with test data tm := &taskMaster{ - initTime: time.Now().Add(-2 * time.Hour), // 2 hours ago - nextUpdate: time.Now().Add(30 * time.Minute), // 30 minutes from now + initTime: time.Now().Add(-2 * time.Hour), // 2 hours ago + nextUpdate: time.Now().Add(30 * time.Minute), // 30 minutes from now lastUpdate: time.Now().Add(-15 * time.Minute), // 15 minutes ago taskCache: taskCache, } diff --git a/apps/flowlord/taskmaster.go b/apps/flowlord/taskmaster.go index d3d630c..daa1a22 100644 --- a/apps/flowlord/taskmaster.go +++ b/apps/flowlord/taskmaster.go @@ -41,11 +41,11 @@ type taskMaster struct { doneTopic string failedTopic string taskCache *cache.SQLite - HostName string - port int - cron *cron.Cron - slack *Notification - files []fileRule + HostName string + port int + cron *cron.Cron + slack *Notification + files []fileRule alerts chan task.Task } @@ -230,22 +230,22 @@ func validatePhase(p workflow.Phase) string { // schedule the tasks and refresh the schedule when updated func (tm *taskMaster) schedule() (err error) { errs := make([]error, 0) - + // Get all workflow files from 
database workflowFiles := tm.taskCache.GetWorkflowFiles() - + if len(workflowFiles) == 0 { return fmt.Errorf("no workflows found check path %s", tm.path) } - + // Get all phases for each workflow file - for filePath := range workflowFiles { + for _, filePath := range workflowFiles { phases, err := tm.taskCache.GetPhasesForWorkflow(filePath) if err != nil { errs = append(errs, fmt.Errorf("error getting phases for %s: %w", filePath, err)) continue } - + for _, w := range phases { rules, _ := url.ParseQuery(w.Rule) cronSchedule := rules.Get("cron") @@ -264,8 +264,8 @@ func (tm *taskMaster) schedule() (err error) { } if cronSchedule == "" { - log.Printf("no cron: task:%s, rule:%s", w.Task, w.Rule) - //TODO: update the phase table with this status message + //log.Printf("no cron: task:%s, rule:%s", w.Task, w.Rule) + // this should already be in the status field continue } @@ -275,7 +275,9 @@ func (tm *taskMaster) schedule() (err error) { } if _, err = tm.cron.AddJob(cronSchedule, j); err != nil { - // TODO: update the phase table with this status messgae + // TODO: Remove log + fmt.Println(cronSchedule, j) + errs = append(errs, fmt.Errorf("invalid rule for %s:%s %s %w", filePath, w.Task, w.Rule, err)) } } diff --git a/apps/flowlord/taskmaster_test.go b/apps/flowlord/taskmaster_test.go index 85e56e4..065ec39 100644 --- a/apps/flowlord/taskmaster_test.go +++ b/apps/flowlord/taskmaster_test.go @@ -23,8 +23,8 @@ const base_test_path string = "../../internal/test/" func TestTaskMaster_Process(t *testing.T) { delayRegex := regexp.MustCompile(`delayed=(\d+.\d+)`) - // Initialize taskCache for the test - taskCache := &cache.SQLite{LocalPath: ":memory"} + // Initialize taskCache for the test + taskCache := &cache.SQLite{LocalPath: ":memory:"} fatalErr := taskCache.Open(base_test_path+"workflow", nil) if fatalErr != nil { t.Fatal("cache init", fatalErr) @@ -83,6 +83,7 @@ func TestTaskMaster_Process(t *testing.T) { "task1 attempt 0": { Input: task.Task{ Type: "task1", + Job: "t2", 
Info: "?date=2019-12-12", Result: task.ErrResult, Started: "now", @@ -101,6 +102,7 @@ func TestTaskMaster_Process(t *testing.T) { "task1 attempt 2": { Input: task.Task{ Type: "task1", + Job: "t2", Info: "?date=2019-12-12", Result: task.ErrResult, ID: "UUID_task1_attempt2", @@ -275,8 +277,8 @@ func TestTaskMaster_Schedule(t *testing.T) { Files []fileRule } fn := func(in string) (expected, error) { - tm := taskMaster{ cron: cron.New()} - tm.taskCache = &cache.SQLite{LocalPath: ":memory"} + tm := taskMaster{cron: cron.New()} + tm.taskCache = &cache.SQLite{LocalPath: ":memory:"} if err := tm.taskCache.Open(base_test_path+in, nil); err != nil { return expected{}, err } From 9c66f886dc05b7fcf2b2d57e946f09122cb4e61e Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Fri, 17 Oct 2025 16:04:12 -0600 Subject: [PATCH 22/40] misc formating adjustments --- apps/flowlord/cache/schema.sql | 39 +++++-- apps/flowlord/cache/sqlite.go | 3 +- apps/flowlord/handler/static/style.css | 149 ++++++++++++++++++++++++- apps/flowlord/handler/task.tmpl | 71 ++++++++---- apps/flowlord/handler/workflow.tmpl | 24 ++-- apps/flowlord/test/tasks.json | 64 +++++++---- 6 files changed, 271 insertions(+), 79 deletions(-) diff --git a/apps/flowlord/cache/schema.sql b/apps/flowlord/cache/schema.sql index 760b29f..3411806 100644 --- a/apps/flowlord/cache/schema.sql +++ b/apps/flowlord/cache/schema.sql @@ -21,6 +21,7 @@ CREATE INDEX IF NOT EXISTS idx_task_records_type_job ON task_records (type, job) CREATE INDEX IF NOT EXISTS idx_task_records_date_range ON task_records (created, ended); -- Create a view that calculates task and queue times +DROP VIEW IF EXISTS tasks; CREATE VIEW IF NOT EXISTS tasks AS SELECT task_records.id, @@ -33,21 +34,35 @@ SELECT task_records.msg, task_records.result, -- Calculate task duration in seconds - CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) as task_seconds, + CASE + WHEN task_records.ended IS NULL OR task_records.started 
IS NULL OR task_records.ended = '' OR task_records.started = '' THEN 0 + ELSE ROUND((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60) + END as task_seconds, -- Format task duration as HH:MM:SS - strftime('%H:%M:%S', - CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) / 3600 || ':' || - CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) % 3600 / 60 || ':' || - CAST((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60 AS INTEGER) % 60 - ) as task_time, + CASE + WHEN task_records.ended IS NULL OR task_records.started IS NULL OR task_records.ended = '' OR task_records.started = '' THEN 'N/A' + ELSE + printf('%02d:%02d:%02d', + ROUND((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60) / 3600, + ROUND((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60) % 3600 / 60, + ROUND((julianday(task_records.ended) - julianday(task_records.started)) * 24 * 60 * 60) % 60 + ) + END as task_time, -- Calculate queue time in seconds - CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) as queue_seconds, + CASE + WHEN task_records.started IS NULL OR task_records.created IS NULL OR task_records.started = '' OR task_records.created = '' THEN 0 + ELSE ROUND((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60) + END as queue_seconds, -- Format queue duration as HH:MM:SS - strftime('%H:%M:%S', - CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) / 3600 || ':' || - CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) % 3600 / 60 || ':' || - CAST((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60 AS INTEGER) % 60 - ) as queue_time, + CASE + WHEN task_records.started IS NULL OR 
task_records.created IS NULL OR task_records.started = '' OR task_records.created = '' THEN 'N/A' + ELSE + printf('%02d:%02d:%02d', + ROUND((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60) / 3600, + ROUND((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60) % 3600 / 60, + ROUND((julianday(task_records.started) - julianday(task_records.created)) * 24 * 60 * 60) % 60 + ) + END as queue_time, task_records.created, task_records.started, task_records.ended diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index a57a066..880c2c3 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -940,7 +940,8 @@ func (s *SQLite) GetTasksByDate(date time.Time, taskType, job, result string) ([ &t.Created, &t.Started, &t.Ended, ) if err != nil { - continue + t.Result = string(task.ErrResult) + t.Msg = err.Error() } tasks = append(tasks, t) } diff --git a/apps/flowlord/handler/static/style.css b/apps/flowlord/handler/static/style.css index 75ec5ed..87bfc23 100644 --- a/apps/flowlord/handler/static/style.css +++ b/apps/flowlord/handler/static/style.css @@ -275,12 +275,15 @@ body { /* ===== TABLE STYLES ===== */ .table-container { overflow-x: auto; + max-width: 100%; } table { border-collapse: collapse; width: 100%; margin: 0; + table-layout: fixed; + max-width: 100%; } th, td { @@ -336,6 +339,55 @@ tr:hover { color: #6c757d; } +/* ===== WORKFLOW TABLE COLUMN WIDTHS ===== */ +th.workflow-file-cell, td.workflow-file-cell { + width: 8%; + max-width: 120px; + word-break: break-all; +} + +th.task-cell, td.task-cell { + width: 8%; + max-width: 100px; + word-break: break-all; +} + +th.job-cell, td.job-cell { + width: 6%; + max-width: 100px; + word-break: break-all; +} + +th.rule-cell, td.rule-cell { + width: 25%; + max-width: 350px; + word-break: break-all; +} + +th.depends-on-cell, td.depends-on-cell { + width: 10%; + max-width: 120px; + word-break: break-all; +} + 
+th.retry-cell, td.retry-cell { + width: 3%; + max-width: 60px; + text-align: center; +} + +th.template-cell, td.template-cell { + width: 25%; + max-width: 350px; + word-break: break-all; +} + +th.status-cell, td.status-cell { + width: 8%; + max-width: 80px; + text-align: center; +} + .id-cell, .info-cell, .meta-cell { cursor: pointer; position: relative; @@ -346,8 +398,7 @@ tr:hover { background-color: #f8f9fa; } -.id-cell.truncated, .info-cell.truncated, .meta-cell.truncated { - max-width: 200px; +.id-cell.truncated, .info-cell.truncated, .meta-cell.truncated, .rule-cell.truncated, .template-cell.truncated { overflow: hidden; text-overflow: ellipsis; white-space: nowrap; @@ -357,7 +408,11 @@ tr:hover { max-width: 100px; } -.id-cell.expanded, .info-cell.expanded, .meta-cell.expanded { +.rule-cell.truncated, .template-cell.truncated { + max-width: 250px; +} + +.id-cell.expanded, .info-cell.expanded, .meta-cell.expanded, .rule-cell.expanded, .template-cell.expanded { max-width: none; white-space: normal; word-break: break-all; @@ -368,12 +423,17 @@ tr:hover { margin: 2px; } -.info-cell.expanded, .meta-cell.expanded { +.info-cell.expanded, .meta-cell.expanded, .rule-cell.expanded, .template-cell.expanded { word-wrap: break-word; } -.id-cell.copyable, .info-cell.copyable, .meta-cell.copyable { +.id-cell.copyable, .info-cell.copyable, .meta-cell.copyable, .rule-cell.copyable, .template-cell.copyable { position: relative; + cursor: pointer; +} + +.rule-cell.copyable:hover, .template-cell.copyable:hover { + background-color: #f8f9fa; } .type-cell { @@ -394,13 +454,90 @@ tr:hover { .result-error { color: #dc3545; } .result-alert { color: #fd7e14; } .result-warn { color: #ffc107; } +.result-running { color: #007bff; } .time-cell { white-space: nowrap; } +/* ===== TASK TABLE COLUMN WIDTHS ===== */ +/* ID column - compact for task IDs */ +th.id-column, td.id-column { + width: 7%; + max-width: 100px; +} + +/* Type and Job columns - standardized compact width */ 
+th.type-column, td.type-column, +th.job-column, td.job-column { + width: 6%; + max-width: 80px; +} + +/* Message column - takes remaining space */ +th.message-column, td.message-column { + width: auto; + min-width: 200px; +} + +/* Result column - compact for status */ +th.result-column, td.result-column { + width: 6%; + max-width: 80px; +} + +/* Info and Meta columns - medium space */ +th.info-column, td.info-column, +th.meta-column, td.meta-column { + width: 12%; + max-width: 180px; +} + +/* Created column - standard timestamp width */ +th.created-column, td.created-column { + width: 10%; + max-width: 180px; +} + +/* Duration columns - standardized for hh:mm:ss format */ +th.queue-column, td.queue-column, +th.process-column, td.process-column { + width: 5%; + max-width: 70px; +} + +/* ===== STANDARDIZED COLUMN CLASSES ===== */ +.duration-column { + width: 5%; + max-width: 70px; + white-space: nowrap; + text-align: center; +} + +.timestamp-column { + width: 10%; + max-width: 180px; + white-space: nowrap; +} + +.compact-column { + width: 6%; + max-width: 80px; + white-space: nowrap; +} + +.medium-column { + width: 12%; + max-width: 180px; +} + +.message-column { + width: auto; + min-width: 200px; +} + .message-cell { - max-width: 300px; + max-width: none; word-wrap: break-word; line-height: 1.4; cursor: pointer; diff --git a/apps/flowlord/handler/task.tmpl b/apps/flowlord/handler/task.tmpl index 0de9fe2..be8392e 100644 --- a/apps/flowlord/handler/task.tmpl +++ b/apps/flowlord/handler/task.tmpl @@ -64,6 +64,10 @@

Filters

+
+ + +
+
{{if .ShowRefresh}} @@ -67,45 +67,250 @@ document.addEventListener('DOMContentLoaded', function() { const datePicker = document.getElementById('datePicker'); const todayBtn = document.getElementById('todayBtn'); + // Dates with data from the server + const datesWithData = new Set([ + {{range $index, $date := .DatesWithData}}{{if $index}},{{end}}"{{$date}}"{{end}} + ]); + // Set today's date as default if no date is provided if (!datePicker.value) { const today = new Date().toISOString().split('T')[0]; datePicker.value = today; } - // Handle date change - datePicker.addEventListener('change', function() { - const selectedDate = this.value; - if (selectedDate) { - // Get current page and preserve other query parameters - const currentUrl = new URL(window.location); - const currentPage = currentUrl.pathname; + // Update data indicator + function updateDataIndicator(date) { + if (datesWithData.has(date)) { + datePicker.classList.add('has-data'); + } else { + datePicker.classList.remove('has-data'); + } + } + + // Initial indicator update + updateDataIndicator(datePicker.value); + + // Create custom date picker dropdown + const dropdown = document.createElement('div'); + dropdown.id = 'dateDropdown'; + dropdown.className = 'date-dropdown'; + dropdown.style.display = 'none'; + + // Generate calendar for current month and surrounding dates + function generateCalendar() { + const currentDate = datePicker.value ? new Date(datePicker.value + 'T12:00:00') : new Date(); + const year = currentDate.getFullYear(); + const month = currentDate.getMonth(); + + let html = '
'; + html += ''; + html += '' + currentDate.toLocaleDateString('en-US', {month: 'long', year: 'numeric'}) + ''; + html += ''; + html += '
'; + + html += '
'; + html += '
Su
Mo
Tu
'; + html += '
We
Th
Fr
Sa
'; + + const firstDay = new Date(year, month, 1); + const lastDay = new Date(year, month + 1, 0); + const startDay = firstDay.getDay(); + + // Add empty cells for days before month starts + for (let i = 0; i < startDay; i++) { + html += '
'; + } + + // Add days of month + for (let day = 1; day <= lastDay.getDate(); day++) { + const dateStr = year + '-' + String(month + 1).padStart(2, '0') + '-' + String(day).padStart(2, '0'); + const hasData = datesWithData.has(dateStr); + const isSelected = dateStr === datePicker.value; - // Build new URL with date parameter - const newUrl = new URL(currentPage, window.location.origin); - newUrl.searchParams.set('date', selectedDate); + let classes = 'day-cell'; + if (hasData) classes += ' has-data'; + if (isSelected) classes += ' selected'; + + html += '
' + day + '
'; + } + + html += '
'; + dropdown.innerHTML = html; + } + + // Toggle dropdown + datePicker.addEventListener('click', function(e) { + e.stopPropagation(); + if (dropdown.style.display === 'none') { + generateCalendar(); + dropdown.style.display = 'block'; - // Preserve other query parameters (type, job, result, etc.) - const otherParams = ['type', 'job', 'result', 'sort', 'direction']; - otherParams.forEach(param => { - if (currentUrl.searchParams.has(param)) { - newUrl.searchParams.set(param, currentUrl.searchParams.get(param)); - } - }); + // Position dropdown + const rect = datePicker.getBoundingClientRect(); + dropdown.style.position = 'absolute'; + dropdown.style.top = (rect.bottom + 5) + 'px'; + dropdown.style.left = rect.left + 'px'; + } else { + dropdown.style.display = 'none'; + } + }); + + // Handle dropdown clicks + dropdown.addEventListener('click', function(e) { + e.stopPropagation(); + + if (e.target.classList.contains('day-cell') && !e.target.classList.contains('empty')) { + const selectedDate = e.target.getAttribute('data-date'); + datePicker.value = selectedDate; + updateDataIndicator(selectedDate); + dropdown.style.display = 'none'; - // Navigate to new URL - window.location.href = newUrl.toString(); + // Navigate to date + navigateToDate(selectedDate); + } else if (e.target.classList.contains('month-nav')) { + const dir = parseInt(e.target.getAttribute('data-dir')); + const currentDate = new Date(datePicker.value + 'T12:00:00'); + currentDate.setMonth(currentDate.getMonth() + dir); + datePicker.value = currentDate.toISOString().split('T')[0]; + generateCalendar(); } }); + // Close dropdown when clicking outside + document.addEventListener('click', function() { + dropdown.style.display = 'none'; + }); + // Handle today button - todayBtn.addEventListener('click', function() { + todayBtn.addEventListener('click', function(e) { + e.stopPropagation(); const today = new Date().toISOString().split('T')[0]; datePicker.value = today; - - // Trigger change event - 
datePicker.dispatchEvent(new Event('change')); + updateDataIndicator(today); + dropdown.style.display = 'none'; + navigateToDate(today); }); + + // Navigate to selected date + function navigateToDate(selectedDate) { + const currentUrl = new URL(window.location); + const currentPage = currentUrl.pathname; + + const newUrl = new URL(currentPage, window.location.origin); + newUrl.searchParams.set('date', selectedDate); + + // Preserve other query parameters + const otherParams = ['type', 'job', 'result', 'sort', 'direction']; + otherParams.forEach(param => { + if (currentUrl.searchParams.has(param)) { + newUrl.searchParams.set(param, currentUrl.searchParams.get(param)); + } + }); + + window.location.href = newUrl.toString(); + } + + // Append dropdown to body + document.body.appendChild(dropdown); }); + {{end}} diff --git a/apps/flowlord/handler_test.go b/apps/flowlord/handler_test.go index d3ee1cd..338f7c4 100644 --- a/apps/flowlord/handler_test.go +++ b/apps/flowlord/handler_test.go @@ -407,7 +407,9 @@ func TestAlertHTML(t *testing.T) { } // Generate HTML using the alertHTML function - htmlContent := alertHTML(sampleAlerts, trial.TimeDay("2024-01-15")) + // Pass sample dates with data for calendar highlighting + datesWithData := []string{"2024-01-14", "2024-01-15", "2024-01-16"} + htmlContent := alertHTML(sampleAlerts, trial.TimeDay("2024-01-15"), datesWithData) // Validate HTML using the new function if err := validateHTML(htmlContent); err != nil { @@ -452,7 +454,9 @@ func TestFilesHTML(t *testing.T) { } date := time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC) - html := filesHTML(files, date) + // Pass sample dates with data for calendar highlighting + datesWithData := []string{ "2024-01-15"} + html := filesHTML(files, date, datesWithData) // Validate HTML using the new function if err := validateHTML(html); err != nil { @@ -519,7 +523,9 @@ func TestTaskHTML(t *testing.T) { date := trial.TimeDay("2024-01-15") // Test with no filters - summary will be generated from 
tasks data - html := taskHTML(testTasks, date, "", "", "") + // Pass sample dates with data for calendar highlighting + datesWithData := []string{"2024-01-15"} + html := taskHTML(testTasks, date, "", "", "", datesWithData) // Validate HTML using the new function if err := validateHTML(html); err != nil { @@ -569,12 +575,20 @@ func TestAboutHTML(t *testing.T) { t.Fatalf("Failed to create test cache: %v", err) } + // Create a mock Notification for slack + notification := &Notification{ + MinFrequency: 5 * time.Minute, + MaxFrequency: 30 * time.Minute, + } + notification.currentDuration.Store(int64(10 * time.Minute)) + // Create a mock taskMaster with test data tm := &taskMaster{ initTime: time.Now().Add(-2 * time.Hour), // 2 hours ago nextUpdate: time.Now().Add(30 * time.Minute), // 30 minutes from now lastUpdate: time.Now().Add(-15 * time.Minute), // 15 minutes ago taskCache: taskCache, + slack: notification, } // Generate HTML using the aboutHTML method diff --git a/tmpl/tmpl_test.go b/tmpl/tmpl_test.go index 8abd766..c70ff8b 100644 --- a/tmpl/tmpl_test.go +++ b/tmpl/tmpl_test.go @@ -382,6 +382,11 @@ func TestPrintDates(t *testing.T) { Input: trial.Times(f, "2018/04/09T00", "2018/04/10T00", "2018/04/11T00", "2018/04/12T00"), Expected: "2018/04/09-2018/04/12", }, + "daily records offset": // TODO: Need to know if task is daily or hourly or monthly + { + Input: trial.Times(f, "2018/04/09T04", "2018/04/10T04", "2018/04/11T04", "2018/04/12T06"), + Expected: "2018/04/09-2018/04/12", + }, "daily records with gaps": { Input: trial.Times(f, "2018/04/09T00", "2018/04/10T00", "2018/04/11T00", "2018/04/12T00", "2018/04/15T00", "2018/04/16T00", "2018/04/17T00"), Expected: "2018/04/09-2018/04/12,2018/04/15-2018/04/17", From e788d8c93c7bd3c2253f9b14f26aae584f10a1ca Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Tue, 4 Nov 2025 16:00:35 -0700 Subject: [PATCH 32/40] v1 migration changes for workflow files --- apps/flowlord/cache/sqlite.go | 11 ++++++++++- 1 file changed, 10 
insertions(+), 1 deletion(-) diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index f180a30..184ddfd 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -177,7 +177,16 @@ func (o *SQLite) migrateSchema(currentVersion int) error { if currentVersion < 1 { log.Println("Creating initial schema (version 1)") - _, err := o.db.Exec(schema) + // Drop workflow tables if they exist (they may have been created before versioning) + _, err := o.db.Exec(` + DROP TABLE IF EXISTS workflow_files; + DROP TABLE IF EXISTS workflow_phases; + `) + if err != nil { + return fmt.Errorf("failed to drop old workflow tables: %w", err) + } + + _, err = o.db.Exec(schema) if err != nil { return fmt.Errorf("failed to create initial schema: %w", err) } From 42a1c75516142a254c0e5a152e105dd56d59b73d Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Wed, 5 Nov 2025 16:07:33 -0700 Subject: [PATCH 33/40] unit tests --- apps/flowlord/cache/cache_test.go | 32 +++++++++++++++---------------- apps/flowlord/cache/sqlite.go | 3 --- 2 files changed, 15 insertions(+), 20 deletions(-) diff --git a/apps/flowlord/cache/cache_test.go b/apps/flowlord/cache/cache_test.go index 88194a3..e24034a 100644 --- a/apps/flowlord/cache/cache_test.go +++ b/apps/flowlord/cache/cache_test.go @@ -254,24 +254,22 @@ func TestDatesByType(t *testing.T) { Ended: "2024-01-16T10:05:00Z", }) - // Add sample alert records - db.AddAlert(task.Task{ - ID: "alert1", - Type: "data-validation", - Job: "check", - Created: "2024-01-15T11:00:00Z", - }, "Validation error") - db.AddAlert(task.Task{ - ID: "alert2", - Type: "data-validation", - Job: "check", - Created: "2024-01-17T11:00:00Z", - }, "Validation error") + // Add sample alert records with specific created_at times + _, err := db.db.Exec(` + INSERT INTO alert_records (task_id, task_time, task_type, job, msg, created_at) + VALUES (?, ?, ?, ?, ?, ?), + (?, ?, ?, ?, ?, ?) 
+ `, "alert1", "2024-01-15T11:00:00Z", "data-validation", "check", "Validation error", "2024-01-15T11:00:00Z", + "alert2", "2024-01-17T11:00:00Z", "data-validation", "check", "Validation error", "2024-01-17T11:00:00Z") + if err != nil { + t.Fatalf("Failed to insert alerts: %v", err) + } // Add sample file messages - fileMsg1 := stat.New() - fileMsg1.Path = "gs://bucket/file1.json" - fileMsg1.Size = 1024 + fileMsg1 := stat.Stats{ + Path: "gs://bucket/file1.json", + Size: 1024, + } db.AddFileMessage(fileMsg1, []string{}, []string{}) // Test "tasks" type @@ -292,7 +290,7 @@ func TestDatesByType(t *testing.T) { t.Errorf("DatesByType('alerts') error: %v", err) } if len(alertDates) != 2 { - t.Errorf("Expected 2 alert dates, got %d", len(alertDates)) + t.Errorf("Expected 2 alert dates, got %v", alertDates) } // Test "files" type diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index 184ddfd..93c530f 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -1179,21 +1179,18 @@ func (s *SQLite) DatesByType(dataType string) ([]string, error) { query = ` SELECT DISTINCT DATE(created) as date_val FROM task_records - WHERE created >= datetime('now', '-' || ? || ' days') ORDER BY date_val DESC ` case "alerts": query = ` SELECT DISTINCT DATE(created_at) as date_val FROM alert_records - WHERE created_at >= datetime('now', '-' || ? || ' days') ORDER BY date_val DESC ` case "files": query = ` SELECT DISTINCT DATE(received_at) as date_val FROM file_messages - WHERE received_at >= datetime('now', '-' || ? 
|| ' days') ORDER BY date_val DESC ` default: From 16a497cf81ad62a621ee0f0e0203e2848c95d783 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Thu, 6 Nov 2025 16:23:34 -0700 Subject: [PATCH 34/40] recap improvements: Autodetect granularity (month,day,year) and print results accordingly --- apps/flowlord/cache/cache_test.go | 220 ------------------------------ apps/utils/recap/app_test.go | 8 +- tmpl/tmpl.go | 187 +++++++++++++++++++------ tmpl/tmpl_test.go | 40 +++++- 4 files changed, 187 insertions(+), 268 deletions(-) diff --git a/apps/flowlord/cache/cache_test.go b/apps/flowlord/cache/cache_test.go index e24034a..dbe429d 100644 --- a/apps/flowlord/cache/cache_test.go +++ b/apps/flowlord/cache/cache_test.go @@ -7,226 +7,6 @@ import ( "github.com/pcelvng/task-tools/file/stat" ) -/* -func TestAdd(t *testing.T) { - fn := func(tasks []task.Task) (map[string]TaskJob, error) { - cache := &Memory{cache: make(map[string]TaskJob)} - for _, t := range tasks { - cache.Add(t) - } - for k, v := range cache.cache { - v.count = len(v.Events) - v.Events = nil - cache.cache[k] = v - } - return cache.cache, nil - } - cases := trial.Cases[[]task.Task, map[string]TaskJob]{ - "no id": { - Input: []task.Task{ - {Type: "test"}, - }, - }, - "created": { - Input: []task.Task{ - {Type: "pull", ID: "id1", Info: "?date=2023-01-01", Created: "2023-01-01T00:00:00Z"}, - }, - Expected: map[string]TaskJob{ - "id1": { - LastUpdate: trial.TimeDay("2023-01-01"), - count: 1, - }, - }, - }, - "completed": { - Input: []task.Task{ - {Type: "pull", ID: "id1", Info: "?date=2023-01-01", Created: "2023-01-01T00:00:00Z"}, - {Type: "pull", ID: "id1", Info: "?date=2023-01-01", Created: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:01Z", Result: task.CompleteResult}, - }, - Expected: map[string]TaskJob{ - "id1": { - LastUpdate: trial.Time(time.RFC3339, "2023-01-01T00:00:01Z"), - Completed: true, - count: 2, - }, - }, - }, - "failed": { - Input: []task.Task{ - {Type: "pull", ID: "id1", Info: 
"?date=2023-01-01", Created: "2023-01-01T00:00:00Z"}, - {Type: "pull", ID: "id1", Info: "?date=2023-01-01", Created: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:01Z", Result: task.ErrResult, Msg: "Error with pull from X"}, - }, - Expected: map[string]TaskJob{ - "id1": { - LastUpdate: trial.Time(time.RFC3339, "2023-01-01T00:00:01Z"), - Completed: true, - count: 2, - }, - }, - }, - "retry": { - Input: []task.Task{ - {Type: "pull", ID: "id1", Info: "?date=2023-01-01", Created: "2023-01-01T00:00:00Z"}, - {Type: "pull", ID: "id1", Info: "?date=2023-01-01", Created: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:01Z", Result: task.ErrResult, Msg: "Error with pull from X"}, - {Type: "pull", ID: "id1", Info: "?date=2023-01-01", Created: "2023-01-01T00:01:00Z", Meta: "retry=1"}, - }, - Expected: map[string]TaskJob{ - "id1": { - LastUpdate: trial.Time(time.RFC3339, "2023-01-01T00:01:00Z"), - Completed: false, - count: 3, - }, - }, - }, - "child": { - Input: []task.Task{ - {Type: "pull", ID: "id1", Info: "?date=2023-01-01", Created: "2023-01-01T00:00:00Z"}, - {Type: "pull", ID: "id1", Info: "?date=2023-01-01", Created: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:01Z", Result: task.CompleteResult}, - {Type: "transform", ID: "id1", Info: "/product/2023-01-01/data.txt", Created: "2023-01-01T00:02:00Z"}, - }, - Expected: map[string]TaskJob{ - "id1": { - LastUpdate: trial.Time(time.RFC3339, "2023-01-01T00:02:00Z"), - Completed: false, - count: 3, - }, - }, - }, - "multi-child": { - Input: []task.Task{ - {Type: "pull", ID: "id1", Info: "?date=2023-01-01", Started: "2023-01-01T00:00:00Z"}, - {Type: "pull", ID: "id1", Info: "?date=2023-01-01", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:01Z", Result: task.CompleteResult}, - {Type: "transform", ID: "id1", Info: "/product/2023-01-01/data.txt", Started: "2023-01-01T00:02:00Z"}, - {Type: "transform", ID: "id1", Info: "/product/2023-01-01/data.txt", Started: "2023-01-01T00:02:00Z", Ended: "2023-01-01T00:02:15Z", 
Result: task.CompleteResult}, - {Type: "load", ID: "id1", Info: "/product/2023-01-01/data.txt?table=schema.product", Started: "2023-01-01T00:04:00Z"}, - {Type: "load", ID: "id1", Info: "/product/2023-01-01/data.txt?table=schema.product", Started: "2023-01-01T00:04:00Z", Ended: "2023-01-01T00:05:12Z", Result: task.CompleteResult}, - }, - Expected: map[string]TaskJob{ - "id1": { - LastUpdate: trial.Time(time.RFC3339, "2023-01-01T00:05:12Z"), - Completed: true, - count: 6, - }, - }, - }, - } - trial.New(fn, cases).SubTest(t) -} - -func TestRecycle(t *testing.T) { - now := time.Now() - cache := Memory{ - ttl: time.Hour, - cache: map[string]TaskJob{ - "keep": { - Completed: false, - LastUpdate: now.Add(-30 * time.Minute), - Events: []task.Task{{Type: "test1"}}, - }, - "expire": { - Completed: true, - LastUpdate: now.Add(-90 * time.Minute), - }, - "not-completed": { - Completed: false, - LastUpdate: now.Add(-90 * time.Minute), - Events: []task.Task{ - {Type: "test1", Created: now.String()}, - {Type: "test1", Created: now.String(), Result: task.CompleteResult}, - {Type: "test2", Created: now.String()}, - }, - }, - }, - } - - stat := cache.Recycle() - stat.ProcessTime = 0 - expected := Stat{ - Count: 1, - Removed: 2, - Unfinished: []task.Task{ - {Type: "test2", Created: now.String()}, - }} - if eq, diff := trial.Equal(stat, expected); !eq { - t.Logf(diff) - } -} - -func TestRecap(t *testing.T) { - fn := func(in []task.Task) (map[string]string, error) { - c := &Memory{cache: map[string]TaskJob{}} - for _, t := range in { - c.Add(t) - } - result := map[string]string{} - for k, v := range c.Recap() { - result[k] = v.String() - } - return result, nil - } - cases := trial.Cases[[]task.Task, map[string]string]{ - "task no job": { - Input: []task.Task{{ID: "abc", Type: "test1", Info: "?date=2020-01-02", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:10Z"}}, - Expected: map[string]string{ - "test1": "min: 10s max: 10s avg: 10s\n\tComplete: 1 
2020/01/02\n", - }, - }, - "task:job": { - Input: []task.Task{ - {ID: "abc", Type: "test1", Job: "job1", Info: "?day=2020-01-01", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:10Z"}, - {ID: "abc", Type: "test1", Job: "job1", Info: "?day=2020-01-02", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:15Z"}, - {ID: "abc", Type: "test1", Job: "job1", Info: "?day=2020-01-03", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:05Z"}, - }, - Expected: map[string]string{ - "test1:job1": "min: 5s max: 15s avg: 10s\n\tComplete: 3 2020/01/01-2020/01/03\n", - }, - }, - "with errors": { - Input: []task.Task{ - {ID: "abc", Type: "test1", Job: "job1", Info: "?day=2020-01-01", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:10Z"}, - {ID: "abc", Type: "test1", Job: "job1", Info: "?day=2020-01-02", Result: "error", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:15Z"}, - {ID: "abc", Type: "test1", Job: "job1", Info: "?day=2020-01-03", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:05Z"}, - }, - Expected: map[string]string{ - "test1:job1": "min: 5s max: 10s avg: 7.5s\n\tComplete: 2 2020/01/01,2020/01/03\n\tError: 1 2020/01/02\n", - }, - }, - "hourly": { - Input: []task.Task{ - {ID: "abc", Type: "proc", Job: "hour", Info: "?hour=2020-01-01T05", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:10Z"}, - {ID: "abc", Type: "proc", Job: "hour", Info: "?hour_utc=2020-01-01T06", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:15Z"}, - {ID: "abc", Type: "proc", Job: "hour", Info: "?hour=2020-01-01T07", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:05Z"}, - {ID: "abc", Type: "proc", Job: "hour", Info: "?hour=2020-01-01T08", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:47Z"}, - {ID: "abc", Type: "proc", Job: "hour", 
Info: "?hour=2020-01-01T09", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:01:33Z"}, - }, - Expected: map[string]string{ - "proc:hour": "min: 5s max: 1m33s avg: 34s\n\tComplete: 5 2020/01/01T05-2020/01/01T09\n", - }, - }, - "monthly": { - Input: []task.Task{ - {ID: "abc", Type: "month", Info: "?day=2020-01-01", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:10Z"}, - {ID: "abc", Type: "month", Info: "?day=2020-02-01", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:15Z"}, - }, - Expected: map[string]string{ - "month": "min: 10s max: 15s avg: 12.5s\n\tComplete: 2 2020/01/01,2020/02/01\n", - }, - }, - "meta_job": { - Input: []task.Task{ - {ID: "abc", Type: "test1", Info: "?day=2020-01-01", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:10Z", Meta: "job=job1"}, - {ID: "abc", Type: "test1", Info: "?day=2020-01-02", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:15Z", Meta: "job=job1"}, - {ID: "abc", Type: "test1", Info: "?day=2020-01-03", Result: "complete", Started: "2023-01-01T00:00:00Z", Ended: "2023-01-01T00:00:05Z", Meta: "job=job1"}, - }, - Expected: map[string]string{ - "test1:job1": "min: 5s max: 15s avg: 10s\n\tComplete: 3 2020/01/01-2020/01/03\n", - }, - }, - } - trial.New(fn, cases).SubTest(t) -} -*/ - // TestDatesByType tests the unified date query method func TestDatesByType(t *testing.T) { // Create in-memory database diff --git a/apps/utils/recap/app_test.go b/apps/utils/recap/app_test.go index 3bfacb8..0a52193 100644 --- a/apps/utils/recap/app_test.go +++ b/apps/utils/recap/app_test.go @@ -46,17 +46,17 @@ func TestDoneTopic(t *testing.T) { cases := trial.Cases[string, []string]{ "task without job": { Input: `{"type":"test1","info":"?date=2020-01-02","result":"complete"}`, - Expected: []string{"test1\n\tmin: 0s max 0s avg:0s\n\tComplete 1 2020/01/02"}, + Expected: []string{"test1\n\tmin: 0s max 0s 
avg:0s\n\tComplete 1 2020/01/02T00"}, }, "task with job meta": { Input: `{"type":"test2","info":"?date=2020-01-02","result":"complete","meta":"job=part1"}`, - Expected: []string{"test2:part1\n\tmin: 0s max 0s avg:0s\n\tComplete 1 2020/01/02"}, + Expected: []string{"test2:part1\n\tmin: 0s max 0s avg:0s\n\tComplete 1 2020/01/02T00"}, }, "2 task with job meta": { Input: `{"type":"test3","info":"?date=2020-01-02","result":"complete","meta":"job=part1"} {"type":"test3","info":"?date=2020-01-02","result":"complete","meta":"job=part2"}`, - Expected: []string{"test3:part1\n\tmin: 0s max 0s avg:0s\n\tComplete 1 2020/01/02", - "test3:part2\n\tmin: 0s max 0s avg:0s\n\tComplete 1 2020/01/02"}, + Expected: []string{"test3:part1\n\tmin: 0s max 0s avg:0s\n\tComplete 1 2020/01/02T00", + "test3:part2\n\tmin: 0s max 0s avg:0s\n\tComplete 1 2020/01/02T00"}, }, } trial.New(fn, cases).Test(t) diff --git a/tmpl/tmpl.go b/tmpl/tmpl.go index a226e9c..05fff21 100644 --- a/tmpl/tmpl.go +++ b/tmpl/tmpl.go @@ -286,61 +286,162 @@ type Getter interface { Get(string) string } +type granularity int + +const ( + granularityHourly granularity = iota + granularityDaily + granularityMonthly +) + +// isConsecutive checks if two times are consecutive based on the granularity +func isConsecutive(t1, t2 time.Time, gran granularity) bool { + // Equal times are always consecutive (handles duplicates) + if t1.Equal(t2) { + return true + } + + switch gran { + case granularityHourly: + return t2.Sub(t1) == time.Hour + case granularityDaily: + // Check if next calendar day (not exactly 24 hours) + y1, m1, d1 := t1.Date() + y2, m2, d2 := t2.Date() + // Add one day to t1 and check if it matches t2's date + nextDay := time.Date(y1, m1, d1+1, 0, 0, 0, 0, t1.Location()) + yn, mn, dn := nextDay.Date() + return y2 == yn && m2 == mn && d2 == dn + case granularityMonthly: + // Check if next month + y1, m1, _ := t1.Date() + y2, m2, _ := t2.Date() + expectedYear := y1 + expectedMonth := m1 + 1 + if expectedMonth > 12 { + 
expectedMonth = 1 + expectedYear++ + } + return y2 == expectedYear && m2 == expectedMonth + } + return false +} + +// formatTime formats a time based on granularity +func formatTime(t time.Time, gran granularity) string { + switch gran { + case granularityMonthly: + return t.Format("2006/01") + case granularityDaily: + return t.Format("2006/01/02") + case granularityHourly: + return t.Format("2006/01/02T15") + } + return t.Format("2006/01/02T15") +} + // PrintDates takes a slice of times and displays the range of times in a more friendly format. +// It automatically detects the granularity (hourly/daily/monthly) and formats accordingly. +// Examples: +// - Hourly: "2006/01/02T15-2006/01/02T18" +// - Daily: "2006/01/02-2006/01/05" +// - Monthly: "2006/01-2006/04" +// - Mixed: "2006/01-2006/03, 2006/05/01T10" func PrintDates(dates []time.Time) string { - tFormat := "2006/01/02T15" if len(dates) == 0 { return "" } + + // Sort dates sort.Slice(dates, func(i, j int) bool { return dates[i].Before(dates[j]) }) - prev := dates[0] - s := prev.Format(tFormat) - series := false - for _, t := range dates { - diff := t.Truncate(time.Hour).Sub(prev.Truncate(time.Hour)) - if diff != time.Hour && diff != 0 { - if series { - s += "-" + prev.Format(tFormat) - } - s += "," + t.Format(tFormat) - series = false - } else if diff == time.Hour { - series = true - } - prev = t - } - if series { - s += "-" + prev.Format(tFormat) + + // Single timestamp - return full hour format + if len(dates) == 1 { + return dates[0].Format("2006/01/02T15") } - //check for daily records only - if !strings.Contains(s, "-") { - days := strings.Split(s, ",") - prev, _ := time.Parse(tFormat, days[0]) - dailyString := prev.Format("2006/01/02") - series = false - - for i := 1; i < len(days); i++ { - tm, _ := time.Parse(tFormat, days[i]) - if r := tm.Sub(prev) % (24 * time.Hour); r != 0 { - return s - } - if tm.Sub(prev) != 24*time.Hour { - if series { - dailyString += "-" + prev.Format("2006/01/02") - series = 
false - } - dailyString += "," + tm.Format("2006/01/02") + // Detect granularity in a single pass (skip duplicates) + monthMap := make(map[string]bool) + dayMap := make(map[string]bool) + gran := granularityMonthly // Start optimistic, downgrade as needed + + for i, t := range dates { + // Skip duplicates for granularity detection + if i > 0 && t.Equal(dates[i-1]) { + continue + } + + // Detect granularity while iterating + monthKey := t.Format("2006-01") + dayKey := t.Format("2006-01-02") + + // If we've seen this month before, it's not monthly data + if monthMap[monthKey] && gran == granularityMonthly { + gran = granularityDaily + } + // If we've seen this day before, it's hourly data + if dayMap[dayKey] && gran == granularityDaily { + gran = granularityHourly + } + + monthMap[monthKey] = true + dayMap[dayKey] = true + } + // Build output + var result strings.Builder + rangeStart := dates[0] + prev := dates[0] + inRange := false + + for i := 1; i < len(dates); i++ { + curr := dates[i] + + if isConsecutive(prev, curr, gran) { + // Continue range + inRange = true + prev = curr + continue + } + + // Range broken or gap - write the previous range/item + if inRange { + // Close the range (but check if it's just duplicates) + if rangeStart.Equal(prev) { + // Just duplicates, write as single item + result.WriteString(rangeStart.Format("2006/01/02T15")) } else { - series = true + result.WriteString(formatTime(rangeStart, gran)) + result.WriteString("-") + result.WriteString(formatTime(prev, gran)) } - prev = tm + } else { + // Single item in a multi-timestamp dataset - use hour format + result.WriteString(rangeStart.Format("2006/01/02T15")) } - if series { - return dailyString + "-" + prev.Format("2006/01/02") + result.WriteString(",") + + // Start new range + rangeStart = curr + inRange = false + prev = curr + } + + // Handle the last item/range + if inRange { + // Close final range (but check if it's just duplicates) + if rangeStart.Equal(prev) { + // Just duplicates, 
write as single item + result.WriteString(rangeStart.Format("2006/01/02T15")) + } else { + result.WriteString(formatTime(rangeStart, gran)) + result.WriteString("-") + result.WriteString(formatTime(prev, gran)) } - return dailyString + return result.String() } - return s + + // Single final item in multi-timestamp dataset - use hour format + result.WriteString(rangeStart.Format("2006/01/02T15")) + return result.String() } diff --git a/tmpl/tmpl_test.go b/tmpl/tmpl_test.go index c70ff8b..3d9fb34 100644 --- a/tmpl/tmpl_test.go +++ b/tmpl/tmpl_test.go @@ -382,15 +382,53 @@ func TestPrintDates(t *testing.T) { Input: trial.Times(f, "2018/04/09T00", "2018/04/10T00", "2018/04/11T00", "2018/04/12T00"), Expected: "2018/04/09-2018/04/12", }, - "daily records offset": // TODO: Need to know if task is daily or hourly or monthly + "daily records offset": { Input: trial.Times(f, "2018/04/09T04", "2018/04/10T04", "2018/04/11T04", "2018/04/12T06"), Expected: "2018/04/09-2018/04/12", }, + "daily records offset with duplicates": + { + Input: trial.Times(f, "2018/04/09T04","2018/04/09T04", "2018/04/09T04", "2018/04/10T04", "2018/04/11T04", "2018/04/12T06"), + Expected: "2018/04/09-2018/04/12", + }, "daily records with gaps": { Input: trial.Times(f, "2018/04/09T00", "2018/04/10T00", "2018/04/11T00", "2018/04/12T00", "2018/04/15T00", "2018/04/16T00", "2018/04/17T00"), Expected: "2018/04/09-2018/04/12,2018/04/15-2018/04/17", }, + + "monthly consecutive": { + Input: trial.Times(f, "2018/01/15T10", "2018/02/20T11", "2018/03/10T09"), + Expected: "2018/01-2018/03", + }, + "monthly with gaps": { + Input: trial.Times(f, "2018/01/15T10", "2018/02/20T11", "2018/05/10T09", "2018/06/15T12"), + Expected: "2018/01-2018/02,2018/05-2018/06", + }, + "monthly single": { + Input: trial.Times(f, "2018/01/15T10", "2018/03/20T11", "2018/08/10T09"), + Expected: "2018/01/15T10,2018/03/20T11,2018/08/10T09", + }, + "daily with different hours": { + Input: trial.Times(f, "2020/05/01T08", "2020/05/02T10", 
"2020/05/03T09", "2020/05/04T11"), + Expected: "2020/05/01-2020/05/04", + }, + "single timestamp": { + Input: trial.Times(f, "2020/05/01T10"), + Expected: "2020/05/01T10", + }, + "empty input": { + Input: []time.Time{}, + Expected: "", + }, + "cross year monthly": { + Input: trial.Times(f, "2020/11/15T10", "2020/12/20T11", "2021/01/10T09"), + Expected: "2020/11-2021/01", + }, + "cross month daily": { + Input: trial.Times(f, "2020/01/30T10", "2020/01/31T10", "2020/02/01T10", "2020/02/02T10"), + Expected: "2020/01/30-2020/02/02", + }, } trial.New(fn, cases).Test(t) From 2c8d35e79bbeeb9d9103f5c98a0611f1a2f1093f Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Tue, 11 Nov 2025 15:39:35 -0700 Subject: [PATCH 35/40] speed up populate date query --- apps/flowlord/cache/cache_test.go | 6 + apps/flowlord/cache/schema.sql | 17 +- apps/flowlord/cache/sqlite.go | 220 ++++++++++++++++++++----- apps/flowlord/handler.go | 50 +++++- apps/flowlord/handler/about.tmpl | 2 +- apps/flowlord/handler/static/style.css | 1 + apps/flowlord/handler/task.tmpl | 41 ++--- 7 files changed, 263 insertions(+), 74 deletions(-) diff --git a/apps/flowlord/cache/cache_test.go b/apps/flowlord/cache/cache_test.go index dbe429d..9a33411 100644 --- a/apps/flowlord/cache/cache_test.go +++ b/apps/flowlord/cache/cache_test.go @@ -52,6 +52,12 @@ func TestDatesByType(t *testing.T) { } db.AddFileMessage(fileMsg1, []string{}, []string{}) + // Rebuild the date index to capture the directly-inserted alerts + // (In production, all inserts go through Add/AddAlert/AddFileMessage which maintain the index) + if err := db.RebuildDateIndex(); err != nil { + t.Fatalf("Failed to rebuild date index: %v", err) + } + // Test "tasks" type taskDates, err := db.DatesByType("tasks") if err != nil { diff --git a/apps/flowlord/cache/schema.sql b/apps/flowlord/cache/schema.sql index 70aa040..a5f606c 100644 --- a/apps/flowlord/cache/schema.sql +++ b/apps/flowlord/cache/schema.sql @@ -1,6 +1,7 @@ -- Schema version tracking CREATE 
TABLE IF NOT EXISTS schema_version ( - version INTEGER PRIMARY KEY + id INTEGER PRIMARY KEY DEFAULT 1, + version INTEGER NOT NULL ); -- SQL schema for the task cache @@ -122,4 +123,16 @@ CREATE TABLE IF NOT EXISTS workflow_phases ( -- Indexes for performance CREATE INDEX IF NOT EXISTS idx_workflow_phases_task ON workflow_phases (task); -CREATE INDEX IF NOT EXISTS idx_workflow_phases_depends_on ON workflow_phases (depends_on); \ No newline at end of file +CREATE INDEX IF NOT EXISTS idx_workflow_phases_depends_on ON workflow_phases (depends_on); + +-- Date index table for fast date lookups +-- Tracks which dates have data in each table +CREATE TABLE IF NOT EXISTS date_index ( + date TEXT PRIMARY KEY, + -- YYYY-MM-DD format + has_tasks BOOLEAN DEFAULT 0, + -- 1 if task_records has data for this date + has_alerts BOOLEAN DEFAULT 0, + -- 1 if alert_records has data for this date + has_files BOOLEAN DEFAULT 0 -- 1 if file_messages has data for this date +); \ No newline at end of file diff --git a/apps/flowlord/cache/sqlite.go b/apps/flowlord/cache/sqlite.go index 93c530f..9c028cf 100644 --- a/apps/flowlord/cache/sqlite.go +++ b/apps/flowlord/cache/sqlite.go @@ -28,7 +28,9 @@ var schema string // currentSchemaVersion is the current version of the database schema. // Increment this when making schema changes that require migration. 
-const currentSchemaVersion = 1 +// Version 1: Initial schema +// Version 2: Added date_index table for performance optimization +const currentSchemaVersion = 2 type SQLite struct { LocalPath string @@ -167,8 +169,19 @@ func (o *SQLite) GetSchemaVersion() int { // setVersion updates the schema version in the database func (o *SQLite) setVersion(version int) error { - _, err := o.db.Exec("INSERT OR REPLACE INTO schema_version (version) VALUES (?)", version) - return err + // Delete all existing records to ensure we only have one row + _, err := o.db.Exec("DELETE FROM schema_version") + if err != nil { + return fmt.Errorf("failed to clear schema_version: %w", err) + } + + // Insert the new version + _, err = o.db.Exec("INSERT INTO schema_version (version) VALUES (?)", version) + if err != nil { + return fmt.Errorf("failed to insert schema version: %w", err) + } + + return nil } // migrateSchema applies version-specific migrations based on the current version @@ -192,19 +205,26 @@ func (o *SQLite) migrateSchema(currentVersion int) error { } } + // Version 1 → 2: Add date_index table for performance optimization + if currentVersion < 2 { + log.Println("Migrating schema from version 1 to 2 (adding date_index table)") + + // Re-apply schema.sql - it has IF NOT EXISTS so it's safe and will add new tables + _, err := o.db.Exec(schema) + if err != nil { + return fmt.Errorf("failed to apply schema for version 2: %w", err) + } + + // Populate the date_index from existing data + if err := o.RebuildDateIndex(); err != nil { + return fmt.Errorf("failed to populate date_index: %w", err) + } + + log.Println("Successfully migrated to schema version 2") + } + // Add future migrations here as needed: // Example: - // if currentVersion < 2 { - // db := o.db - // // Drop an old table - // db.Exec("DROP TABLE IF EXISTS obsolete_table") - // - // // Add new column if it doesn't exist - // if !columnExists(db, "task_records", "new_field") { - // db.Exec("ALTER TABLE task_records ADD 
COLUMN new_field TEXT") - // } - // } - // // if currentVersion < 3 { // db := o.db // // Drop column by recreating table (since data loss is OK) @@ -260,6 +280,11 @@ func (s *SQLite) Add(t task.Task) { return } + // Update date index for this task's date + if t.Created != "" { + s.updateDateIndex(t.Created, "tasks") + } + // Check if this was an update (conflict) rather than insert rowsAffected, _ := result.RowsAffected() if rowsAffected == 0 { @@ -557,7 +582,14 @@ func (s *SQLite) AddAlert(t task.Task, message string) error { VALUES (?, ?, ?, ?, ?) `, taskID, taskTime, t.Type, job, message) - return err + if err != nil { + return err + } + + // Update date index - use current time for alert created_at + s.updateDateIndex(time.Now().Format(time.RFC3339), "alerts") + + return nil } // extractJobFromTask is a helper function to get job from task @@ -571,6 +603,49 @@ func extractJobFromTask(t task.Task) string { return job } +// updateDateIndex updates the date_index table for a given timestamp and data type +// This method should be called within an existing lock (s.mu.Lock) +func (s *SQLite) updateDateIndex(timestamp, dataType string) { + // Parse timestamp to extract date + t, err := time.Parse(time.RFC3339, timestamp) + if err != nil { + // Try other formats if RFC3339 fails + t, err = time.Parse("2006-01-02 15:04:05", timestamp) + if err != nil { + return // Skip if we can't parse the timestamp + } + } + + dateStr := t.Format("2006-01-02") + + // Determine which column to update + var column string + switch dataType { + case "tasks": + column = "has_tasks" + case "alerts": + column = "has_alerts" + case "files": + column = "has_files" + default: + return + } + + // First try to insert the date, if it already exists, update the column + _, err = s.db.Exec("INSERT OR IGNORE INTO date_index (date) VALUES (?)", dateStr) + if err != nil { + log.Printf("WARNING: Failed to insert date into date_index for %s on %s: %v", dataType, dateStr, err) + return + } + + // Now 
update the specific column + query := fmt.Sprintf("UPDATE date_index SET %s = 1 WHERE date = ?", column) + _, err = s.db.Exec(query, dateStr) + if err != nil { + log.Printf("WARNING: Failed to update date_index for %s on %s: %v", dataType, dateStr, err) + } +} + // GetAlertsByDate retrieves all alerts for a specific date func (s *SQLite) GetAlertsByDate(date time.Time) ([]AlertRecord, error) { s.mu.Lock() @@ -842,7 +917,14 @@ func (s *SQLite) AddFileMessage(sts stat.Stats, taskIDs []string, taskNames []st VALUES (?, ?, ?, ?, ?, ?) `, sts.Path, sts.Size, lastModified, taskTime, taskIDsJSON, taskNamesJSON) - return err + if err != nil { + return err + } + + // Update date index - use current time for received_at + s.updateDateIndex(time.Now().Format(time.RFC3339), "files") + + return nil } // GetFileMessages retrieves file messages with optional filtering @@ -1164,40 +1246,31 @@ func (s *SQLite) GetDatesWithData() ([]string, error) { // DatesByType returns a list of dates (YYYY-MM-DD format) that have data for the specified type // dataType can be "tasks", "alerts", or "files" +// This uses the date_index table for instant lookups func (s *SQLite) DatesByType(dataType string) ([]string, error) { s.mu.Lock() defer s.mu.Unlock() - retentionDays := int(s.Retention.Hours() / 24) - if retentionDays == 0 { - retentionDays = 90 - } - - var query string + var column string switch dataType { case "tasks": - query = ` - SELECT DISTINCT DATE(created) as date_val - FROM task_records - ORDER BY date_val DESC - ` + column = "has_tasks" case "alerts": - query = ` - SELECT DISTINCT DATE(created_at) as date_val - FROM alert_records - ORDER BY date_val DESC - ` + column = "has_alerts" case "files": - query = ` - SELECT DISTINCT DATE(received_at) as date_val - FROM file_messages - ORDER BY date_val DESC - ` + column = "has_files" default: return nil, fmt.Errorf("invalid data type: %s (must be 'tasks', 'alerts', or 'files')", dataType) } - rows, err := s.db.Query(query, retentionDays) + 
query := fmt.Sprintf(` + SELECT date + FROM date_index + WHERE %s = 1 + ORDER BY date DESC + `, column) + + rows, err := s.db.Query(query) if err != nil { return nil, err } @@ -1230,6 +1303,77 @@ func (s *SQLite) GetDatesWithFiles() ([]string, error) { return s.DatesByType("files") } +// RebuildDateIndex scans all tables and rebuilds the date_index table +// This should be called once during migration or can be exposed as an admin endpoint +func (s *SQLite) RebuildDateIndex() error { + s.mu.Lock() + defer s.mu.Unlock() + + log.Println("Starting date_index rebuild...") + + // Clear existing index + _, err := s.db.Exec("DELETE FROM date_index") + if err != nil { + return fmt.Errorf("failed to clear date_index: %w", err) + } + + // Populate from task_records + // First insert the dates, then update the has_tasks flag + _, err = s.db.Exec(` + INSERT OR IGNORE INTO date_index (date, has_tasks) + SELECT DISTINCT DATE(created), 1 + FROM task_records + `) + if err != nil { + return fmt.Errorf("failed to populate date_index from tasks: %w", err) + } + + // Populate from alert_records + // Insert new dates and update has_alerts for existing dates + _, err = s.db.Exec(` + INSERT OR IGNORE INTO date_index (date) + SELECT DISTINCT DATE(created_at) + FROM alert_records + WHERE DATE(created_at) NOT IN (SELECT date FROM date_index) + `) + if err != nil { + return fmt.Errorf("failed to insert new dates from alerts: %w", err) + } + + _, err = s.db.Exec(` + UPDATE date_index + SET has_alerts = 1 + WHERE date IN (SELECT DISTINCT DATE(created_at) FROM alert_records) + `) + if err != nil { + return fmt.Errorf("failed to update date_index from alerts: %w", err) + } + + // Populate from file_messages + // Insert new dates and update has_files for existing dates + _, err = s.db.Exec(` + INSERT OR IGNORE INTO date_index (date) + SELECT DISTINCT DATE(received_at) + FROM file_messages + WHERE DATE(received_at) NOT IN (SELECT date FROM date_index) + `) + if err != nil { + return 
fmt.Errorf("failed to insert new dates from files: %w", err) + } + + _, err = s.db.Exec(` + UPDATE date_index + SET has_files = 1 + WHERE date IN (SELECT DISTINCT DATE(received_at) FROM file_messages) + `) + if err != nil { + return fmt.Errorf("failed to update date_index from files: %w", err) + } + + log.Println("Successfully rebuilt date_index") + return nil +} + // FileMessageWithTasks represents a file message with associated task details type FileMessageWithTasks struct { FileID int `json:"file_id"` diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index 20c75e4..286e910 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -12,6 +12,7 @@ import ( "net/http" "net/url" "path/filepath" + "sort" "strconv" "strings" "time" @@ -421,15 +422,19 @@ func (tm *taskMaster) htmlTask(w http.ResponseWriter, r *http.Request) { result := r.URL.Query().Get("result") // Get tasks with filters + start := time.Now() tasks, err := tm.taskCache.GetTasksByDate(dt, taskType, job, result) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) return } - + log.Printf("getTasksbyDate %v", time.Since(start)) + + start = time.Now() // Get dates with tasks for calendar highlighting datesWithData, _ := tm.taskCache.DatesByType("tasks") + log.Printf("DatesByType %v", time.Since(start)) w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "text/html") @@ -571,15 +576,21 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri nextDate := date.AddDate(0, 0, 1) // Generate summary from tasks data + start := time.Now() summary := generateSummaryFromTasks(tasks) //TODO: replace with taskCache.Recap() + log.Printf("generateSummary %v", time.Since(start)) - // Calculate statistics + // Calculate statistics and extract unique types/jobs totalTasks := len(tasks) completedTasks := 0 errorTasks := 0 alertTasks := 0 warnTasks := 0 runningTasks := 0 + + // Extract unique types and jobs for filter 
dropdowns + uniqueTypes := make(map[string]struct{}) + uniqueJobs := make(map[string]map[string]struct{}) // type -> jobs for _, t := range tasks { switch t.Result { @@ -594,6 +605,37 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri case "": runningTasks++ } + + // Track unique types + if t.Type != "" { + uniqueTypes[t.Type] = struct{}{} + + // Track jobs per type + if uniqueJobs[t.Type] == nil { + uniqueJobs[t.Type] = make(map[string]struct{}) + } + if t.Job != "" { + uniqueJobs[t.Type][t.Job] = struct{}{} + } + } + } + + // Convert to sorted slices for template + types := make([]string, 0, len(uniqueTypes)) + for t := range uniqueTypes { + types = append(types, t) + } + sort.Strings(types) + + // Convert jobs map to sorted slices + jobsByType := make(map[string][]string) + for typ, jobs := range uniqueJobs { + jobList := make([]string, 0, len(jobs)) + for j := range jobs { + jobList = append(jobList, j) + } + sort.Strings(jobList) + jobsByType[typ] = jobList } data := map[string]interface{}{ @@ -616,6 +658,8 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri "PageTitle": "Task Dashboard", "isLocal": isLocal, "DatesWithData": datesWithData, + "UniqueTypes": types, + "JobsByType": jobsByType, } // Get base funcMap and extend it with task-specific closures @@ -672,9 +716,11 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri } var buf bytes.Buffer + start = time.Now() if err := tmpl.Execute(&buf, data); err != nil { return []byte(err.Error()) } + log.Printf("execute tmpl %v", time.Since(start)) return buf.Bytes() } diff --git a/apps/flowlord/handler/about.tmpl b/apps/flowlord/handler/about.tmpl index 3efd544..a33f811 100644 --- a/apps/flowlord/handler/about.tmpl +++ b/apps/flowlord/handler/about.tmpl @@ -40,7 +40,7 @@ {{.NextUpdate}}
- Database File + Database {{.DBPath}}
diff --git a/apps/flowlord/handler/static/style.css b/apps/flowlord/handler/static/style.css index 387cdb1..00f9388 100644 --- a/apps/flowlord/handler/static/style.css +++ b/apps/flowlord/handler/static/style.css @@ -231,6 +231,7 @@ body { .info-label { font-weight: 500; + margin-right: 10px; color: #495057; } diff --git a/apps/flowlord/handler/task.tmpl b/apps/flowlord/handler/task.tmpl index 74f383a..48248be 100644 --- a/apps/flowlord/handler/task.tmpl +++ b/apps/flowlord/handler/task.tmpl @@ -293,37 +293,16 @@ if (!table) return; - // Get all task rows - const rows = Array.from(table.querySelectorAll('tbody tr')); - - // Extract unique task types - const taskTypes = new Set(); - const jobMap = new Map(); // type -> Set of jobs - - rows.forEach(row => { - const typeCell = row.cells[1]; // Type column - const jobCell = row.cells[2]; // Job column - - if (typeCell && jobCell) { - const type = typeCell.textContent.trim(); - const job = jobCell.textContent.trim(); - - if (type) { - taskTypes.add(type); - - if (!jobMap.has(type)) { - jobMap.set(type, new Set()); - } - if (job) { - jobMap.get(type).add(job); - } - } - } - }); - - // Populate task type dropdown - const sortedTypes = Array.from(taskTypes).sort(); - sortedTypes.forEach(type => { + // Use server-provided data instead of extracting from DOM + const taskTypes = [{{range $i, $t := .UniqueTypes}}{{if $i}},{{end}}"{{$t}}"{{end}}]; + const jobMap = new Map([ + {{range $type, $jobs := .JobsByType}} + ["{{$type}}", [{{range $j, $job := $jobs}}{{if $j}},{{end}}"{{$job}}"{{end}}]], + {{end}} + ]); + + // Populate task type dropdown from server data + taskTypes.forEach(type => { const option = document.createElement('option'); option.value = type; option.textContent = type; From 950bba8037429bb4e662d6f89072d92c0e73dee5 Mon Sep 17 00:00:00 2001 From: Joshua Smith Date: Thu, 13 Nov 2025 15:04:31 -0700 Subject: [PATCH 36/40] speed up tasksHTML --- apps/flowlord/handler.go | 168 ++++++++------- 
apps/flowlord/handler/header.tmpl | 2 + apps/flowlord/handler/static/style.css | 27 +++ apps/flowlord/handler/task.tmpl | 269 ++++++++++++------------- 4 files changed, 257 insertions(+), 209 deletions(-) diff --git a/apps/flowlord/handler.go b/apps/flowlord/handler.go index 286e910..8eab73b 100644 --- a/apps/flowlord/handler.go +++ b/apps/flowlord/handler.go @@ -420,25 +420,39 @@ func (tm *taskMaster) htmlTask(w http.ResponseWriter, r *http.Request) { taskType := r.URL.Query().Get("type") job := r.URL.Query().Get("job") result := r.URL.Query().Get("result") + + // Get pagination parameters + page := 1 + if pageStr := r.URL.Query().Get("page"); pageStr != "" { + if p, err := strconv.Atoi(pageStr); err == nil && p > 0 { + page = p + } + } + + pageSize := 500 // Show 500 tasks per page + if pageSizeStr := r.URL.Query().Get("pageSize"); pageSizeStr != "" { + if ps, err := strconv.Atoi(pageSizeStr); err == nil && ps > 0 && ps <= 1000 { + pageSize = ps + } + } - // Get tasks with filters - start := time.Now() - tasks, err := tm.taskCache.GetTasksByDate(dt, taskType, job, result) + // Get ALL tasks for the date (no filtering at all) to populate summary and dropdowns + queryStart := time.Now() + allTasks, err := tm.taskCache.GetTasksByDate(dt, "", "", "") if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) return } - log.Printf("getTasksbyDate %v", time.Since(start)) + queryTime := time.Since(queryStart) - start = time.Now() // Get dates with tasks for calendar highlighting datesWithData, _ := tm.taskCache.DatesByType("tasks") - log.Printf("DatesByType %v", time.Since(start)) w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "text/html") - w.Write(taskHTML(tasks, dt, taskType, job, result, datesWithData)) + htmlBytes := taskHTML(allTasks, dt, taskType, job, result, datesWithData, page, pageSize, queryTime) + w.Write(htmlBytes) } // htmlWorkflow handles GET /web/workflow - displays workflow phases from database @@ -570,29 
+584,29 @@ func generateSummaryFromTasks(tasks []cache.TaskView) map[string]*cache.Stats { } // taskHTML renders the task summary and table HTML page -func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result string, datesWithData []string) []byte { +func taskHTML(allTasks []cache.TaskView, date time.Time, taskType, job, result string, datesWithData []string, page, pageSize int, queryTime time.Duration) []byte { + renderStart := time.Now() + // Calculate navigation dates prevDate := date.AddDate(0, 0, -1) nextDate := date.AddDate(0, 0, 1) - // Generate summary from tasks data - start := time.Now() - summary := generateSummaryFromTasks(tasks) //TODO: replace with taskCache.Recap() - log.Printf("generateSummary %v", time.Since(start)) + // Generate summary from ALL tasks (not filtered) + summary := generateSummaryFromTasks(allTasks) //TODO: replace with taskCache.Recap() - // Calculate statistics and extract unique types/jobs - totalTasks := len(tasks) + // Calculate statistics and extract unique types/jobs from ALL tasks + totalAllTasks := len(allTasks) completedTasks := 0 errorTasks := 0 alertTasks := 0 warnTasks := 0 runningTasks := 0 - // Extract unique types and jobs for filter dropdowns + // Extract unique types and jobs for filter dropdowns from ALL tasks uniqueTypes := make(map[string]struct{}) uniqueJobs := make(map[string]map[string]struct{}) // type -> jobs - for _, t := range tasks { + for _, t := range allTasks { switch t.Result { case "complete": completedTasks++ @@ -637,16 +651,63 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri sort.Strings(jobList) jobsByType[typ] = jobList } + + // Filter tasks for display based on taskType, job, and result parameters + filteredTasks := make([]cache.TaskView, 0, len(allTasks)) + for _, t := range allTasks { + // Apply type filter if specified + if taskType != "" && t.Type != taskType { + continue + } + // Apply job filter if specified + if job != "" && t.Job != 
job { + continue + } + // Apply result filter if specified + if result != "" { + // Handle "running" as empty result + if result == "running" { + if t.Result != "" { + continue + } + } else if t.Result != result { + continue + } + } + filteredTasks = append(filteredTasks, t) + } + + // Calculate pagination based on filtered tasks + totalFilteredTasks := len(filteredTasks) + totalPages := (totalFilteredTasks + pageSize - 1) / pageSize + if totalPages == 0 { + totalPages = 1 + } + if page > totalPages { + page = totalPages + } + + // Slice tasks for current page + startIdx := (page - 1) * pageSize + endIdx := startIdx + pageSize + if endIdx > totalFilteredTasks { + endIdx = totalFilteredTasks + } + if startIdx > totalFilteredTasks { + startIdx = totalFilteredTasks + } + + pagedTasks := filteredTasks[startIdx:endIdx] data := map[string]interface{}{ "Date": date.Format("Monday, January 2, 2006"), "DateValue": date.Format("2006-01-02"), "PrevDate": prevDate.Format("2006-01-02"), "NextDate": nextDate.Format("2006-01-02"), - "Tasks": tasks, + "Tasks": pagedTasks, // Only show current page's tasks "Summary": summary, - "TotalTasks": totalTasks, - "CompletedTasks": completedTasks, + "TotalTasks": totalAllTasks, // Total count of ALL tasks for the day (for summary) + "CompletedTasks": completedTasks, // Stats from ALL tasks "ErrorTasks": errorTasks, "AlertTasks": alertTasks, "WarnTasks": warnTasks, @@ -660,67 +721,34 @@ func taskHTML(tasks []cache.TaskView, date time.Time, taskType, job, result stri "DatesWithData": datesWithData, "UniqueTypes": types, "JobsByType": jobsByType, + // Pagination info (based on filtered tasks) + "Page": page, + "PageSize": pageSize, + "TotalPages": totalPages, + "StartIndex": startIdx + 1, + "EndIndex": endIdx, + "FilteredCount": totalFilteredTasks, // Number of tasks after filtering } - // Get base funcMap and extend it with task-specific closures - funcMap := getBaseFuncMap() - funcMap["getAlertCount"] = func(topic, job string) int { - // 
Count alerts for a specific topic and job - count := 0 - for _, task := range tasks { - if task.Result == "alert" { - // Check if this task matches the topic and job - taskJob := task.Job - if taskJob == "" { - // Try to extract job from meta if not set directly - if v, err := url.ParseQuery(task.Meta); err == nil { - taskJob = v.Get("job") - } - } - // For now, we'll match any alert task since we don't have topic info in TaskView - // This is a simplified implementation - if taskJob == job { - count++ - } - } - } - return count - } - funcMap["getWarnCount"] = func(topic, job string) int { - // Count warnings for a specific topic and job - count := 0 - for _, task := range tasks { - if task.Result == "warn" { - // Check if this task matches the topic and job - taskJob := task.Job - if taskJob == "" { - // Try to extract job from meta if not set directly - if v, err := url.ParseQuery(task.Meta); err == nil { - taskJob = v.Get("job") - } - } - // For now, we'll match any warn task since we don't have topic info in TaskView - // This is a simplified implementation - if taskJob == job { - count++ - } - } - } - return count - } - - // Parse and execute template - tmpl, err := template.New("task").Funcs(funcMap).Parse(HeaderTemplate + TaskTemplate) + // Parse and execute template using base funcMap + tmpl, err := template.New("task").Funcs(getBaseFuncMap()).Parse(HeaderTemplate + TaskTemplate) if err != nil { return []byte(err.Error()) } var buf bytes.Buffer - start = time.Now() if err := tmpl.Execute(&buf, data); err != nil { return []byte(err.Error()) } - log.Printf("execute tmpl %v", time.Since(start)) + + htmlSize := buf.Len() + renderTime := time.Since(renderStart) + + // Single consolidated log with all metrics + log.Printf("Task page: date=%s filters=[type=%q job=%q result=%q] tasks=%d filtered=%d page=%d/%d query=%v render=%v size=%.2fMB", + date.Format("2006-01-02"), taskType, job, result, + totalAllTasks, totalFilteredTasks, page, totalPages, + queryTime, 
renderTime, float64(htmlSize)/(1024*1024)) return buf.Bytes() } diff --git a/apps/flowlord/handler/header.tmpl b/apps/flowlord/handler/header.tmpl index 5af15b3..4c98c85 100644 --- a/apps/flowlord/handler/header.tmpl +++ b/apps/flowlord/handler/header.tmpl @@ -2,11 +2,13 @@