-
Notifications
You must be signed in to change notification settings - Fork 2
fix(ha): PR #101 HA implementation bug fixes #154
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
36bc132
07ec0bf
909e78c
86b6fdb
92b02fb
821e78e
0bac69c
1241bc0
077a9fb
67435d2
297c4c0
128cafa
61cfba8
748fbb6
5da7617
bba166a
25ca95e
c6c6cf1
01ce374
117194e
c99f0a4
10da6e3
0fc15fe
e9fb55e
e322d11
a2ed862
b59e6f8
83640ad
79945c8
edb7922
98c5229
8e51eea
fa0378e
57ef5fd
de807e0
5e59b15
59c6b41
c679086
c1fedbb
fe8554b
02e2ee1
c066ad8
1cc75ed
3d51def
3605d4a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -130,16 +130,28 @@ func run() error { | |
| defer db.Close() | ||
| defer func() { _ = rdb.Close() }() | ||
|
|
||
| compute, storage, network, lbProxy, err := initBackends(deps, cfg, logger, db, rdb) | ||
| rawCompute, rawStorage, rawNetwork, rawLBProxy, err := initBackends(deps, cfg, logger, db, rdb) | ||
|
||
| if err != nil { | ||
| logger.Error("backend initialization failed", "error", err) | ||
| return err | ||
| } | ||
|
|
||
| // Wrap raw backends with resilience decorators (circuit breaker, bulkhead, timeouts). | ||
| compute := platform.NewResilientCompute(rawCompute, logger, platform.ResilientComputeOpts{}) | ||
| storage := platform.NewResilientStorage(rawStorage, logger, platform.ResilientStorageOpts{}) | ||
| network := platform.NewResilientNetwork(rawNetwork, logger, platform.ResilientNetworkOpts{}) | ||
| lbProxy := platform.NewResilientLB(rawLBProxy, logger, platform.ResilientLBOpts{}) | ||
|
|
||
| repos := deps.InitRepositories(db, rdb) | ||
|
|
||
| // Create leader elector for singleton worker coordination. | ||
| // When multiple worker replicas run, only one will hold leadership per key. | ||
| leaderElector := postgres.NewPgLeaderElector(db, logger) | ||
|
|
||
| svcs, workers, err := deps.InitServices(setup.ServiceConfig{ | ||
| Config: cfg, Repos: repos, Compute: compute, Storage: storage, | ||
| Network: network, LBProxy: lbProxy, DB: db, RDB: rdb, Logger: logger, | ||
| LeaderElector: leaderElector, | ||
| }) | ||
| if err != nil { | ||
| logger.Error("service initialization failed", "error", err) | ||
|
|
@@ -154,52 +166,61 @@ func run() error { | |
| r.Use(otelgin.Middleware("compute-api")) | ||
| } | ||
|
|
||
| runApplication(deps, cfg, logger, r, workers) | ||
| return nil | ||
| return runApplication(deps, cfg, logger, r, workers) | ||
| } | ||
|
|
||
| func runApplication(deps AppDeps, cfg *platform.Config, logger *slog.Logger, r *gin.Engine, workers *setup.Workers) { | ||
| role := os.Getenv("APP_ROLE") | ||
| func runApplication(deps AppDeps, cfg *platform.Config, logger *slog.Logger, r *gin.Engine, workers *setup.Workers) error { | ||
| role := os.Getenv("ROLE") | ||
| if role == "" { | ||
| role = "all" | ||
| } | ||
|
Comment on lines
+172
to
176
|
||
|
|
||
| validRoles := map[string]bool{"api": true, "worker": true, "all": true} | ||
| if !validRoles[role] { | ||
| logger.Error("invalid ROLE value, must be one of: api, worker, all", "role", role) | ||
| return fmt.Errorf("invalid ROLE value %q, must be one of: api, worker, all", role) | ||
| } | ||
| logger.Info("starting with role", "role", role) | ||
|
|
||
| wg := &sync.WaitGroup{} | ||
| workerCtx, workerCancel := context.WithCancel(context.Background()) | ||
|
|
||
| if role == "worker" || role == "all" { | ||
| runWorkers(workerCtx, wg, workers) | ||
| } | ||
|
|
||
| srv := deps.NewHTTPServer(":"+cfg.Port, r) | ||
|
|
||
| var srv *http.Server | ||
| if role == "api" || role == "all" { | ||
| srv = deps.NewHTTPServer(":"+cfg.Port, r) | ||
| go func() { | ||
| logger.Info("starting compute-api", "port", cfg.Port) | ||
| if err := deps.StartHTTPServer(srv); err != nil && !stdlib_errors.Is(err, http.ErrServerClosed) { | ||
| logger.Error("failed to start server", "error", err) | ||
| } | ||
| }() | ||
| } else { | ||
| logger.Info("running in worker-only mode") | ||
| logger.Info("running in worker-only mode, HTTP server disabled") | ||
| } | ||
|
|
||
| quit := make(chan os.Signal, 1) | ||
| deps.NotifySignals(quit, syscall.SIGINT, syscall.SIGTERM) | ||
| <-quit | ||
|
|
||
| logger.Info("shutting down server...") | ||
| logger.Info("shutting down...") | ||
|
|
||
| ctx, cancel := context.WithTimeout(context.Background(), defaultShutdownTimeout) | ||
| defer cancel() | ||
|
|
||
| if err := deps.ShutdownHTTPServer(ctx, srv); err != nil { | ||
| logger.Error("server forced to shutdown", "error", err) | ||
| if srv != nil { | ||
| if err := deps.ShutdownHTTPServer(ctx, srv); err != nil { | ||
| logger.Error("server forced to shutdown", "error", err) | ||
| } | ||
| } | ||
|
|
||
| workerCancel() | ||
| wg.Wait() | ||
| logger.Info("server exited") | ||
| logger.Info("shutdown complete") | ||
| return nil | ||
| } | ||
|
|
||
| type runner interface { | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The inline comment says "Fail if performance drops by more than 50%", but
`alert-threshold: '200%'` corresponds to allowing up to a 2x regression. If you intend a 50% regression threshold, this likely should be `150%` (or update the comment to match the chosen threshold).