diff --git a/.github/atomicgo/custom_readme b/.github/atomicgo/custom_readme
new file mode 100644
index 0000000..e69de29
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 5701467..cfd8986 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -25,6 +25,6 @@ jobs:
go-version: "stable"
- name: golangci-lint
- uses: golangci/golangci-lint-action@v3
+ uses: golangci/golangci-lint-action@v8
with:
version: latest
diff --git a/.golangci.yml b/.golangci.yml
index 86989d1..a0ff5cd 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,115 +1,122 @@
-# ┌───────────────────────────────────────────────────────────────────┐
-# │ │
-# │ IMPORTANT NOTE │
-# │ │
-# │ This file is synced with https://github.com/atomicgo/template │
-# │ │
-# │ Please apply all changes to the template repository │
-# │ │
-# └───────────────────────────────────────────────────────────────────┘
-
-run:
- timeout: 3m
-
+version: "2"
linters:
- enable-all: true
+ default: all
disable:
- - copyloopvar # fixed in go 1.22+
- - depguard # no forbidden imports
- - dogsled # blank identifiers are allowed
- - dupword # duplicate words are allowed
- - exhaustruct # many structs don't need to be exhaustive
- - forbidigo # no forbidden identifiers
- - ginkgolinter # not used
- - gochecknoinits # init functions are fine, if used carefully
- - goconst # many false positives
- - godot # comments don't need to be complete sentences
- - godox # todo comments are allowed
- - goheader # no need for a header
- - gomoddirectives # allow all directives
- - gomodguard # no forbidden imports
- - grouper # unused
- - importas # some aliases are fine
- - makezero # make with non-zero initial length is fine
- - noctx # http request may be sent without context
- - nonamedreturns # named returns are fine
- - testableexamples # examples do not need to be testable (have declared output)
- - testifylint # testify is not recommended
- - testpackage # not a go best practice
- - unparam # interfaces can enforce parameters
- - zerologlint # slog should be used instead of zerolog
- - mnd # too many detections
- - cyclop # covered by gocyclo
- - gochecknoglobals # there are many valid reasons for global variables, depending on the project
- - ireturn # there are too many exceptions
- - tenv # deprecated
-
-linters-settings:
- wsl:
- allow-cuddle-declarations: true
- force-err-cuddling: true
- force-case-trailing-whitespace: 3
-
- funlen:
- lines: 100
- statements: 50
- ignore-comments: true
-
- lll:
- line-length: 140
- tab-width: 1
-
- nlreturn:
- block-size: 2
-
- exhaustive:
- check-generated: false
- default-signifies-exhaustive: true
-
- varnamelen:
- ignore-type-assert-ok: true # ignore "ok" variables
- ignore-map-index-ok: true
- ignore-chan-recv-ok: true
- ignore-decls:
- - n int # generic number
- - x int # generic number (e.g. coordinate)
- - y int # generic number (e.g. coordinate)
- - z int # generic number (e.g. coordinate)
- - i int # generic number
- - a int # generic number
- - r int # generic number (e.g. red or radius)
- - g int # generic number (e.g. green)
- - b int # generic number (e.g. blue)
- - r int64 # generic number (e.g. red or radius)
- - g int64 # generic number (e.g. green)
- - b int64 # generic number (e.g. blue)
- - c int # generic number (e.g. count)
- - j int # generic number (e.g. index)
- - T any # generic type
- - a any # generic any (e.g. data)
- - b any # generic any (e.g. body)
- - c any # generic any
- - d any # generic any (e.g. data)
- - data any # generic data
- - n any # generic any
- - ch chan T # common generic channel name
- - ch chan int # common generic channel name
- - ch chan any # common generic channel name
- - wg sync.WaitGroup # common generic WaitGroup name
- - t time.Time # often used as a variable name
- - f func() # often used as a callback variable name
- - f func(T) # often used as a generic callback variable name
- - cb func() # often used as a callback variable name
- - t testing.T # default testing.T variable name
- - b testing.B # default testing.B variable name
- - sb strings.Builder # often used as a variable name
-
-issues:
- exclude-rules:
- - path: "_test(_[^/]+)?\\.go"
- linters:
- - gochecknoglobals
- - noctx
- - funlen
- - dupl
- - errcheck
+ - copyloopvar
+ - cyclop
+ - depguard
+ - dogsled
+ - dupword
+ - exhaustruct
+ - forbidigo
+ - ginkgolinter
+ - gochecknoglobals
+ - gochecknoinits
+ - goconst
+ - godot
+ - godox
+ - goheader
+ - gomoddirectives
+ - gomodguard
+ - grouper
+ - importas
+ - ireturn
+ - makezero
+ - mnd
+ - noctx
+ - nonamedreturns
+ - testableexamples
+ - testifylint
+ - testpackage
+ - unparam
+ - zerologlint
+ - noinlineerr
+ settings:
+ exhaustive:
+ default-signifies-exhaustive: true
+ funlen:
+ lines: 100
+ statements: 50
+ ignore-comments: true
+ lll:
+ line-length: 140
+ tab-width: 1
+ nlreturn:
+ block-size: 2
+ varnamelen:
+ ignore-type-assert-ok: true
+ ignore-map-index-ok: true
+ ignore-chan-recv-ok: true
+ ignore-decls:
+ - n int
+ - x int
+ - y int
+ - z int
+ - i int
+ - a int
+ - r int
+ - g int
+ - b int
+ - r int64
+ - g int64
+ - b int64
+ - c int
+ - j int
+ - T any
+ - a any
+ - b any
+ - c any
+ - d any
+ - data any
+ - n any
+ - ch chan T
+ - ch chan int
+ - ch chan any
+ - wg sync.WaitGroup
+ - t time.Time
+ - f func()
+ - f func(T)
+ - cb func()
+ - t testing.T
+ - b testing.B
+ - sb strings.Builder
+ - w http.ResponseWriter
+ - r *http.Request
+ wsl:
+ force-case-trailing-whitespace: 3
+ allow-cuddle-declarations: true
+ force-err-cuddling: true
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ rules:
+ - linters:
+ - dupl
+ - errcheck
+ - funlen
+ - gochecknoglobals
+ - noctx
+ path: _test(_[^/]+)?\.go
+ - linters:
+ - revive
+ text: "unused-parameter:"
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gci
+ - gofmt
+ - gofumpt
+ - goimports
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/README.md b/README.md
index 2b9017e..6efd8ea 100644
--- a/README.md
+++ b/README.md
@@ -1,20 +1,3 @@
-
-
AtomicGo | service
@@ -76,86 +59,117 @@
-
-
-
-
-# service
+---
-```go
-import "atomicgo.dev/service"
-```
+A minimal boilerplate wrapper for building production-ready Go HTTP services. This library reduces the boilerplate of writing production/enterprise-grade Go services to a minimum.
-Package service provides a lightweight, production\-ready HTTP service framework for Go applications.
+**What this library provides:**
+- Essential production features out of the box (metrics, health checks, graceful shutdown)
+- Kubernetes and containerization boilerplate
+- Lightweight wrapper around http.Server for high availability services
-The service framework is designed to be Kubernetes\-ready and follows best practices for highly available microservices. It includes built\-in support for graceful shutdown, Prometheus metrics, structured logging, middleware, and environment\-based configuration.
+**What this library does NOT provide:**
+- HTTP framework or routing
+- Business logic or application patterns
+- Restrictions on how you write your HTTP handlers
+- Opinionated application architecture
-\#\# Features
+Write HTTP handlers exactly as you prefer, using any patterns or frameworks you choose. This library handles the operational concerns while staying out of your application logic.
-\- \*\*HTTP Server\*\*: Configurable HTTP server with timeouts and graceful shutdown \- \*\*Metrics\*\*: Built\-in Prometheus metrics collection with automatic request tracking \- \*\*Logging\*\*: Structured logging with slog integration and context\-aware loggers \- \*\*Middleware\*\*: Extensible middleware system with built\-in recovery and logging \- \*\*Configuration\*\*: Environment\-based configuration with sensible defaults \- \*\*Graceful Shutdown\*\*: Signal handling with configurable shutdown hooks \- \*\*Health Checks\*\*: Built\-in health check endpoints \- \*\*Kubernetes Ready\*\*: Designed for containerized deployments
+## Features
-\#\# Quick Start
+- **Minimal Boilerplate**: Reduces production service setup to a few lines of code
+- **HTTP Server Wrapper**: Lightweight wrapper around http.Server with production defaults
+- **Metrics**: Built-in Prometheus metrics collection with automatic request tracking
+- **Logging**: Structured logging with slog integration and context-aware loggers
+- **Middleware**: Extensible middleware system with built-in recovery and logging
+- **Configuration**: Environment-based configuration with sensible defaults
+- **Graceful Shutdown**: Signal handling with configurable shutdown hooks
+- **Health Checks**: Built-in health check endpoints for Kubernetes
+- **Framework Agnostic**: Works with any HTTP patterns or frameworks you prefer (as long as the framework supports the standard `http` package)
-\`\`\`go package main
+## Quick Start
-import \(
+Minimal boilerplate to get a production-ready service with metrics, health checks, and graceful shutdown:
-```
-"log/slog"
-"net/http"
-"os"
+```go
+package main
-"atomicgo.dev/service"
-```
+import (
+ "log/slog"
+ "net/http"
+ "os"
-\)
+ "atomicgo.dev/service"
+)
-```
func main() {
// Create service with default configuration
svc := service.New("my-service", nil)
- // Register handlers
+ // Write HTTP handlers exactly as you prefer
svc.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- logger := service.GetLogger(r)
+ logger := service.GetLogger(r) // Easy access to the logger
logger.Info("Hello, World!")
w.Write([]byte("Hello, World!"))
})
- // Start with graceful shutdown
- if err := svc.StartWithGracefulShutdown(); err != nil {
+ // Start service (includes graceful shutdown, metrics, health checks)
+ if err := svc.Start(); err != nil {
os.Exit(1)
}
}
```
-\`\`\`
+That's it! Your service now has:
+- Prometheus metrics at `:9090/metrics`
+- Comprehensive health checks at `:9090/health`
+- Kubernetes readiness probe at `:9090/ready`
+- Kubernetes liveness probe at `:9090/live`
+- Graceful shutdown handling
+- Structured logging
+- Kubernetes-ready configuration
-\#\# Configuration
+## Configuration
The framework supports configuration via environment variables with sensible defaults:
-\- \`ADDR\`: HTTP server address \(default: ":8080"\) \- \`METRICS\_ADDR\`: Metrics server address \(default: ":9090"\) \- \`METRICS\_PATH\`: Metrics endpoint path \(default: "/metrics"\) \- \`READ\_TIMEOUT\`: HTTP read timeout \(default: "10s"\) \- \`WRITE\_TIMEOUT\`: HTTP write timeout \(default: "10s"\) \- \`IDLE\_TIMEOUT\`: HTTP idle timeout \(default: "120s"\) \- \`SHUTDOWN\_TIMEOUT\`: Graceful shutdown timeout \(default: "30s"\)
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `ADDR` | `:8080` | HTTP server address |
+| `METRICS_ADDR` | `:9090` | Metrics server address |
+| `METRICS_PATH` | `/metrics` | Metrics endpoint path |
+| `HEALTH_PATH` | `/health` | Health check endpoint path |
+| `READINESS_PATH` | `/ready` | Readiness probe endpoint path |
+| `LIVENESS_PATH` | `/live` | Liveness probe endpoint path |
+| `SERVICE_VERSION` | `v1.0.0` | Service version for health checks |
+| `READ_TIMEOUT` | `10s` | HTTP read timeout |
+| `WRITE_TIMEOUT` | `10s` | HTTP write timeout |
+| `IDLE_TIMEOUT` | `120s` | HTTP idle timeout |
+| `SHUTDOWN_TIMEOUT` | `30s` | Graceful shutdown timeout |
-\`\`\`go // Load configuration from environment config, err := service.LoadFromEnv\(\)
-
-```
+```go
+// Load configuration from environment
+config, err := service.LoadFromEnv()
if err != nil {
log.Fatal(err)
}
-```
-
-// Create service with custom configuration svc := service.New\("my\-service", config\) \`\`\`
-\#\# Middleware
+// Create service with custom configuration
+svc := service.New("my-service", config)
+```
-The framework includes several built\-in middleware:
+## Middleware
-\- \*\*LoggerMiddleware\*\*: Injects logger into request context \- \*\*RecoveryMiddleware\*\*: Recovers from panics and logs errors \- \*\*RequestLoggingMiddleware\*\*: Logs incoming requests \- \*\*MetricsMiddleware\*\*: Tracks HTTP metrics for Prometheus
+The framework includes several built-in middleware:
-\`\`\`go // Add custom middleware
+- **LoggerMiddleware**: Injects logger into request context
+- **RecoveryMiddleware**: Recovers from panics and logs errors
+- **RequestLoggingMiddleware**: Logs incoming requests
+- **MetricsMiddleware**: Tracks HTTP metrics for Prometheus
-```
+```go
+// Add custom middleware
svc.Use(func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Custom", "value")
@@ -164,375 +178,276 @@ svc.Use(func(next http.Handler) http.Handler {
})
```
-\`\`\`
+## Metrics
-\#\# Metrics
+The framework provides a flexible metrics system with built-in HTTP metrics and support for custom metrics.
-The framework automatically collects Prometheus metrics:
+### Built-in HTTP Metrics
-\- \`\{service\_name\}\_http\_requests\_total\`: Total HTTP requests \- \`\{service\_name\}\_http\_request\_duration\_seconds\`: Request duration \- \`\{service\_name\}\_http\_requests\_in\_flight\`: In\-flight requests
+The framework automatically collects these Prometheus metrics for every service:
-Metrics are available at \`:9090/metrics\` by default.
+- `{service_name}_http_requests_total`: Total HTTP requests by method, endpoint, and status
+- `{service_name}_http_request_duration_seconds`: Request duration histogram by method, endpoint, and status
+- `{service_name}_http_requests_in_flight`: Current number of in-flight requests
-\`\`\`go // Access metrics in handlers
+These metrics are provided automatically without any configuration required.
+### Custom Metrics
+
+You can easily register and use custom metrics in your service:
+
+```go
+func main() {
+ svc := service.New("my-service", nil)
+
+ // Register custom metrics
+ svc.RegisterCounter(service.MetricConfig{
+ Name: "user_registrations_total",
+ Help: "Total number of user registrations",
+ Labels: []string{"source", "status"},
+ })
+
+ svc.RegisterGauge(service.MetricConfig{
+ Name: "active_users",
+ Help: "Number of currently active users",
+ Labels: []string{"user_type"},
+ })
+
+ svc.RegisterHistogram(service.MetricConfig{
+ Name: "request_processing_duration_seconds",
+ Help: "Time spent processing requests",
+ Labels: []string{"operation"},
+ Buckets: []float64{0.001, 0.01, 0.1, 1.0, 10.0},
+ })
+
+ svc.RegisterSummary(service.MetricConfig{
+ Name: "response_size_bytes",
+ Help: "Size of responses in bytes",
+ Labels: []string{"endpoint"},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ })
+
+ // Use metrics in handlers
+ svc.HandleFunc("/register", func(w http.ResponseWriter, r *http.Request) {
+ // Increment counter
+ service.IncCounter(r, "user_registrations_total", "web", "success")
+
+ // Set gauge value
+ service.SetGauge(r, "active_users", 42.0, "premium")
+
+ // Observe histogram
+ service.ObserveHistogram(r, "request_processing_duration_seconds", 0.25, "registration")
+
+ // Observe summary
+ service.ObserveSummary(r, "response_size_bytes", 1024.0, "/register")
+
+ w.Write([]byte("User registered"))
+ })
+
+ svc.Start()
+}
```
+
+### Metric Types
+
+The framework supports all standard Prometheus metric types:
+
+- **Counter**: Monotonically increasing values (e.g., total requests, errors)
+- **Gauge**: Values that can go up and down (e.g., active connections, memory usage)
+- **Histogram**: Observations in configurable buckets (e.g., request duration, response size)
+- **Summary**: Observations with configurable quantiles (e.g., request latency percentiles)
+
+### Direct Access
+
+For advanced use cases, you can access the metrics collector directly:
+
+```go
func myHandler(w http.ResponseWriter, r *http.Request) {
metrics := service.GetMetrics(r)
if metrics != nil {
- // Custom metric operations can be added here
+ // Direct access to metrics collector
+ metrics.IncCounter("my_counter", "label_value")
+ metrics.SetGauge("my_gauge", 42.0, "label_value")
+
+ // Access the underlying Prometheus registry
+ registry := metrics.GetRegistry()
+ // Use registry for custom integrations
}
}
```
-\`\`\`
+All metrics are available at `:9090/metrics` by default.
-\#\# Graceful Shutdown
+## Graceful Shutdown
-The framework supports graceful shutdown with signal handling and custom hooks:
+The framework includes graceful shutdown by default with signal handling and custom hooks:
-\`\`\`go // Add shutdown hooks
-
-```
+```go
+// Add shutdown hooks
svc.AddShutdownHook(func() error {
// Cleanup resources
return nil
})
-```
-
-// Start with graceful shutdown svc.StartWithGracefulShutdown\(\) \`\`\`
-\#\# Logging
+// Start service (includes graceful shutdown)
+svc.Start()
+```
-The framework uses structured logging with slog and provides context\-aware loggers:
+## Logging
-\`\`\`go
+The framework uses structured logging with slog and provides context-aware loggers:
-```
+```go
func myHandler(w http.ResponseWriter, r *http.Request) {
logger := service.GetLogger(r)
logger.Info("request processed", "path", r.URL.Path)
}
```
-\`\`\`
-
-\#\# Health Checks
-
-Health check endpoints are automatically available:
-
-\- \`:9090/health\`: Basic health check \- \`:9090/metrics\`: Prometheus metrics
-
-\#\# Kubernetes Deployment
-
-The framework is designed for Kubernetes deployments with:
-
-\- Graceful shutdown handling SIGTERM \- Health check endpoints for liveness/readiness probes \- Prometheus metrics for monitoring \- Configurable resource limits via environment variables
-
-\#\# Examples
-
-See the \`\_example/\` directory for complete working examples demonstrating:
-
-\- Basic service setup \- Custom middleware \- Environment configuration \- Graceful shutdown \- Metrics integration
-
-The framework is designed to be lightweight while providing all essential features for production\-ready microservices.
-
-## Index
-
-- [func GetLogger\(r \*http.Request\) \*slog.Logger](<#GetLogger>)
-- [func IncCounter\(r \*http.Request, name string, labels ...string\)](<#IncCounter>)
-- [func ObserveHistogram\(r \*http.Request, name string, value float64, labels ...string\)](<#ObserveHistogram>)
-- [type Config](<#Config>)
- - [func DefaultConfig\(\) \*Config](<#DefaultConfig>)
- - [func LoadFromEnv\(\) \(\*Config, error\)](<#LoadFromEnv>)
- - [func \(c \*Config\) AddShutdownHook\(hook func\(\) error\)](<#Config.AddShutdownHook>)
-- [type ContextKey](<#ContextKey>)
-- [type MetricsCollector](<#MetricsCollector>)
- - [func GetMetrics\(r \*http.Request\) \*MetricsCollector](<#GetMetrics>)
- - [func NewMetricsCollector\(serviceName string\) \*MetricsCollector](<#NewMetricsCollector>)
-- [type Middleware](<#Middleware>)
- - [func LoggerMiddleware\(logger \*slog.Logger\) Middleware](<#LoggerMiddleware>)
- - [func MetricsMiddleware\(metrics \*MetricsCollector\) Middleware](<#MetricsMiddleware>)
- - [func RecoveryMiddleware\(logger \*slog.Logger\) Middleware](<#RecoveryMiddleware>)
- - [func RequestLoggingMiddleware\(logger \*slog.Logger\) Middleware](<#RequestLoggingMiddleware>)
-- [type Service](<#Service>)
- - [func New\(name string, config \*Config\) \*Service](<#New>)
- - [func \(s \*Service\) AddShutdownHook\(hook func\(\) error\)](<#Service.AddShutdownHook>)
- - [func \(s \*Service\) Handle\(pattern string, handler http.Handler\)](<#Service.Handle>)
- - [func \(s \*Service\) HandleFunc\(pattern string, handler http.HandlerFunc\)](<#Service.HandleFunc>)
- - [func \(s \*Service\) Start\(\) error](<#Service.Start>)
- - [func \(s \*Service\) StartWithGracefulShutdown\(\) error](<#Service.StartWithGracefulShutdown>)
- - [func \(s \*Service\) Stop\(\) error](<#Service.Stop>)
- - [func \(s \*Service\) Use\(middleware Middleware\)](<#Service.Use>)
-
-
-
-## func [GetLogger]()
-
-```go
-func GetLogger(r *http.Request) *slog.Logger
-```
-
-GetLogger retrieves the logger from the request context
+## Health Checks
-
-## func [IncCounter]()
+The framework integrates with [HelloFresh's health-go library](https://github.com/hellofresh/health-go) to provide comprehensive health checking capabilities.
-```go
-func IncCounter(r *http.Request, name string, labels ...string)
-```
-
-IncCounter increments a counter metric
-
-
-## func [ObserveHistogram]()
+### Built-in Health Endpoints
-```go
-func ObserveHistogram(r *http.Request, name string, value float64, labels ...string)
-```
+Health check endpoints are automatically available on the metrics server:
-ObserveHistogram observes a histogram metric
+- `:9090/health`: Comprehensive health check with detailed status information
+- `:9090/ready`: Kubernetes readiness probe endpoint
+- `:9090/live`: Kubernetes liveness probe endpoint
+- `:9090/metrics`: Prometheus metrics
-
-## type [Config]()
+### Adding Custom Health Checks
-Config holds all configuration for the service
+You can register custom health checks using the `RegisterHealthCheck` method:
```go
-type Config struct {
- // HTTP Server configuration
- Addr string `env:"ADDR" envDefault:":8080"`
- ReadTimeout time.Duration `env:"READ_TIMEOUT" envDefault:"10s"`
- WriteTimeout time.Duration `env:"WRITE_TIMEOUT" envDefault:"10s"`
- IdleTimeout time.Duration `env:"IDLE_TIMEOUT" envDefault:"120s"`
-
- // Metrics server configuration
- MetricsAddr string `env:"METRICS_ADDR" envDefault:":9090"`
- MetricsPath string `env:"METRICS_PATH" envDefault:"/metrics"`
+func main() {
+ svc := service.New("my-service", nil)
- // Graceful shutdown configuration
- ShutdownTimeout time.Duration `env:"SHUTDOWN_TIMEOUT" envDefault:"30s"`
+ // Register a database health check
+ svc.RegisterHealthCheck(health.Config{
+ Name: "database",
+ Timeout: time.Second * 5,
+ SkipOnErr: false, // This check is critical
+ Check: func(ctx context.Context) error {
+ // Your database health check logic here
+ return db.PingContext(ctx)
+ },
+ })
- // Logger configuration
- Logger *slog.Logger `env:"-"`
+ // Register a Redis health check
+ svc.RegisterHealthCheck(health.Config{
+ Name: "redis",
+ Timeout: time.Second * 3,
+ SkipOnErr: true, // This check is optional
+ Check: func(ctx context.Context) error {
+ // Your Redis health check logic here
+ return redisClient.Ping(ctx).Err()
+ },
+ })
- // Custom shutdown hooks
- ShutdownHooks []func() error `env:"-"`
+ svc.Start()
}
```
-
-### func [DefaultConfig]()
+### Using Built-in Health Checkers
-```go
-func DefaultConfig() *Config
-```
-
-DefaultConfig creates a new config with default values
-
-
-### func [LoadFromEnv]()
+The health-go library provides several built-in health checkers for common services:
```go
-func LoadFromEnv() (*Config, error)
-```
-
-LoadFromEnv loads configuration from environment variables
-
-
-### func \(\*Config\) [AddShutdownHook]()
-
-```go
-func (c *Config) AddShutdownHook(hook func() error)
-```
-
-AddShutdownHook adds a function to be called during graceful shutdown
-
-
-## type [ContextKey]()
-
-ContextKey is a custom type for context keys to avoid collisions
-
-```go
-type ContextKey string
-```
-
-
-
-```go
-const (
- // LoggerKey is the context key for the logger
- LoggerKey ContextKey = "logger"
- // MetricsKey is the context key for metrics
- MetricsKey ContextKey = "metrics"
+import (
+ "github.com/hellofresh/health-go/v5"
+ healthPostgres "github.com/hellofresh/health-go/v5/checks/postgres"
+ healthRedis "github.com/hellofresh/health-go/v5/checks/redis"
)
-```
-
-## type [MetricsCollector]()
+func main() {
+ svc := service.New("my-service", nil)
-MetricsCollector holds all the metrics for the service
+ // PostgreSQL health check
+ svc.RegisterHealthCheck(health.Config{
+ Name: "postgresql",
+ Timeout: time.Second * 5,
+ SkipOnErr: false, // Critical check - service is unhealthy if DB is down
+ Check: healthPostgres.New(healthPostgres.Config{
+ DSN: dbURL,
+ }),
+ })
+
+ // Redis health check
+ svc.RegisterHealthCheck(health.Config{
+ Name: "redis",
+ Timeout: time.Second * 3,
+ Check: healthRedis.New(healthRedis.Config{
+ Addr: "localhost:6379",
+ }),
+ })
-```go
-type MetricsCollector struct {
- // contains filtered or unexported fields
+ svc.Start()
}
```
-
-### func [GetMetrics]()
-
-```go
-func GetMetrics(r *http.Request) *MetricsCollector
-```
-
-GetMetrics retrieves the metrics collector from the request context
-
-
-### func [NewMetricsCollector]()
-
-```go
-func NewMetricsCollector(serviceName string) *MetricsCollector
-```
-
-NewMetricsCollector creates a new metrics collector
-
-
-## type [Middleware]()
-
-Middleware represents a middleware function
-
-```go
-type Middleware func(http.Handler) http.Handler
-```
-
-
-### func [LoggerMiddleware]()
-
-```go
-func LoggerMiddleware(logger *slog.Logger) Middleware
-```
-
-LoggerMiddleware injects the logger into the request context
-
-
-### func [MetricsMiddleware]()
-
-```go
-func MetricsMiddleware(metrics *MetricsCollector) Middleware
-```
-
-MetricsMiddleware creates middleware that records HTTP metrics
+### Health Check Configuration
-
-### func [RecoveryMiddleware]()
+You can configure health check endpoints using environment variables:
-```go
-func RecoveryMiddleware(logger *slog.Logger) Middleware
-```
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `HEALTH_PATH` | `/health` | Main health check endpoint path |
+| `READINESS_PATH` | `/ready` | Kubernetes readiness probe path |
+| `LIVENESS_PATH` | `/live` | Kubernetes liveness probe path |
-RecoveryMiddleware recovers from panics and logs them
+### Accessing Health Checker in Handlers
-
-### func [RequestLoggingMiddleware]()
+You can access the health checker in your HTTP handlers:
```go
-func RequestLoggingMiddleware(logger *slog.Logger) Middleware
-```
-
-RequestLoggingMiddleware logs incoming requests
-
-
-## type [Service]()
-
-Service represents the main service instance
-
-```go
-type Service struct {
- Name string
- Config *Config
- Logger *slog.Logger
- Metrics *MetricsCollector
- // contains filtered or unexported fields
+func myHandler(w http.ResponseWriter, r *http.Request) {
+ healthChecker := service.GetHealthChecker(r)
+ if healthChecker != nil {
+ check := healthChecker.Measure(r.Context())
+ // Use status information
+ w.Write([]byte(fmt.Sprintf("Health checks: %+v", check)))
+ }
}
```
-
-### func [New]()
-
-```go
-func New(name string, config *Config) *Service
-```
-
-New creates a new service instance
-
-
-### func \(\*Service\) [AddShutdownHook]()
-
-```go
-func (s *Service) AddShutdownHook(hook func() error)
-```
-
-AddShutdownHook adds a function to be called during graceful shutdown
-
-
-### func \(\*Service\) [Handle]()
-
-```go
-func (s *Service) Handle(pattern string, handler http.Handler)
-```
-
-Handle registers a handler for the given pattern
+## Kubernetes Deployment
-
-### func \(\*Service\) [HandleFunc]()
+The library provides all the boilerplate needed for Kubernetes deployments:
-```go
-func (s *Service) HandleFunc(pattern string, handler http.HandlerFunc)
-```
+- Graceful shutdown handling SIGTERM
+- Health check endpoints for liveness/readiness probes
+- Prometheus metrics for monitoring
+- Configurable resource limits via environment variables
+- No additional Kubernetes-specific code required
-HandleFunc registers a handler function for the given pattern
+## Examples
-
-### func \(\*Service\) [Start]()
+See the `_examples/` directory for complete working examples demonstrating:
-```go
-func (s *Service) Start() error
-```
-
-Start starts the service and metrics server
+- **minimal/**: Basic service setup with default configuration
+- **custom-metrics/**: Comprehensive custom metrics registration and usage
+- **prometheus-counter/**: Simple custom counter example (HTTP metrics are automatic)
+- **health-check-\***: Various health check integrations
+- **shutdown-hook/**: Graceful shutdown with custom cleanup
-
-### func \(\*Service\) [StartWithGracefulShutdown]()
+## Best Practices
-```go
-func (s *Service) StartWithGracefulShutdown() error
-```
-
-StartWithGracefulShutdown starts the service with graceful shutdown handling
-
-
-### func \(\*Service\) [Stop]()
-
-```go
-func (s *Service) Stop() error
-```
-
-Stop stops the service gracefully
-
-
-### func \(\*Service\) [Use]()
-
-```go
-func (s *Service) Use(middleware Middleware)
-```
+1. **Minimal setup** - Start with default configuration and customize only what you need
+2. **Write HTTP handlers naturally** - Use any patterns or frameworks you prefer
+3. **Add custom shutdown hooks** for resource cleanup when needed
+4. **Use structured logging** for better observability
+5. **Monitor metrics** in production environments
-Use adds middleware to the service
+## Contributing
-Generated by [gomarkdoc]()
+We welcome contributions! Please see our [Contributing Guide](https://github.com/atomicgo/atomicgo/blob/main/CONTRIBUTING.md) for details.
+## License
-
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
---
diff --git a/_example/main.go b/_example/main.go
deleted file mode 100644
index c4fd65f..0000000
--- a/_example/main.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package main
-
-import (
- "fmt"
- "log/slog"
- "net/http"
- "os"
- "time"
-
- "atomicgo.dev/service"
-)
-
-func main() {
- // Load configuration from environment variables
- config, err := service.LoadFromEnv()
- if err != nil {
- slog.Error("failed to load config", "error", err)
- os.Exit(1)
- }
-
- // Create service with loaded configuration
- svc := service.New("example", config)
-
- // Add custom middleware
- svc.Use(func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("X-Service", "example")
- next.ServeHTTP(w, r)
- })
- })
-
- // Add shutdown hook to demonstrate graceful shutdown
- svc.AddShutdownHook(func() error {
- slog.Info("cleaning up resources...")
- time.Sleep(1 * time.Second) // Simulate cleanup
- slog.Info("cleanup complete")
- return nil
- })
-
- // Register handlers
- svc.HandleFunc("/", handleHelloWorld)
- svc.HandleFunc("/health", handleHealth)
- svc.HandleFunc("/metrics-demo", handleMetricsDemo)
-
- // Start service with graceful shutdown
- slog.Info("starting service with graceful shutdown support")
- if err := svc.StartWithGracefulShutdown(); err != nil {
- svc.Logger.Error("failed to start service", "error", err)
- os.Exit(1)
- }
-
- slog.Info("service stopped")
-}
-
-func handleHelloWorld(w http.ResponseWriter, r *http.Request) {
- logger := service.GetLogger(r)
- logger.Info("Hello, World! endpoint called")
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte("Hello, World!"))
-}
-
-func handleHealth(w http.ResponseWriter, r *http.Request) {
- logger := service.GetLogger(r)
- logger.Info("health check called")
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte("OK"))
-}
-
-func handleMetricsDemo(w http.ResponseWriter, r *http.Request) {
- logger := service.GetLogger(r)
- logger.Info("metrics demo endpoint called")
-
- // Simulate some work
- time.Sleep(100 * time.Millisecond)
-
- // Example of using metrics in a handler
- // The metrics middleware automatically tracks requests, but you can also
- // interact with metrics manually if needed
- metrics := service.GetMetrics(r)
- if metrics != nil {
- logger.Info("metrics collector is available in context")
- }
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte(fmt.Sprintf("Metrics demo completed. Check :9090/metrics for Prometheus metrics.")))
-}
diff --git a/_examples/health-check-access/main.go b/_examples/health-check-access/main.go
new file mode 100644
index 0000000..67e835e
--- /dev/null
+++ b/_examples/health-check-access/main.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "os"
+ "time"
+
+ "atomicgo.dev/service"
+ "github.com/hellofresh/health-go/v5"
+ healthHttp "github.com/hellofresh/health-go/v5/checks/http"
+ _ "github.com/lib/pq"
+)
+
+func main() {
+ // Create service
+ svc := service.New("accessing-health-checker-from-handlers", nil)
+
+ // Register external API health check using built-in checker
+ svc.RegisterHealthCheck(health.Config{
+ Name: "external-api-should-success",
+ Timeout: time.Second * 5,
+ SkipOnErr: false,
+ Check: healthHttp.New(healthHttp.Config{
+ URL: "https://httb.dev/status/200",
+ }),
+ })
+
+ svc.RegisterHealthCheck(health.Config{
+ Name: "external-api-should-fail",
+ Timeout: time.Second * 5,
+ SkipOnErr: false,
+ Check: healthHttp.New(healthHttp.Config{
+ URL: "https://httb.dev/status/503",
+ }),
+ })
+
+ // Simple handler that accesses the health checker
+ svc.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ healthChecker := service.GetHealthChecker(r)
+ if healthChecker != nil {
+ check := healthChecker.Measure(r.Context())
+ // Pretty print the check
+ json, err := json.MarshalIndent(check, "", " ")
+		if err != nil {
+			w.Write([]byte(fmt.Sprintf("Error marshalling check: %v", err))); return
+		}
+ w.Write(json)
+ }
+ })
+
+ svc.Logger.Info("Service available at http://localhost:8080")
+ svc.Logger.Info("Health check at http://localhost:9090/health")
+
+ if err := svc.Start(); err != nil {
+ svc.Logger.Error("Failed to start service", "error", err)
+ os.Exit(1)
+ }
+}
diff --git a/_examples/health-check-custom/main.go b/_examples/health-check-custom/main.go
new file mode 100644
index 0000000..9a0f263
--- /dev/null
+++ b/_examples/health-check-custom/main.go
@@ -0,0 +1,69 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "os"
+ "time"
+
+ "atomicgo.dev/service"
+ "github.com/hellofresh/health-go/v5"
+)
+
+func main() {
+ svc := service.New("custom-health-service", nil)
+
+ // go-health provides an http health check, but for this example we'll build our own
+ // This health check will pass, as the external API is reachable
+ svc.RegisterHealthCheck(health.Config{
+ Name: "external-api",
+ Timeout: time.Second * 5,
+ Check: checkExternalAPI("https://httb.dev/status/200"),
+ })
+
+ // This will fail, as the external API is not reachable
+ svc.RegisterHealthCheck(health.Config{
+ Name: "external-api-failing",
+ Timeout: time.Second * 5,
+ Check: checkExternalAPI("https://httb.dev/status/404"),
+ })
+
+ // Main handler
+ svc.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Hello, World!"))
+ })
+
+ svc.Logger.Info("Service available at http://localhost:8080")
+ svc.Logger.Info("Health check at http://localhost:9090/health")
+
+ if err := svc.Start(); err != nil {
+ svc.Logger.Error("Failed to start service", "error", err)
+ os.Exit(1)
+ }
+}
+
+// Custom health check function that checks if the external API is reachable
+func checkExternalAPI(url string) health.CheckFunc {
+ return func(ctx context.Context) error {
+ client := &http.Client{Timeout: 3 * time.Second}
+
+		// The target URL is a parameter; pass a failing endpoint (e.g. a 404/503 status URL) to simulate an unhealthy check
+ req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+ if err != nil {
+ return fmt.Errorf("failed to create request: %w", err)
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return fmt.Errorf("external API unreachable: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("external API returned status %d", resp.StatusCode)
+ }
+
+ return nil
+ }
+}
diff --git a/_examples/health-check-postgresql/main.go b/_examples/health-check-postgresql/main.go
new file mode 100644
index 0000000..454cc5d
--- /dev/null
+++ b/_examples/health-check-postgresql/main.go
@@ -0,0 +1,47 @@
+package main
+
+import (
+ "net/http"
+ "os"
+ "time"
+
+ "atomicgo.dev/service"
+ "github.com/hellofresh/health-go/v5"
+ healthPostgres "github.com/hellofresh/health-go/v5/checks/postgres"
+ _ "github.com/lib/pq"
+)
+
+func main() {
+ // Database connection string - in production, use environment variables
+ dbURL := os.Getenv("DATABASE_URL")
+ if dbURL == "" {
+ dbURL = "postgres://user:password@localhost/dbname?sslmode=disable"
+ }
+
+ // Create service
+ svc := service.New("postgresql-health-service", nil)
+
+ // Register PostgreSQL health check using built-in checker
+ svc.RegisterHealthCheck(health.Config{
+ Name: "postgresql",
+ Timeout: time.Second * 5,
+ SkipOnErr: false, // Critical check - service is unhealthy if DB is down
+ Check: healthPostgres.New(healthPostgres.Config{
+ DSN: dbURL,
+ }),
+ })
+
+ // Simple handler
+ svc.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Hello, World!"))
+ })
+
+ svc.Logger.Info("Service available at http://localhost:8080")
+ svc.Logger.Info("Health check at http://localhost:9090/health")
+ svc.Logger.Info("Database URL: " + dbURL)
+
+ if err := svc.Start(); err != nil {
+ svc.Logger.Error("Failed to start service", "error", err)
+ os.Exit(1)
+ }
+}
diff --git a/_examples/minimal/main.go b/_examples/minimal/main.go
new file mode 100644
index 0000000..f14cb82
--- /dev/null
+++ b/_examples/minimal/main.go
@@ -0,0 +1,31 @@
+package main
+
+import (
+ "net/http"
+ "os"
+
+ "atomicgo.dev/service"
+)
+
+func main() {
+ // Create service with default configuration
+ svc := service.New("minimal-service", nil)
+
+ // Simple handler
+ svc.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ logger := service.GetLogger(r)
+ logger.Info("Hello from minimal service!")
+ w.Write([]byte("Hello from minimal service!"))
+ })
+
+ // Start service - includes graceful shutdown, metrics, and health checks
+ svc.Logger.Info("Starting minimal service...")
+ svc.Logger.Info("Service available at http://localhost:8080")
+ svc.Logger.Info("Health check at http://localhost:9090/health")
+ svc.Logger.Info("Metrics at http://localhost:9090/metrics")
+
+ if err := svc.Start(); err != nil {
+ svc.Logger.Error("Failed to start service", "error", err)
+ os.Exit(1)
+ }
+}
diff --git a/_examples/prometheus-counter/main.go b/_examples/prometheus-counter/main.go
new file mode 100644
index 0000000..0521620
--- /dev/null
+++ b/_examples/prometheus-counter/main.go
@@ -0,0 +1,41 @@
+package main
+
+import (
+ "net/http"
+ "os"
+
+ "atomicgo.dev/service"
+)
+
+func main() {
+ svc := service.New("prometheus-counter-service", nil)
+
+ // Register a simple custom counter to demonstrate the metrics system
+ err := svc.RegisterCounter(service.MetricConfig{
+ Name: "demo_counter",
+ Help: "Total number of demo events processed",
+ Labels: []string{"event_type", "result"},
+ })
+ if err != nil {
+ svc.Logger.Error("Failed to register demo counter", "error", err)
+ os.Exit(1)
+ }
+
+ // Main handler
+ svc.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("Hello, World!"))
+ })
+
+ svc.HandleFunc("/demo", func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("Demo counter incremented"))
+ })
+
+ svc.Logger.Info("Service available at http://localhost:8080")
+ svc.Logger.Info("Demo counter at http://localhost:8080/demo")
+ svc.Logger.Info("Prometheus metrics at http://localhost:9090/metrics")
+
+ if err := svc.Start(); err != nil {
+ svc.Logger.Error("Failed to start service", "error", err)
+ os.Exit(1)
+ }
+}
diff --git a/_examples/shutdown-hook/main.go b/_examples/shutdown-hook/main.go
new file mode 100644
index 0000000..2fe4d40
--- /dev/null
+++ b/_examples/shutdown-hook/main.go
@@ -0,0 +1,207 @@
+package main
+
+import (
+ "fmt"
+ "log/slog"
+ "net/http"
+ "os"
+ "sync"
+ "time"
+
+ "atomicgo.dev/service"
+)
+
+var startTime = time.Now()
+
+// Simulate various resources that need cleanup
+type DatabaseConnection struct {
+ connected bool
+ mu sync.Mutex
+}
+
+func (db *DatabaseConnection) Connect() error {
+ db.mu.Lock()
+ defer db.mu.Unlock()
+
+ // Note: We can't use svc.Logger here as it's not available in this context
+ // In a real application, you'd pass the logger to this function
+ time.Sleep(100 * time.Millisecond) // Simulate connection time
+ db.connected = true
+ return nil
+}
+
+func (db *DatabaseConnection) Close() error {
+ db.mu.Lock()
+ defer db.mu.Unlock()
+
+ if !db.connected {
+ return nil
+ }
+
+ // Note: We can't use svc.Logger here as it's not available in this context
+ // In a real application, you'd pass the logger to this function
+ time.Sleep(200 * time.Millisecond) // Simulate cleanup time
+ db.connected = false
+ return nil
+}
+
+func (db *DatabaseConnection) IsConnected() bool {
+ db.mu.Lock()
+ defer db.mu.Unlock()
+ return db.connected
+}
+
+type CacheService struct {
+ active bool
+ mu sync.Mutex
+}
+
+func (c *CacheService) Start() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // Note: We can't use svc.Logger here as it's not available in this context
+ // In a real application, you'd pass the logger to this function
+ time.Sleep(50 * time.Millisecond)
+ c.active = true
+ return nil
+}
+
+func (c *CacheService) Stop() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if !c.active {
+ return nil
+ }
+
+ // Note: We can't use svc.Logger here as it's not available in this context
+ // In a real application, you'd pass the logger to this function
+ time.Sleep(150 * time.Millisecond)
+ c.active = false
+ return nil
+}
+
+func main() {
+ // Initialize resources
+ db := &DatabaseConnection{}
+ cache := &CacheService{}
+
+ // Connect to resources
+ if err := db.Connect(); err != nil {
+ // We can't use svc.Logger here yet, so we'll use slog for this error
+ slog.Error("Failed to connect to database", "error", err)
+ os.Exit(1)
+ }
+
+ if err := cache.Start(); err != nil {
+ // We can't use svc.Logger here yet, so we'll use slog for this error
+ slog.Error("Failed to start cache service", "error", err)
+ os.Exit(1)
+ }
+
+ // Create service
+ svc := service.New("shutdown-hook-service", nil)
+
+ // Register shutdown hooks in reverse order of initialization
+ // The last registered hook runs first during shutdown
+
+ // Hook 1: Cache cleanup (runs first during shutdown)
+ svc.AddShutdownHook(func() error {
+ slog.Info("Shutdown hook: Cleaning up cache service...")
+ return cache.Stop()
+ })
+
+ // Hook 2: Database cleanup (runs second during shutdown)
+ svc.AddShutdownHook(func() error {
+ slog.Info("Shutdown hook: Cleaning up database connection...")
+ return db.Close()
+ })
+
+ // Hook 3: Final cleanup (runs last during shutdown)
+ svc.AddShutdownHook(func() error {
+ slog.Info("Shutdown hook: Performing final cleanup...")
+
+ // Simulate final cleanup operations
+ slog.Info("Saving application state...")
+ time.Sleep(100 * time.Millisecond)
+
+ slog.Info("Flushing logs...")
+ time.Sleep(50 * time.Millisecond)
+
+ slog.Info("Final cleanup completed")
+ return nil
+ })
+
+ // Hook 4: Demonstrate error handling in shutdown hooks
+ svc.AddShutdownHook(func() error {
+ slog.Info("Shutdown hook: Demonstrating error handling...")
+
+ // Simulate a non-critical error during shutdown
+ if time.Now().UnixNano()%2 == 0 {
+ slog.Warn("Non-critical error during shutdown (this is expected)")
+ return fmt.Errorf("simulated non-critical shutdown error")
+ }
+
+ slog.Info("Shutdown hook completed without errors")
+ return nil
+ })
+
+ // Register handlers
+ svc.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ logger := service.GetLogger(r)
+ logger.Info("Request received")
+
+ w.Write([]byte("Shutdown Hook Demo Service\n"))
+ w.Write([]byte(fmt.Sprintf("Database connected: %v\n", db.IsConnected())))
+ w.Write([]byte("Send SIGTERM or SIGINT to trigger graceful shutdown\n"))
+ w.Write([]byte("Press Ctrl+C to test shutdown hooks\n"))
+ })
+
+ // Status endpoint
+ svc.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
+ logger := service.GetLogger(r)
+ logger.Info("Status check requested")
+
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(fmt.Sprintf(`{
+ "database_connected": %v,
+ "cache_active": %v,
+ "uptime": "%v"
+ }`, db.IsConnected(), cache.active, time.Since(startTime))))
+ })
+
+ // Simulate some background work
+ go func() {
+ ticker := time.NewTicker(5 * time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if db.IsConnected() {
+ svc.Logger.Info("Background task: Database is healthy")
+ }
+ }
+ }
+ }()
+
+ svc.Logger.Info("Starting shutdown hook demo service...")
+ svc.Logger.Info("Service available at http://localhost:8080")
+ svc.Logger.Info("Status at http://localhost:8080/status")
+ svc.Logger.Info("Health check at http://localhost:9090/health")
+ svc.Logger.Info("")
+ svc.Logger.Info("To test shutdown hooks:")
+ svc.Logger.Info(" - Press Ctrl+C")
+ svc.Logger.Info(" - Send SIGTERM: kill -TERM ")
+ svc.Logger.Info(" - Send SIGINT: kill -INT ")
+ svc.Logger.Info("")
+ svc.Logger.Info("Watch the logs to see shutdown hooks execute in order")
+
+ if err := svc.Start(); err != nil {
+ svc.Logger.Error("Failed to start service", "error", err)
+ os.Exit(1)
+ }
+
+ svc.Logger.Info("Service shutdown complete")
+}
diff --git a/config.go b/config.go
index 737ad5d..d5c42b8 100644
--- a/config.go
+++ b/config.go
@@ -1,6 +1,7 @@
package service
import (
+ "fmt"
"log/slog"
"os"
"time"
@@ -11,10 +12,10 @@ import (
// Config holds all configuration for the service
type Config struct {
// HTTP Server configuration
- Addr string `env:"ADDR" envDefault:":8080"`
- ReadTimeout time.Duration `env:"READ_TIMEOUT" envDefault:"10s"`
+ Addr string `env:"ADDR" envDefault:":8080"`
+ ReadTimeout time.Duration `env:"READ_TIMEOUT" envDefault:"10s"`
WriteTimeout time.Duration `env:"WRITE_TIMEOUT" envDefault:"10s"`
- IdleTimeout time.Duration `env:"IDLE_TIMEOUT" envDefault:"120s"`
+ IdleTimeout time.Duration `env:"IDLE_TIMEOUT" envDefault:"120s"`
// Metrics server configuration
MetricsAddr string `env:"METRICS_ADDR" envDefault:":9090"`
@@ -23,6 +24,14 @@ type Config struct {
// Graceful shutdown configuration
ShutdownTimeout time.Duration `env:"SHUTDOWN_TIMEOUT" envDefault:"30s"`
+ // Service information
+ Version string `env:"SERVICE_VERSION" envDefault:"v1.0.0"`
+
+ // Health check configuration
+ HealthPath string `env:"HEALTH_PATH" envDefault:"/health"`
+ ReadinessPath string `env:"READINESS_PATH" envDefault:"/ready"`
+ LivenessPath string `env:"LIVENESS_PATH" envDefault:"/live"`
+
// Logger configuration
Logger *slog.Logger `env:"-"`
@@ -40,6 +49,10 @@ func DefaultConfig() *Config {
MetricsAddr: ":9090",
MetricsPath: "/metrics",
ShutdownTimeout: 30 * time.Second,
+ Version: "v1.0.0",
+ HealthPath: "/health",
+ ReadinessPath: "/ready",
+ LivenessPath: "/live",
Logger: slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo})),
ShutdownHooks: make([]func() error, 0),
}
@@ -50,7 +63,7 @@ func LoadFromEnv() (*Config, error) {
config := DefaultConfig()
if err := env.Parse(config); err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to parse environment variables: %w", err)
}
return config, nil
diff --git a/doc.go b/doc.go
index ba93052..53b8376 100644
--- a/doc.go
+++ b/doc.go
@@ -1,178 +1,22 @@
/*
-Package service provides a lightweight, production-ready HTTP service framework for Go applications.
-
-The service framework is designed to be Kubernetes-ready and follows best practices for
-highly available microservices. It includes built-in support for graceful shutdown,
-Prometheus metrics, structured logging, middleware, and environment-based configuration.
-
-## Features
-
-- **HTTP Server**: Configurable HTTP server with timeouts and graceful shutdown
-- **Metrics**: Built-in Prometheus metrics collection with automatic request tracking
-- **Logging**: Structured logging with slog integration and context-aware loggers
-- **Middleware**: Extensible middleware system with built-in recovery and logging
-- **Configuration**: Environment-based configuration with sensible defaults
-- **Graceful Shutdown**: Signal handling with configurable shutdown hooks
-- **Health Checks**: Built-in health check endpoints
-- **Kubernetes Ready**: Designed for containerized deployments
-
-## Quick Start
-
-```go
-package main
-
-import (
-
- "log/slog"
- "net/http"
- "os"
-
- "atomicgo.dev/service"
-
-)
-
- func main() {
- // Create service with default configuration
- svc := service.New("my-service", nil)
-
- // Register handlers
- svc.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- logger := service.GetLogger(r)
- logger.Info("Hello, World!")
- w.Write([]byte("Hello, World!"))
- })
-
- // Start with graceful shutdown
- if err := svc.StartWithGracefulShutdown(); err != nil {
- os.Exit(1)
- }
- }
-
-```
-
-## Configuration
-
-The framework supports configuration via environment variables with sensible defaults:
-
-- `ADDR`: HTTP server address (default: ":8080")
-- `METRICS_ADDR`: Metrics server address (default: ":9090")
-- `METRICS_PATH`: Metrics endpoint path (default: "/metrics")
-- `READ_TIMEOUT`: HTTP read timeout (default: "10s")
-- `WRITE_TIMEOUT`: HTTP write timeout (default: "10s")
-- `IDLE_TIMEOUT`: HTTP idle timeout (default: "120s")
-- `SHUTDOWN_TIMEOUT`: Graceful shutdown timeout (default: "30s")
-
-```go
-// Load configuration from environment
-config, err := service.LoadFromEnv()
-
- if err != nil {
- log.Fatal(err)
- }
-
-// Create service with custom configuration
-svc := service.New("my-service", config)
-```
-
-## Middleware
-
-The framework includes several built-in middleware:
-
-- **LoggerMiddleware**: Injects logger into request context
-- **RecoveryMiddleware**: Recovers from panics and logs errors
-- **RequestLoggingMiddleware**: Logs incoming requests
-- **MetricsMiddleware**: Tracks HTTP metrics for Prometheus
-
-```go
-// Add custom middleware
-
- svc.Use(func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("X-Custom", "value")
- next.ServeHTTP(w, r)
- })
- })
-
-```
-
-## Metrics
-
-The framework automatically collects Prometheus metrics:
-
-- `{service_name}_http_requests_total`: Total HTTP requests
-- `{service_name}_http_request_duration_seconds`: Request duration
-- `{service_name}_http_requests_in_flight`: In-flight requests
-
-Metrics are available at `:9090/metrics` by default.
-
-```go
-// Access metrics in handlers
-
- func myHandler(w http.ResponseWriter, r *http.Request) {
- metrics := service.GetMetrics(r)
- if metrics != nil {
- // Custom metric operations can be added here
- }
- }
-
-```
-
-## Graceful Shutdown
-
-The framework supports graceful shutdown with signal handling and custom hooks:
-
-```go
-// Add shutdown hooks
-
- svc.AddShutdownHook(func() error {
- // Cleanup resources
- return nil
- })
-
-// Start with graceful shutdown
-svc.StartWithGracefulShutdown()
-```
-
-## Logging
-
-The framework uses structured logging with slog and provides context-aware loggers:
-
-```go
-
- func myHandler(w http.ResponseWriter, r *http.Request) {
- logger := service.GetLogger(r)
- logger.Info("request processed", "path", r.URL.Path)
- }
-
-```
-
-## Health Checks
-
-Health check endpoints are automatically available:
-
-- `:9090/health`: Basic health check
-- `:9090/metrics`: Prometheus metrics
-
-## Kubernetes Deployment
-
-The framework is designed for Kubernetes deployments with:
-
-- Graceful shutdown handling SIGTERM
-- Health check endpoints for liveness/readiness probes
-- Prometheus metrics for monitoring
-- Configurable resource limits via environment variables
-
-## Examples
-
-See the `_example/` directory for complete working examples demonstrating:
-
-- Basic service setup
-- Custom middleware
-- Environment configuration
-- Graceful shutdown
-- Metrics integration
-
-The framework is designed to be lightweight while providing all essential features
-for production-ready microservices.
+Package service provides a minimal boilerplate wrapper for building production-ready Go HTTP services.
+
+This library reduces the boilerplate of writing production/enterprise-grade Go services to a minimum.
+It does NOT provide an HTTP framework, business logic, or impose restrictions on web frameworks.
+Instead, it's a lightweight wrapper around http.Server that provides essential production features
+out of the box for high availability services.
+
+Key benefits:
+- Minimal boilerplate for Kubernetes and containerized production deployments
+- Built-in Prometheus metrics collection and health checks
+- Graceful shutdown with signal handling
+- Structured logging with slog integration
+- Environment-based configuration with sensible defaults
+- Extensible middleware system
+- No restrictions on HTTP frameworks
+
+The framework is designed to be a thin layer that handles the operational concerns of production
+services while letting you write HTTP handlers exactly as you prefer, using any patterns or
+frameworks you choose.
*/
package service
diff --git a/go.mod b/go.mod
index 53ffb1f..c660058 100644
--- a/go.mod
+++ b/go.mod
@@ -4,16 +4,19 @@ go 1.24
require (
github.com/caarlos0/env/v11 v11.3.1
+ github.com/hellofresh/health-go/v5 v5.5.5
github.com/prometheus/client_golang v1.22.0
+ github.com/prometheus/client_model v0.6.1
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
+ go.opentelemetry.io/otel v1.35.0 // indirect
+ go.opentelemetry.io/otel/trace v1.35.0 // indirect
golang.org/x/sys v0.30.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
)
diff --git a/go.sum b/go.sum
index 87f5699..7985aa2 100644
--- a/go.sum
+++ b/go.sum
@@ -8,6 +8,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/hellofresh/health-go/v5 v5.5.5 h1:JZwZ8kZzAgjdGCvjgrIJTcu1sImvZoHbwAj7CK19fpw=
+github.com/hellofresh/health-go/v5 v5.5.5/go.mod h1:W+6uiWHS/m9jaB0aYBVlUBTeyE98yom6f+0ewLoBPYQ=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
@@ -24,8 +26,14 @@ github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
+go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
+go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
+go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
diff --git a/health.go b/health.go
new file mode 100644
index 0000000..2fe8c2f
--- /dev/null
+++ b/health.go
@@ -0,0 +1,117 @@
+package service
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/hellofresh/health-go/v5"
+)
+
+// HealthChecker wraps the health-go library health checker
+type HealthChecker struct {
+ checker *health.Health
+}
+
+// NewHealthChecker creates a new health checker with the service component information
+func NewHealthChecker(serviceName, version string) (*HealthChecker, error) {
+ checker, err := health.New(
+ health.WithComponent(health.Component{
+ Name: serviceName,
+ Version: version,
+ }),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create health checker: %w", err)
+ }
+
+ return &HealthChecker{
+ checker: checker,
+ }, nil
+}
+
+// Register adds a health check to the health checker
+func (hc *HealthChecker) Register(config health.Config) error {
+	if err := hc.checker.Register(config); err != nil { return fmt.Errorf("failed to register health check: %w", err) }; return nil
+}
+
+// Handler returns the HTTP handler for health checks
+func (hc *HealthChecker) Handler() http.Handler {
+ return hc.checker.Handler()
+}
+
+// HandlerFunc returns the HTTP handler function for health checks
+func (hc *HealthChecker) HandlerFunc(w http.ResponseWriter, r *http.Request) {
+ hc.checker.HandlerFunc(w, r)
+}
+
+// Measure returns the current health status
+func (hc *HealthChecker) Measure(ctx context.Context) health.Check {
+ return hc.checker.Measure(ctx)
+}
+
+// IsHealthy returns true if all health checks are passing
+func (hc *HealthChecker) IsHealthy(ctx context.Context) bool {
+ check := hc.Measure(ctx)
+ return check.Status == health.StatusOK
+}
+
+// IsReady returns true if the service is ready to serve requests
+// This is typically used for Kubernetes readiness probes
+func (hc *HealthChecker) IsReady(ctx context.Context) bool {
+ // For readiness, we want to check if critical services are available
+ // This is the same as health check for now, but can be customized
+ return hc.IsHealthy(ctx)
+}
+
+// IsAlive returns true if the service is alive
+// This is typically used for Kubernetes liveness probes
+func (hc *HealthChecker) IsAlive(ctx context.Context) bool {
+ // For liveness, we want to check if the service is still running
+ // This should be more lenient than health checks
+ // For now, we'll just return true as the service is running if this is called
+ return true
+}
+
+// ReadinessHandler returns an HTTP handler for readiness checks
+func (hc *HealthChecker) ReadinessHandler() http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
+ defer cancel()
+
+ if hc.IsReady(ctx) {
+ w.WriteHeader(http.StatusOK)
+ _, _ = w.Write([]byte("Ready"))
+ } else {
+ w.WriteHeader(http.StatusServiceUnavailable)
+ _, _ = w.Write([]byte("Not Ready"))
+ }
+ }
+}
+
+// LivenessHandler returns an HTTP handler for liveness checks
+func (hc *HealthChecker) LivenessHandler() http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
+ defer cancel()
+
+ if hc.IsAlive(ctx) {
+ w.WriteHeader(http.StatusOK)
+ _, _ = w.Write([]byte("Alive"))
+ } else {
+ w.WriteHeader(http.StatusServiceUnavailable)
+ _, _ = w.Write([]byte("Not Alive"))
+ }
+ }
+}
+
+// GetHealthChecker retrieves the health checker from the request context
+func GetHealthChecker(r *http.Request) *HealthChecker {
+ hc, ok := r.Context().Value(HealthCheckerKey).(*HealthChecker)
+ if !ok {
+ return nil
+ }
+
+ return hc
+}
diff --git a/health_test.go b/health_test.go
new file mode 100644
index 0000000..1141a0f
--- /dev/null
+++ b/health_test.go
@@ -0,0 +1,415 @@
+package service
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/hellofresh/health-go/v5"
+)
+
+func TestNewHealthChecker(t *testing.T) {
+	t.Parallel()
+
+	t.Run("creates health checker successfully", func(t *testing.T) {
+		t.Parallel()
+
+		created, err := NewHealthChecker("test-service", "v1.0.0")
+		if err != nil {
+			t.Fatalf("expected no error, got %v", err)
+		}
+
+		// Both the wrapper and the embedded checker must be non-nil.
+		if created == nil {
+			t.Fatal("expected health checker to be created")
+		}
+
+		if created.checker == nil {
+			t.Fatal("expected internal health checker to be initialized")
+		}
+	})
+}
+
+func TestHealthChecker_Register(t *testing.T) {
+	t.Parallel()
+
+	healthChecker, err := NewHealthChecker("test-service", "v1.0.0")
+	if err != nil {
+		t.Fatalf("failed to create health checker: %v", err)
+	}
+
+	// Register a simple health check. The context parameter is unused,
+	// so it is named "_" — matching the convention used elsewhere in
+	// this file and keeping unused-parameter linters quiet.
+	healthChecker.Register(health.Config{
+		Name: "test-check",
+		Check: func(_ context.Context) error {
+			return nil
+		},
+	})
+
+	// Verify the health check was registered by measuring overall health.
+	check := healthChecker.Measure(context.Background())
+
+	if check.Status != health.StatusOK {
+		t.Errorf("expected status OK, got %s", check.Status)
+	}
+}
+
+func TestHealthChecker_IsHealthy(t *testing.T) {
+	t.Parallel()
+
+	t.Run("returns true when all checks pass", func(t *testing.T) {
+		t.Parallel()
+
+		// Each subtest builds its own checker: the subtests run in
+		// parallel, so sharing one instance would let the failing check
+		// registered below leak into this assertion and make it flaky.
+		healthChecker, err := NewHealthChecker("test-service", "v1.0.0")
+		if err != nil {
+			t.Fatalf("failed to create health checker: %v", err)
+		}
+
+		healthChecker.Register(health.Config{
+			Name: "passing-check",
+			Check: func(_ context.Context) error {
+				return nil
+			},
+		})
+
+		if !healthChecker.IsHealthy(context.Background()) {
+			t.Error("expected IsHealthy to return true")
+		}
+	})
+
+	t.Run("returns false when check fails", func(t *testing.T) {
+		t.Parallel()
+
+		healthChecker, err := NewHealthChecker("test-service", "v1.0.0")
+		if err != nil {
+			t.Fatalf("failed to create health checker: %v", err)
+		}
+
+		healthChecker.Register(health.Config{
+			Name: "failing-check",
+			Check: func(_ context.Context) error {
+				return errors.New("check failed") //nolint:err113
+			},
+		})
+
+		if healthChecker.IsHealthy(context.Background()) {
+			t.Error("expected IsHealthy to return false")
+		}
+	})
+}
+
+func TestHealthChecker_IsReady(t *testing.T) {
+	t.Parallel()
+
+	healthChecker, err := NewHealthChecker("test-service", "v1.0.0")
+	if err != nil {
+		t.Fatalf("failed to create health checker: %v", err)
+	}
+
+	t.Run("returns true when healthy", func(t *testing.T) {
+		t.Parallel()
+
+		// Unused context parameter named "_" for consistency with the
+		// other checks in this file and to satisfy linters.
+		healthChecker.Register(health.Config{
+			Name: "ready-check",
+			Check: func(_ context.Context) error {
+				return nil
+			},
+		})
+
+		if !healthChecker.IsReady(context.Background()) {
+			t.Error("expected IsReady to return true")
+		}
+	})
+}
+
+func TestHealthChecker_IsAlive(t *testing.T) {
+	t.Parallel()
+
+	checker, err := NewHealthChecker("test-service", "v1.0.0")
+	if err != nil {
+		t.Fatalf("failed to create health checker: %v", err)
+	}
+
+	// Liveness is unconditional for a running process, so no checks
+	// need to be registered before asserting.
+	if alive := checker.IsAlive(context.Background()); !alive {
+		t.Error("expected IsAlive to return true")
+	}
+}
+
+func TestHealthChecker_Handlers(t *testing.T) {
+	t.Parallel()
+
+	healthChecker, err := NewHealthChecker("test-service", "v1.0.0")
+	if err != nil {
+		t.Fatalf("failed to create health checker: %v", err)
+	}
+
+	// Register a passing health check so every handler reports healthy.
+	// The unused context parameter is named "_" to match the convention
+	// used elsewhere in this file.
+	healthChecker.Register(health.Config{
+		Name: "test-check",
+		Check: func(_ context.Context) error {
+			return nil
+		},
+	})
+
+	t.Run("Handler returns 200 for healthy service", func(t *testing.T) {
+		t.Parallel()
+
+		req := httptest.NewRequest(http.MethodGet, "/health", nil)
+		recorder := httptest.NewRecorder()
+
+		healthChecker.Handler().ServeHTTP(recorder, req)
+
+		if recorder.Code != http.StatusOK {
+			t.Errorf("expected status 200, got %d", recorder.Code)
+		}
+	})
+
+	t.Run("HandlerFunc returns 200 for healthy service", func(t *testing.T) {
+		t.Parallel()
+
+		req := httptest.NewRequest(http.MethodGet, "/health", nil)
+		recorder := httptest.NewRecorder()
+
+		healthChecker.HandlerFunc(recorder, req)
+
+		if recorder.Code != http.StatusOK {
+			t.Errorf("expected status 200, got %d", recorder.Code)
+		}
+	})
+
+	t.Run("ReadinessHandler returns 200 for ready service", func(t *testing.T) {
+		t.Parallel()
+
+		req := httptest.NewRequest(http.MethodGet, "/ready", nil)
+		recorder := httptest.NewRecorder()
+
+		healthChecker.ReadinessHandler()(recorder, req)
+
+		if recorder.Code != http.StatusOK {
+			t.Errorf("expected status 200, got %d", recorder.Code)
+		}
+
+		if recorder.Body.String() != "Ready" {
+			t.Errorf("expected body 'Ready', got %s", recorder.Body.String())
+		}
+	})
+
+	t.Run("LivenessHandler returns 200 for alive service", func(t *testing.T) {
+		t.Parallel()
+
+		req := httptest.NewRequest(http.MethodGet, "/live", nil)
+		recorder := httptest.NewRecorder()
+
+		healthChecker.LivenessHandler()(recorder, req)
+
+		if recorder.Code != http.StatusOK {
+			t.Errorf("expected status 200, got %d", recorder.Code)
+		}
+
+		if recorder.Body.String() != "Alive" {
+			t.Errorf("expected body 'Alive', got %s", recorder.Body.String())
+		}
+	})
+}
+
+func TestHealthChecker_HandlersWithFailures(t *testing.T) {
+	t.Parallel()
+
+	healthChecker, err := NewHealthChecker("test-service", "v1.0.0")
+	if err != nil {
+		t.Fatalf("failed to create health checker: %v", err)
+	}
+
+	// Register a failing health check; the unused context parameter is
+	// named "_" to match the rest of the file.
+	healthChecker.Register(health.Config{
+		Name: "failing-check",
+		Check: func(_ context.Context) error {
+			return errors.New("check failed") //nolint:err113
+		},
+	})
+
+	t.Run("ReadinessHandler returns 503 for not ready service", func(t *testing.T) {
+		t.Parallel()
+
+		req := httptest.NewRequest(http.MethodGet, "/ready", nil)
+		recorder := httptest.NewRecorder()
+
+		healthChecker.ReadinessHandler()(recorder, req)
+
+		if recorder.Code != http.StatusServiceUnavailable {
+			t.Errorf("expected status 503, got %d", recorder.Code)
+		}
+
+		if recorder.Body.String() != "Not Ready" {
+			t.Errorf("expected body 'Not Ready', got %s", recorder.Body.String())
+		}
+	})
+}
+
+func TestGetHealthChecker(t *testing.T) {
+	t.Parallel()
+
+	healthChecker, err := NewHealthChecker("test-service", "v1.0.0")
+	if err != nil {
+		t.Fatalf("failed to create health checker: %v", err)
+	}
+
+	t.Run("returns health checker from context", func(t *testing.T) {
+		t.Parallel()
+
+		// The middleware must attach healthChecker to the request context
+		// so the inner handler can retrieve the exact same instance.
+		handler := HealthCheckerMiddleware(healthChecker)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			retrievedHC := GetHealthChecker(r)
+			if retrievedHC == nil {
+				t.Error("expected health checker to be retrieved from context")
+			}
+
+			if retrievedHC != healthChecker {
+				t.Error("expected retrieved health checker to match original")
+			}
+
+			w.WriteHeader(http.StatusOK)
+		}))
+
+		req := httptest.NewRequest(http.MethodGet, "/test", nil)
+		recorder := httptest.NewRecorder()
+
+		// ServeHTTP is synchronous, so the assertions above run before
+		// the status check below.
+		handler.ServeHTTP(recorder, req)
+
+		if recorder.Code != http.StatusOK {
+			t.Errorf("expected status 200, got %d", recorder.Code)
+		}
+	})
+
+	t.Run("returns nil when not in context", func(t *testing.T) {
+		t.Parallel()
+
+		// A request that never passed through the middleware has no
+		// checker in its context.
+		req := httptest.NewRequest(http.MethodGet, "/test", nil)
+
+		retrievedHC := GetHealthChecker(req)
+		if retrievedHC != nil {
+			t.Error("expected health checker to be nil when not in context")
+		}
+	})
+}
+
+func TestHealthCheckerMiddleware(t *testing.T) {
+	t.Parallel()
+
+	checker, err := NewHealthChecker("test-service", "v1.0.0")
+	if err != nil {
+		t.Fatalf("failed to create health checker: %v", err)
+	}
+
+	// Wrap a handler that verifies the checker was injected into the
+	// request context before responding.
+	wrapped := HealthCheckerMiddleware(checker)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if GetHealthChecker(r) == nil {
+			t.Error("health checker should be available in context")
+		}
+
+		w.WriteHeader(http.StatusOK)
+	}))
+
+	recorder := httptest.NewRecorder()
+	wrapped.ServeHTTP(recorder, httptest.NewRequest(http.MethodGet, "/test", nil))
+
+	if recorder.Code != http.StatusOK {
+		t.Errorf("expected status 200, got %d", recorder.Code)
+	}
+}
+
+func TestHealthChecker_Timeout(t *testing.T) {
+	t.Parallel()
+
+	healthChecker, err := NewHealthChecker("test-service", "v1.0.0")
+	if err != nil {
+		t.Fatalf("failed to create health checker: %v", err)
+	}
+
+	// Register a check that is slower than every timeout involved but
+	// honours context cancellation, so the test returns as soon as the
+	// deadline fires instead of always burning the full 200ms sleep.
+	healthChecker.Register(health.Config{
+		Name:    "slow-check",
+		Timeout: 100 * time.Millisecond,
+		Check: func(ctx context.Context) error {
+			select {
+			case <-time.After(200 * time.Millisecond):
+				return nil
+			case <-ctx.Done():
+				return ctx.Err()
+			}
+		},
+	})
+
+	// The caller-supplied context expires before the check can finish,
+	// so the overall measurement must report unhealthy.
+	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+	defer cancel()
+
+	if healthChecker.IsHealthy(ctx) {
+		t.Error("expected health check to fail due to timeout")
+	}
+}
+
+func TestService_RegisterHealthCheck(t *testing.T) {
+	t.Parallel()
+
+	svc := New("test-service", nil)
+
+	t.Run("registers health check successfully", func(t *testing.T) {
+		t.Parallel()
+
+		checkCalled := false
+
+		// NOTE(review): checkCalled is written inside the check callback
+		// and read after Measure returns; this assumes Measure runs the
+		// checks and waits for them before returning — confirm against
+		// health-go, or guard the flag with sync/atomic if checks may
+		// run on other goroutines.
+		svc.RegisterHealthCheck(health.Config{
+			Name: "test-check",
+			Check: func(_ context.Context) error {
+				checkCalled = true
+				return nil
+			},
+		})
+
+		// Verify the check was registered by measuring health.
+		if svc.HealthChecker != nil {
+			check := svc.HealthChecker.Measure(context.Background())
+
+			if check.Status != health.StatusOK {
+				t.Errorf("expected status OK, got %s", check.Status)
+			}
+
+			if !checkCalled {
+				t.Error("expected health check to be called")
+			}
+		}
+	})
+
+	t.Run("handles nil health checker gracefully", func(t *testing.T) {
+		t.Parallel()
+
+		svcWithoutHealth := &Service{
+			Name:          "test",
+			Logger:        svc.Logger,
+			HealthChecker: nil,
+		}
+
+		// Registering on a nil health checker must be a no-op, not a panic.
+		svcWithoutHealth.RegisterHealthCheck(health.Config{
+			Name: "test-check",
+			Check: func(_ context.Context) error {
+				return nil
+			},
+		})
+	})
+}
+
+func TestService_GetHealthChecker(t *testing.T) {
+	t.Parallel()
+
+	svc := New("test-service", nil)
+
+	// The accessor must hand back the very instance held by the service.
+	got := svc.GetHealthChecker()
+	if got == nil {
+		t.Error("expected health checker to be available")
+	}
+
+	if got != svc.HealthChecker {
+		t.Error("expected returned health checker to match service health checker")
+	}
+}
diff --git a/metrics.go b/metrics.go
index c61cdaf..06e99f2 100644
--- a/metrics.go
+++ b/metrics.go
@@ -2,58 +2,381 @@ package service
import (
"context"
+ "errors"
+ "fmt"
"net/http"
"strconv"
+ "strings"
+ "sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
-// MetricsCollector holds all the metrics for the service
+// MetricsCollector holds all the metrics for the service with a flexible registry
type MetricsCollector struct {
+	serviceName string
+	registry    *prometheus.Registry
+	// mu guards the custom-metric maps below; built-in HTTP metrics are
+	// created once in NewMetricsCollector and never mutated afterwards.
+	mu          sync.RWMutex
+
+	// Built-in HTTP metrics (always available)
	httpRequestsTotal    *prometheus.CounterVec
	httpRequestDuration  *prometheus.HistogramVec
	httpRequestsInFlight prometheus.Gauge
+
+	// Custom metrics, keyed by their fully-prefixed metric name
+	// (see ensureMetricNamePrefix)
+	counters   map[string]*prometheus.CounterVec
+	gauges     map[string]*prometheus.GaugeVec
+	histograms map[string]*prometheus.HistogramVec
+	summaries  map[string]*prometheus.SummaryVec
+}
+
+// MetricConfig holds configuration for creating custom metrics
+type MetricConfig struct {
+	Name       string              // metric name; the service prefix is added automatically if missing
+	Help       string              // help text exposed by Prometheus
+	Labels     []string            // label names for the metric vector
+	Buckets    []float64           // For histograms; defaults to prometheus.DefBuckets when empty
+	Objectives map[float64]float64 // For summaries; defaults to 0.5/0.9/0.99 quantiles when empty
+}
-// NewMetricsCollector creates a new metrics collector
+// NewMetricsCollector creates a new metrics collector with a flexible registry.
+// It uses a dedicated prometheus.Registry (not the process-global default),
+// so metrics from multiple collectors/tests never collide.
func NewMetricsCollector(serviceName string) *MetricsCollector {
-	mc := &MetricsCollector{
-		httpRequestsTotal: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
-				Name: serviceName + "_http_requests_total",
-				Help: "Total number of HTTP requests",
-			},
-			[]string{"method", "endpoint", "status_code"},
-		),
-		httpRequestDuration: prometheus.NewHistogramVec(
-			prometheus.HistogramOpts{
-				Name: serviceName + "_http_request_duration_seconds",
-				Help: "HTTP request duration in seconds",
-				Buckets: prometheus.DefBuckets,
-			},
-			[]string{"method", "endpoint", "status_code"},
-		),
-		httpRequestsInFlight: prometheus.NewGauge(
-			prometheus.GaugeOpts{
-				Name: serviceName + "_http_requests_in_flight",
-				Help: "Number of HTTP requests currently being processed",
-			},
-		),
-	}
-
-	// Register metrics with Prometheus (ignore if already registered)
-	prometheus.DefaultRegisterer.Register(mc.httpRequestsTotal)
-	prometheus.DefaultRegisterer.Register(mc.httpRequestDuration)
-	prometheus.DefaultRegisterer.Register(mc.httpRequestsInFlight)
-
-	return mc
+	registry := prometheus.NewRegistry()
+
+	metricsCollector := &MetricsCollector{
+		serviceName: serviceName,
+		registry:    registry,
+		counters:    make(map[string]*prometheus.CounterVec),
+		gauges:      make(map[string]*prometheus.GaugeVec),
+		histograms:  make(map[string]*prometheus.HistogramVec),
+		summaries:   make(map[string]*prometheus.SummaryVec),
+	}
+
+	// Create built-in HTTP metrics (request count, latency, in-flight gauge)
+	metricsCollector.httpRequestsTotal = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: serviceName + "_http_requests_total",
+			Help: "Total number of HTTP requests",
+		},
+		[]string{"method", "endpoint", "status_code"},
+	)
+
+	metricsCollector.httpRequestDuration = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Name:    serviceName + "_http_request_duration_seconds",
+			Help:    "HTTP request duration in seconds",
+			Buckets: prometheus.DefBuckets,
+		},
+		[]string{"method", "endpoint", "status_code"},
+	)
+
+	metricsCollector.httpRequestsInFlight = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: serviceName + "_http_requests_in_flight",
+			Help: "Number of HTTP requests currently being processed",
+		},
+	)
+
+	// Register built-in metrics; MustRegister cannot collide here because
+	// the registry was created fresh above.
+	registry.MustRegister(metricsCollector.httpRequestsTotal)
+	registry.MustRegister(metricsCollector.httpRequestDuration)
+	registry.MustRegister(metricsCollector.httpRequestsInFlight)
+
+	return metricsCollector
+}
+
+// RegisterCounter registers a new counter metric.
+// The name is prefixed with the service name when not already prefixed;
+// an error is returned for duplicates or Prometheus registration failure.
+func (mc *MetricsCollector) RegisterCounter(config MetricConfig) error {
+	mc.mu.Lock()
+	defer mc.mu.Unlock()
+
+	// Metrics are stored under their fully-prefixed name.
+	name := mc.ensureMetricNamePrefix(config.Name)
+
+	if _, dup := mc.counters[name]; dup {
+		return fmt.Errorf("counter %s already exists", name) //nolint:err113
+	}
+
+	vec := prometheus.NewCounterVec(
+		prometheus.CounterOpts{Name: name, Help: config.Help},
+		config.Labels,
+	)
+
+	if err := mc.registry.Register(vec); err != nil {
+		return fmt.Errorf("failed to register counter %s: %w", name, err)
+	}
+
+	mc.counters[name] = vec
+
+	return nil
+}
+
+// RegisterGauge registers a new gauge metric.
+// The name is prefixed with the service name when not already prefixed;
+// an error is returned for duplicates or Prometheus registration failure.
+func (mc *MetricsCollector) RegisterGauge(config MetricConfig) error {
+	mc.mu.Lock()
+	defer mc.mu.Unlock()
+
+	// Ensure metric name has service prefix
+	prefixedName := mc.ensureMetricNamePrefix(config.Name)
+
+	if _, exists := mc.gauges[prefixedName]; exists {
+		return fmt.Errorf("gauge %s already exists", prefixedName) //nolint:err113
+	}
+
+	gauge := prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: prefixedName,
+			Help: config.Help,
+		},
+		config.Labels,
+	)
+
+	if err := mc.registry.Register(gauge); err != nil {
+		return fmt.Errorf("failed to register gauge %s: %w", prefixedName, err)
+	}
+
+	mc.gauges[prefixedName] = gauge
+
+	return nil
+}
+
+// RegisterHistogram registers a new histogram metric.
+// When config.Buckets is empty, prometheus.DefBuckets is used.
+func (mc *MetricsCollector) RegisterHistogram(config MetricConfig) error {
+	mc.mu.Lock()
+	defer mc.mu.Unlock()
+
+	// Ensure metric name has service prefix
+	prefixedName := mc.ensureMetricNamePrefix(config.Name)
+
+	if _, exists := mc.histograms[prefixedName]; exists {
+		return fmt.Errorf("histogram %s already exists", prefixedName) //nolint:err113
+	}
+
+	// Fall back to the standard bucket layout when none was supplied.
+	buckets := config.Buckets
+	if len(buckets) == 0 {
+		buckets = prometheus.DefBuckets
+	}
+
+	histogram := prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Name:    prefixedName,
+			Help:    config.Help,
+			Buckets: buckets,
+		},
+		config.Labels,
+	)
+
+	if err := mc.registry.Register(histogram); err != nil {
+		return fmt.Errorf("failed to register histogram %s: %w", prefixedName, err)
+	}
+
+	mc.histograms[prefixedName] = histogram
+
+	return nil
+}
+
+// RegisterSummary registers a new summary metric.
+// When config.Objectives is empty, the 0.5/0.9/0.99 quantiles are used.
+func (mc *MetricsCollector) RegisterSummary(config MetricConfig) error {
+	mc.mu.Lock()
+	defer mc.mu.Unlock()
+
+	// Ensure metric name has service prefix
+	prefixedName := mc.ensureMetricNamePrefix(config.Name)
+
+	if _, exists := mc.summaries[prefixedName]; exists {
+		return fmt.Errorf("summary %s already exists", prefixedName) //nolint:err113
+	}
+
+	// Default quantile objectives: median, p90, p99 with typical error windows.
+	objectives := config.Objectives
+	if len(objectives) == 0 {
+		objectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+	}
+
+	summary := prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Name:       prefixedName,
+			Help:       config.Help,
+			Objectives: objectives,
+		},
+		config.Labels,
+	)
+
+	if err := mc.registry.Register(summary); err != nil {
+		return fmt.Errorf("failed to register summary %s: %w", prefixedName, err)
+	}
+
+	mc.summaries[prefixedName] = summary
+
+	return nil
+}
+
+// IncCounter increments a counter metric.
+// The label values must match the label names given at registration.
+func (mc *MetricsCollector) IncCounter(name string, labels ...string) error {
+	mc.mu.RLock()
+	defer mc.mu.RUnlock()
+
+	// Look up under the fully-prefixed name used at registration time.
+	prefixed := mc.ensureMetricNamePrefix(name)
+
+	counter, ok := mc.counters[prefixed]
+	if !ok {
+		return fmt.Errorf("counter %s not found", prefixed) //nolint:err113
+	}
+
+	counter.WithLabelValues(labels...).Inc()
+
+	return nil
+}
+
+// AddCounter adds a value to a counter metric.
+// Returns an error when no counter with the (prefixed) name is registered.
+func (mc *MetricsCollector) AddCounter(name string, value float64, labels ...string) error {
+	mc.mu.RLock()
+	defer mc.mu.RUnlock()
+
+	// Ensure metric name has service prefix
+	prefixedName := mc.ensureMetricNamePrefix(name)
+
+	counter, exists := mc.counters[prefixedName]
+	if !exists {
+		return fmt.Errorf("counter %s not found", prefixedName) //nolint:err113
+	}
+
+	counter.WithLabelValues(labels...).Add(value)
+
+	return nil
+}
+
+// SetGauge sets a gauge metric value.
+// Returns an error when no gauge with the (prefixed) name is registered.
+func (mc *MetricsCollector) SetGauge(name string, value float64, labels ...string) error {
+	mc.mu.RLock()
+	defer mc.mu.RUnlock()
+
+	// Ensure metric name has service prefix
+	prefixedName := mc.ensureMetricNamePrefix(name)
+
+	gauge, exists := mc.gauges[prefixedName]
+	if !exists {
+		return fmt.Errorf("gauge %s not found", prefixedName) //nolint:err113
+	}
+
+	gauge.WithLabelValues(labels...).Set(value)
+
+	return nil
+}
+
+// IncGauge increments a gauge metric by one.
+// Returns an error when no gauge with the (prefixed) name is registered.
+func (mc *MetricsCollector) IncGauge(name string, labels ...string) error {
+	mc.mu.RLock()
+	defer mc.mu.RUnlock()
+
+	// Ensure metric name has service prefix
+	prefixedName := mc.ensureMetricNamePrefix(name)
+
+	gauge, exists := mc.gauges[prefixedName]
+	if !exists {
+		return fmt.Errorf("gauge %s not found", prefixedName) //nolint:err113
+	}
+
+	gauge.WithLabelValues(labels...).Inc()
+
+	return nil
+}
+
+// DecGauge decrements a gauge metric by one.
+// Returns an error when no gauge with the (prefixed) name is registered.
+func (mc *MetricsCollector) DecGauge(name string, labels ...string) error {
+	mc.mu.RLock()
+	defer mc.mu.RUnlock()
+
+	// Ensure metric name has service prefix
+	prefixedName := mc.ensureMetricNamePrefix(name)
+
+	gauge, exists := mc.gauges[prefixedName]
+	if !exists {
+		return fmt.Errorf("gauge %s not found", prefixedName) //nolint:err113
+	}
+
+	gauge.WithLabelValues(labels...).Dec()
+
+	return nil
+}
+
+// AddGauge adds a (possibly negative) value to a gauge metric.
+// Returns an error when no gauge with the (prefixed) name is registered.
+func (mc *MetricsCollector) AddGauge(name string, value float64, labels ...string) error {
+	mc.mu.RLock()
+	defer mc.mu.RUnlock()
+
+	// Ensure metric name has service prefix
+	prefixedName := mc.ensureMetricNamePrefix(name)
+
+	gauge, exists := mc.gauges[prefixedName]
+	if !exists {
+		return fmt.Errorf("gauge %s not found", prefixedName) //nolint:err113
+	}
+
+	gauge.WithLabelValues(labels...).Add(value)
+
+	return nil
+}
+
+// ObserveHistogram observes a value in a histogram metric.
+// Returns an error when no histogram with the (prefixed) name is registered.
+func (mc *MetricsCollector) ObserveHistogram(name string, value float64, labels ...string) error {
+	mc.mu.RLock()
+	defer mc.mu.RUnlock()
+
+	// Ensure metric name has service prefix
+	prefixedName := mc.ensureMetricNamePrefix(name)
+
+	histogram, exists := mc.histograms[prefixedName]
+	if !exists {
+		return fmt.Errorf("histogram %s not found", prefixedName) //nolint:err113
+	}
+
+	histogram.WithLabelValues(labels...).Observe(value)
+
+	return nil
+}
+
+// ObserveSummary observes a value in a summary metric.
+// Returns an error when no summary with the (prefixed) name is registered.
+func (mc *MetricsCollector) ObserveSummary(name string, value float64, labels ...string) error {
+	mc.mu.RLock()
+	defer mc.mu.RUnlock()
+
+	// Ensure metric name has service prefix
+	prefixedName := mc.ensureMetricNamePrefix(name)
+
+	summary, exists := mc.summaries[prefixedName]
+	if !exists {
+		return fmt.Errorf("summary %s not found", prefixedName) //nolint:err113
+	}
+
+	summary.WithLabelValues(labels...).Observe(value)
+
+	return nil
+}
+
+// GetRegistry returns the Prometheus registry for custom integrations,
+// e.g. registering extra collectors or serving via promhttp.HandlerFor.
+func (mc *MetricsCollector) GetRegistry() *prometheus.Registry {
+	return mc.registry
+}
+
+// ensureMetricNamePrefix returns the name unchanged when it already
+// carries the "<serviceName>_" prefix, and prepends it otherwise.
+func (mc *MetricsCollector) ensureMetricNamePrefix(name string) string {
+	if strings.HasPrefix(name, mc.serviceName+"_") {
+		return name
+	}
+
+	return mc.serviceName + "_" + name
}
// responseWriter wraps http.ResponseWriter to capture status code
type responseWriter struct {
	http.ResponseWriter
+
+	// statusCode records the last code passed to WriteHeader.
	statusCode int
}
@@ -108,57 +431,135 @@ func GetMetrics(r *http.Request) *MetricsCollector {
if !ok {
return nil
}
+
return metrics
}
-// IncCounter increments a counter metric
-func IncCounter(r *http.Request, name string, labels ...string) {
+// Helper functions for easy metric manipulation from handlers
+
+// IncCounter increments a counter metric from a request context.
+// It fails when the metrics middleware did not attach a collector to
+// the request, or when no counter with the given name is registered.
+func IncCounter(r *http.Request, name string, labels ...string) error {
+	metrics := GetMetrics(r)
+	if metrics == nil {
+		return errors.New("metrics not available in request context") //nolint:err113
+	}
+
+	return metrics.IncCounter(name, labels...)
+}
+
+// AddCounter adds a value to a counter metric from a request context.
+// It fails when no collector is attached to the request or the counter
+// is not registered.
+func AddCounter(r *http.Request, name string, value float64, labels ...string) error {
	metrics := GetMetrics(r)
	if metrics == nil {
-		return
+		return errors.New("metrics not available in request context") //nolint:err113
	}
-	// This is a simplified version - in a real implementation,
-	// you'd want to have a more flexible metric registration system
-	switch name {
-	case "http_requests_total":
-		if len(labels) >= 3 {
-			metrics.httpRequestsTotal.WithLabelValues(labels[0], labels[1], labels[2]).Inc()
-		}
+	return metrics.AddCounter(name, value, labels...)
+}
+
+// SetGauge sets a gauge metric value from a request context.
+// It fails when no collector is attached to the request or the gauge
+// is not registered.
+func SetGauge(r *http.Request, name string, value float64, labels ...string) error {
+	metrics := GetMetrics(r)
+	if metrics == nil {
+		return errors.New("metrics not available in request context") //nolint:err113
	}
+
+	return metrics.SetGauge(name, value, labels...)
}
-// ObserveHistogram observes a histogram metric
-func ObserveHistogram(r *http.Request, name string, value float64, labels ...string) {
+// IncGauge increments a gauge metric from a request context.
+// It fails when no collector is attached to the request or the gauge
+// is not registered.
+func IncGauge(r *http.Request, name string, labels ...string) error {
	metrics := GetMetrics(r)
	if metrics == nil {
-		return
+		return errors.New("metrics not available in request context") //nolint:err113
	}
-	switch name {
-	case "http_request_duration_seconds":
-		if len(labels) >= 3 {
-			metrics.httpRequestDuration.WithLabelValues(labels[0], labels[1], labels[2]).Observe(value)
-		}
+	return metrics.IncGauge(name, labels...)
+}
+
+// DecGauge decrements a gauge metric from a request context.
+// It fails when no collector is attached to the request or the gauge
+// is not registered.
+func DecGauge(r *http.Request, name string, labels ...string) error {
+	if metrics := GetMetrics(r); metrics != nil {
+		return metrics.DecGauge(name, labels...)
+	}
+
+	return errors.New("metrics not available in request context") //nolint:err113
+}
+
+// AddGauge adds a value to a gauge metric from a request context.
+// It fails when no collector is attached to the request or the gauge
+// is not registered.
+func AddGauge(r *http.Request, name string, value float64, labels ...string) error {
+	metrics := GetMetrics(r)
+	if metrics == nil {
+		return errors.New("metrics not available in request context") //nolint:err113
	}
+
+	return metrics.AddGauge(name, value, labels...)
+}
+
+// ObserveHistogram observes a value in a histogram metric from a request context.
+// It fails when no collector is attached to the request or the histogram
+// is not registered.
+func ObserveHistogram(r *http.Request, name string, value float64, labels ...string) error {
+	if metrics := GetMetrics(r); metrics != nil {
+		return metrics.ObserveHistogram(name, value, labels...)
+	}
+
+	return errors.New("metrics not available in request context") //nolint:err113
+}
+
+// ObserveSummary observes a value in a summary metric from a request context.
+// It fails when no collector is attached to the request or the summary
+// is not registered.
+func ObserveSummary(r *http.Request, name string, value float64, labels ...string) error {
+	if metrics := GetMetrics(r); metrics != nil {
+		return metrics.ObserveSummary(name, value, labels...)
+	}
+
+	return errors.New("metrics not available in request context") //nolint:err113
+}
// startMetricsServer starts the Prometheus metrics server
func (s *Service) startMetricsServer() error {
	mux := http.NewServeMux()
-	mux.Handle(s.Config.MetricsPath, promhttp.Handler())
-	// Add health check endpoint
-	mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
-		w.WriteHeader(http.StatusOK)
-		w.Write([]byte("OK"))
-	})
+	// Serve only this collector's registry, not the global default,
+	// so the endpoint exposes exactly the service's metrics.
+	handler := promhttp.HandlerFor(s.Metrics.GetRegistry(), promhttp.HandlerOpts{})
+	mux.Handle(s.Config.MetricsPath, handler)
+
+	// Add health check endpoints
+	if s.HealthChecker != nil {
+		// Main health check endpoint (comprehensive health status)
+		mux.Handle(s.Config.HealthPath, s.HealthChecker.Handler())
+
+		// Kubernetes readiness probe endpoint
+		mux.HandleFunc(s.Config.ReadinessPath, s.HealthChecker.ReadinessHandler())
+
+		// Kubernetes liveness probe endpoint
+		mux.HandleFunc(s.Config.LivenessPath, s.HealthChecker.LivenessHandler())
+	} else {
+		// Fallback: always-OK endpoints when no health checker is configured
+		mux.HandleFunc(s.Config.HealthPath, func(w http.ResponseWriter, _ *http.Request) {
+			w.WriteHeader(http.StatusOK)
+			_, _ = w.Write([]byte("OK"))
+		})
+		mux.HandleFunc(s.Config.ReadinessPath, func(w http.ResponseWriter, _ *http.Request) {
+			w.WriteHeader(http.StatusOK)
+			_, _ = w.Write([]byte("Ready"))
+		})
+		mux.HandleFunc(s.Config.LivenessPath, func(w http.ResponseWriter, _ *http.Request) {
+			w.WriteHeader(http.StatusOK)
+			_, _ = w.Write([]byte("Alive"))
+		})
+	}
	s.metricsServer = &http.Server{
-		Addr:    s.Config.MetricsAddr,
-		Handler: mux,
+		Addr:    s.Config.MetricsAddr,
+		Handler: mux,
+		// NOTE(review): 5-minute timeouts are very generous for a
+		// metrics/probe endpoint — confirm this is intentional (probes
+		// typically complete in milliseconds).
+		ReadTimeout:  5 * time.Minute,
+		WriteTimeout: 5 * time.Minute,
+		IdleTimeout:  5 * time.Minute,
	}
	s.Logger.Info("starting metrics server", "addr", s.Config.MetricsAddr, "path", s.Config.MetricsPath)
-	return s.metricsServer.ListenAndServe()
+
+	return s.metricsServer.ListenAndServe() //nolint:wrapcheck
}
diff --git a/metrics_test.go b/metrics_test.go
new file mode 100644
index 0000000..7585b5e
--- /dev/null
+++ b/metrics_test.go
@@ -0,0 +1,774 @@
+package service
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ dto "github.com/prometheus/client_model/go"
+)
+
+func TestNewMetricsCollector(t *testing.T) {
+ t.Parallel()
+
+ metrics := NewMetricsCollector("test-service")
+
+ if metrics == nil {
+ t.Fatal("expected metrics collector to be created")
+ }
+
+ if metrics.serviceName != "test-service" {
+ t.Errorf("expected service name 'test-service', got %s", metrics.serviceName)
+ }
+
+ if metrics.registry == nil {
+ t.Fatal("expected registry to be created")
+ }
+
+ if metrics.counters == nil {
+ t.Fatal("expected counters map to be initialized")
+ }
+
+ if metrics.gauges == nil {
+ t.Fatal("expected gauges map to be initialized")
+ }
+
+ if metrics.histograms == nil {
+ t.Fatal("expected histograms map to be initialized")
+ }
+
+ if metrics.summaries == nil {
+ t.Fatal("expected summaries map to be initialized")
+ }
+
+ // Check that built-in HTTP metrics are created
+ if metrics.httpRequestsTotal == nil {
+ t.Fatal("expected HTTP requests total metric to be created")
+ }
+
+ if metrics.httpRequestDuration == nil {
+ t.Fatal("expected HTTP request duration metric to be created")
+ }
+
+ if metrics.httpRequestsInFlight == nil {
+ t.Fatal("expected HTTP requests in flight metric to be created")
+ }
+}
+
+func TestMetricsCollector_RegisterCounter(t *testing.T) {
+ t.Parallel()
+
+ metrics := NewMetricsCollector("test-service")
+
+ t.Run("registers counter successfully", func(t *testing.T) {
+ t.Parallel()
+
+ config := MetricConfig{
+ Name: "test_counter",
+ Help: "Test counter metric",
+ Labels: []string{"label1", "label2"},
+ }
+
+ err := metrics.RegisterCounter(config)
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if _, exists := metrics.counters["test-service_test_counter"]; !exists {
+ t.Error("expected counter to be registered")
+ }
+ })
+
+ t.Run("fails to register duplicate counter", func(t *testing.T) {
+ t.Parallel()
+
+ config := MetricConfig{
+ Name: "duplicate_counter",
+ Help: "Duplicate counter metric",
+ Labels: []string{"label1"},
+ }
+
+ err := metrics.RegisterCounter(config)
+ if err != nil {
+ t.Fatalf("expected no error on first registration, got %v", err)
+ }
+
+ err = metrics.RegisterCounter(config)
+ if err == nil {
+ t.Error("expected error on duplicate registration")
+ }
+
+ if !strings.Contains(err.Error(), "already exists") {
+ t.Errorf("expected 'already exists' error, got %v", err)
+ }
+ })
+}
+
+func TestMetricsCollector_RegisterGauge(t *testing.T) {
+ t.Parallel()
+
+ metrics := NewMetricsCollector("test-service")
+
+ config := MetricConfig{
+ Name: "test_gauge",
+ Help: "Test gauge metric",
+ Labels: []string{"label1"},
+ }
+
+ err := metrics.RegisterGauge(config)
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if _, exists := metrics.gauges["test-service_test_gauge"]; !exists {
+ t.Error("expected gauge to be registered")
+ }
+}
+
+func TestMetricsCollector_RegisterHistogram(t *testing.T) {
+ t.Parallel()
+
+ metrics := NewMetricsCollector("test-service")
+
+ t.Run("registers histogram with default buckets", func(t *testing.T) {
+ t.Parallel()
+
+ config := MetricConfig{
+ Name: "test_histogram",
+ Help: "Test histogram metric",
+ Labels: []string{"label1"},
+ }
+
+ err := metrics.RegisterHistogram(config)
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if _, exists := metrics.histograms["test-service_test_histogram"]; !exists {
+ t.Error("expected histogram to be registered")
+ }
+ })
+
+ t.Run("registers histogram with custom buckets", func(t *testing.T) {
+ t.Parallel()
+
+ config := MetricConfig{
+ Name: "test_histogram_custom",
+ Help: "Test histogram with custom buckets",
+ Labels: []string{"label1"},
+ Buckets: []float64{0.1, 0.5, 1.0, 5.0, 10.0},
+ }
+
+ err := metrics.RegisterHistogram(config)
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if _, exists := metrics.histograms["test-service_test_histogram_custom"]; !exists {
+ t.Error("expected histogram to be registered")
+ }
+ })
+}
+
+func TestMetricsCollector_RegisterSummary(t *testing.T) {
+ t.Parallel()
+
+ metrics := NewMetricsCollector("test-service")
+
+ t.Run("registers summary with default objectives", func(t *testing.T) {
+ t.Parallel()
+
+ config := MetricConfig{
+ Name: "test_summary",
+ Help: "Test summary metric",
+ Labels: []string{"label1"},
+ }
+
+ err := metrics.RegisterSummary(config)
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if _, exists := metrics.summaries["test-service_test_summary"]; !exists {
+ t.Error("expected summary to be registered")
+ }
+ })
+
+ t.Run("registers summary with custom objectives", func(t *testing.T) {
+ t.Parallel()
+
+ config := MetricConfig{
+ Name: "test_summary_custom",
+ Help: "Test summary with custom objectives",
+ Labels: []string{"label1"},
+ Objectives: map[float64]float64{0.5: 0.05, 0.95: 0.01},
+ }
+
+ err := metrics.RegisterSummary(config)
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if _, exists := metrics.summaries["test-service_test_summary_custom"]; !exists {
+ t.Error("expected summary to be registered")
+ }
+ })
+}
+
+func TestMetricsCollector_CounterOperations(t *testing.T) {
+ t.Parallel()
+
+ metrics := NewMetricsCollector("test-service")
+
+ // Register a counter
+ config := MetricConfig{
+ Name: "test_counter_ops",
+ Help: "Test counter operations",
+ Labels: []string{"operation"},
+ }
+
+ err := metrics.RegisterCounter(config)
+ if err != nil {
+ t.Fatalf("failed to register counter: %v", err)
+ }
+
+ t.Run("increments counter", func(t *testing.T) {
+ t.Parallel()
+
+ err := metrics.IncCounter("test_counter_ops", "inc")
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ // Verify the counter was incremented
+ counter := metrics.counters["test-service_test_counter_ops"]
+ metric := &dto.Metric{}
+
+ err = counter.WithLabelValues("inc").Write(metric)
+ if err != nil {
+ t.Fatalf("failed to write metric: %v", err)
+ }
+
+ if metric.GetCounter().GetValue() != 1 {
+ t.Errorf("expected counter value 1, got %f", metric.GetCounter().GetValue())
+ }
+ })
+
+ t.Run("adds value to counter", func(t *testing.T) {
+ t.Parallel()
+
+ err := metrics.AddCounter("test_counter_ops", 5.5, "add")
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ // Verify the counter was incremented by 5.5
+ counter := metrics.counters["test-service_test_counter_ops"]
+ metric := &dto.Metric{}
+
+ err = counter.WithLabelValues("add").Write(metric)
+ if err != nil {
+ t.Fatalf("failed to write metric: %v", err)
+ }
+
+ if metric.GetCounter().GetValue() != 5.5 {
+ t.Errorf("expected counter value 5.5, got %f", metric.GetCounter().GetValue())
+ }
+ })
+
+ t.Run("fails with non-existent counter", func(t *testing.T) {
+ t.Parallel()
+
+ err := metrics.IncCounter("non_existent_counter", "test")
+ if err == nil {
+ t.Error("expected error for non-existent counter")
+ }
+
+ if !strings.Contains(err.Error(), "not found") {
+ t.Errorf("expected 'not found' error, got %v", err)
+ }
+ })
+}
+
+//nolint:gocognit
+func TestMetricsCollector_GaugeOperations(t *testing.T) {
+ t.Parallel()
+
+ metrics := NewMetricsCollector("test-service")
+
+ // Register a gauge
+ config := MetricConfig{
+ Name: "test_gauge_ops",
+ Help: "Test gauge operations",
+ Labels: []string{"operation"},
+ }
+
+ err := metrics.RegisterGauge(config)
+ if err != nil {
+ t.Fatalf("failed to register gauge: %v", err)
+ }
+
+ t.Run("sets gauge value", func(t *testing.T) {
+ t.Parallel()
+
+ err := metrics.SetGauge("test_gauge_ops", 42.5, "set")
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ // Verify the gauge value
+ gauge := metrics.gauges["test-service_test_gauge_ops"]
+ metric := &dto.Metric{}
+
+ err = gauge.WithLabelValues("set").Write(metric)
+ if err != nil {
+ t.Fatalf("failed to write metric: %v", err)
+ }
+
+ if metric.GetGauge().GetValue() != 42.5 {
+ t.Errorf("expected gauge value 42.5, got %f", metric.GetGauge().GetValue())
+ }
+ })
+
+ t.Run("increments gauge", func(t *testing.T) {
+ // First set a value
+ t.Parallel()
+
+ err := metrics.SetGauge("test_gauge_ops", 10, "inc")
+ if err != nil {
+ t.Fatalf("failed to set initial gauge value: %v", err)
+ }
+
+ err = metrics.IncGauge("test_gauge_ops", "inc")
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ // Verify the gauge was incremented
+ gauge := metrics.gauges["test-service_test_gauge_ops"]
+ metric := &dto.Metric{}
+
+ err = gauge.WithLabelValues("inc").Write(metric)
+ if err != nil {
+ t.Fatalf("failed to write metric: %v", err)
+ }
+
+ if metric.GetGauge().GetValue() != 11 {
+ t.Errorf("expected gauge value 11, got %f", metric.GetGauge().GetValue())
+ }
+ })
+
+ t.Run("decrements gauge", func(t *testing.T) {
+ // First set a value
+ t.Parallel()
+
+ err := metrics.SetGauge("test_gauge_ops", 10, "dec")
+ if err != nil {
+ t.Fatalf("failed to set initial gauge value: %v", err)
+ }
+
+ err = metrics.DecGauge("test_gauge_ops", "dec")
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ // Verify the gauge was decremented
+ gauge := metrics.gauges["test-service_test_gauge_ops"]
+ metric := &dto.Metric{}
+
+ err = gauge.WithLabelValues("dec").Write(metric)
+ if err != nil {
+ t.Fatalf("failed to write metric: %v", err)
+ }
+
+ if metric.GetGauge().GetValue() != 9 {
+ t.Errorf("expected gauge value 9, got %f", metric.GetGauge().GetValue())
+ }
+ })
+
+ t.Run("adds value to gauge", func(t *testing.T) {
+ // First set a value
+ t.Parallel()
+
+ err := metrics.SetGauge("test_gauge_ops", 10, "add")
+ if err != nil {
+ t.Fatalf("failed to set initial gauge value: %v", err)
+ }
+
+ err = metrics.AddGauge("test_gauge_ops", 5.5, "add")
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ // Verify the gauge value
+ gauge := metrics.gauges["test-service_test_gauge_ops"]
+ metric := &dto.Metric{}
+
+ err = gauge.WithLabelValues("add").Write(metric)
+ if err != nil {
+ t.Fatalf("failed to write metric: %v", err)
+ }
+
+ if metric.GetGauge().GetValue() != 15.5 {
+ t.Errorf("expected gauge value 15.5, got %f", metric.GetGauge().GetValue())
+ }
+ })
+}
+
+func TestMetricsCollector_HistogramOperations(t *testing.T) {
+ t.Parallel()
+
+ metrics := NewMetricsCollector("test-service")
+
+ // Register a histogram
+ config := MetricConfig{
+ Name: "test_histogram_ops",
+ Help: "Test histogram operations",
+ Labels: []string{"operation"},
+ }
+
+ err := metrics.RegisterHistogram(config)
+ if err != nil {
+ t.Fatalf("failed to register histogram: %v", err)
+ }
+
+ t.Run("observes histogram values", func(t *testing.T) {
+ t.Parallel()
+
+ values := []float64{0.1, 0.5, 1.0, 2.0, 5.0}
+ for _, value := range values {
+ err := metrics.ObserveHistogram("test_histogram_ops", value, "observe")
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+ }
+
+ // Verify the histogram recorded the observations by checking the registry
+ registry := metrics.GetRegistry()
+
+ metricFamilies, err := registry.Gather()
+ if err != nil {
+ t.Fatalf("failed to gather metrics: %v", err)
+ }
+
+ found := false
+
+ for _, mf := range metricFamilies {
+ if mf.GetName() == "test-service_test_histogram_ops" {
+ found = true
+
+ for _, metric := range mf.GetMetric() {
+ if metric.GetHistogram().GetSampleCount() == uint64(len(values)) {
+ return // Test passed
+ }
+ }
+ }
+ }
+
+ if !found {
+ t.Error("expected to find histogram metric")
+ }
+ })
+}
+
+func TestMetricsCollector_SummaryOperations(t *testing.T) {
+ t.Parallel()
+
+ metrics := NewMetricsCollector("test-service")
+
+ // Register a summary
+ config := MetricConfig{
+ Name: "test_summary_ops",
+ Help: "Test summary operations",
+ Labels: []string{"operation"},
+ }
+
+ err := metrics.RegisterSummary(config)
+ if err != nil {
+ t.Fatalf("failed to register summary: %v", err)
+ }
+
+ t.Run("observes summary values", func(t *testing.T) {
+ t.Parallel()
+
+ values := []float64{0.1, 0.5, 1.0, 2.0, 5.0}
+ for _, value := range values {
+ err := metrics.ObserveSummary("test_summary_ops", value, "observe")
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+ }
+
+ // Verify the summary recorded the observations by checking the registry
+ registry := metrics.GetRegistry()
+
+ metricFamilies, err := registry.Gather()
+ if err != nil {
+ t.Fatalf("failed to gather metrics: %v", err)
+ }
+
+ found := false
+
+ for _, mf := range metricFamilies {
+ if mf.GetName() == "test-service_test_summary_ops" {
+ found = true
+
+ for _, metric := range mf.GetMetric() {
+ if metric.GetSummary().GetSampleCount() == uint64(len(values)) {
+ return // Test passed
+ }
+ }
+ }
+ }
+
+ if !found {
+ t.Error("expected to find summary metric")
+ }
+ })
+}
+
+func TestHelperFunctions(t *testing.T) {
+ t.Parallel()
+
+ svc := New("test-service", nil)
+
+ // Register custom metrics
+ err := svc.RegisterCounter(MetricConfig{
+ Name: "test_helper_counter",
+ Help: "Test helper counter",
+ Labels: []string{"action"},
+ })
+ if err != nil {
+ t.Fatalf("failed to register counter: %v", err)
+ }
+
+ err = svc.RegisterGauge(MetricConfig{
+ Name: "test_helper_gauge",
+ Help: "Test helper gauge",
+ Labels: []string{"action"},
+ })
+ if err != nil {
+ t.Fatalf("failed to register gauge: %v", err)
+ }
+
+ // Test helper functions with middleware
+ handler := applyMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Test counter helpers
+ err := IncCounter(r, "test_helper_counter", "increment")
+ if err != nil {
+ t.Errorf("IncCounter failed: %v", err)
+ }
+
+ err = AddCounter(r, "test_helper_counter", 5.0, "add")
+ if err != nil {
+ t.Errorf("AddCounter failed: %v", err)
+ }
+
+ // Test gauge helpers
+ err = SetGauge(r, "test_helper_gauge", 42.0, "set")
+ if err != nil {
+ t.Errorf("SetGauge failed: %v", err)
+ }
+
+ err = IncGauge(r, "test_helper_gauge", "increment")
+ if err != nil {
+ t.Errorf("IncGauge failed: %v", err)
+ }
+
+ w.WriteHeader(http.StatusOK)
+ }), MetricsMiddleware(svc.Metrics))
+
+ req := httptest.NewRequest(http.MethodGet, "/test", nil)
+ recorder := httptest.NewRecorder()
+
+ handler.ServeHTTP(recorder, req)
+
+ if recorder.Code != http.StatusOK {
+ t.Errorf("expected status 200, got %d", recorder.Code)
+ }
+}
+
+func TestHelperFunctionsWithoutMetrics(t *testing.T) {
+ t.Parallel()
+
+ // Test helper functions without metrics in context
+ req := httptest.NewRequest(http.MethodGet, "/test", nil)
+
+ err := IncCounter(req, "test_counter", "test")
+ if err == nil {
+ t.Error("expected error when metrics not available")
+ }
+
+ if !strings.Contains(err.Error(), "not available") {
+ t.Errorf("expected 'not available' error, got %v", err)
+ }
+}
+
+func TestMetricsMiddleware_CustomMetrics(t *testing.T) {
+ t.Parallel()
+
+ svc := New("test-service", nil)
+
+ handler := applyMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Simulate some processing time
+ time.Sleep(10 * time.Millisecond)
+ w.WriteHeader(http.StatusOK)
+ _, _ = w.Write([]byte("test"))
+ }), MetricsMiddleware(svc.Metrics))
+
+ req := httptest.NewRequest(http.MethodGet, "/test", nil)
+ recorder := httptest.NewRecorder()
+
+ handler.ServeHTTP(recorder, req)
+
+ if recorder.Code != http.StatusOK {
+ t.Errorf("expected status 200, got %d", recorder.Code)
+ }
+
+ // Verify metrics were recorded
+ registry := svc.Metrics.GetRegistry()
+
+ metricFamilies, err := registry.Gather()
+ if err != nil {
+ t.Fatalf("failed to gather metrics: %v", err)
+ }
+
+ // Check that we have the expected metrics
+ foundRequestsTotal := false
+ foundRequestDuration := false
+ foundRequestsInFlight := false
+
+ for _, mf := range metricFamilies {
+ switch mf.GetName() {
+ case "test-service_http_requests_total":
+ foundRequestsTotal = true
+ case "test-service_http_request_duration_seconds":
+ foundRequestDuration = true
+ case "test-service_http_requests_in_flight":
+ foundRequestsInFlight = true
+ }
+ }
+
+ if !foundRequestsTotal {
+ t.Error("expected to find http_requests_total metric")
+ }
+
+ if !foundRequestDuration {
+ t.Error("expected to find http_request_duration_seconds metric")
+ }
+
+ if !foundRequestsInFlight {
+ t.Error("expected to find http_requests_in_flight metric")
+ }
+}
+
+func TestMetricsRegistry(t *testing.T) {
+ t.Parallel()
+
+ metrics := NewMetricsCollector("test-service")
+
+ // Test that we can get the registry
+ registry := metrics.GetRegistry()
+ if registry == nil {
+ t.Fatal("expected registry to be returned")
+ }
+
+ // Test that we can create a custom handler
+ handler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{})
+ if handler == nil {
+ t.Fatal("expected handler to be created")
+ }
+
+ // Test that the handler works
+ req := httptest.NewRequest(http.MethodGet, "/metrics", nil)
+ recorder := httptest.NewRecorder()
+
+ handler.ServeHTTP(recorder, req)
+
+ if recorder.Code != http.StatusOK {
+ t.Errorf("expected status 200, got %d", recorder.Code)
+ }
+
+ // Check that the response contains metrics
+ body := recorder.Body.String()
+ if !strings.Contains(body, "test_service_http_requests_in_flight") {
+ t.Errorf("expected metrics output to contain http_requests_in_flight, got: %s", body)
+ }
+}
+
+func TestService_RegisterMetrics(t *testing.T) {
+ t.Parallel()
+
+ svc := New("test-service", nil)
+
+ t.Run("registers counter via service", func(t *testing.T) {
+ t.Parallel()
+
+ err := svc.RegisterCounter(MetricConfig{
+ Name: "service_test_counter",
+ Help: "Test counter via service",
+ Labels: []string{"label1"},
+ })
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if _, exists := svc.Metrics.counters["test-service_service_test_counter"]; !exists {
+ t.Error("expected counter to be registered")
+ }
+ })
+
+ t.Run("registers gauge via service", func(t *testing.T) {
+ t.Parallel()
+
+ err := svc.RegisterGauge(MetricConfig{
+ Name: "service_test_gauge",
+ Help: "Test gauge via service",
+ Labels: []string{"label1"},
+ })
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if _, exists := svc.Metrics.gauges["test-service_service_test_gauge"]; !exists {
+ t.Error("expected gauge to be registered")
+ }
+ })
+
+ t.Run("registers histogram via service", func(t *testing.T) {
+ t.Parallel()
+
+ err := svc.RegisterHistogram(MetricConfig{
+ Name: "service_test_histogram",
+ Help: "Test histogram via service",
+ Labels: []string{"label1"},
+ })
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if _, exists := svc.Metrics.histograms["test-service_service_test_histogram"]; !exists {
+ t.Error("expected histogram to be registered")
+ }
+ })
+
+ t.Run("registers summary via service", func(t *testing.T) {
+ t.Parallel()
+
+ err := svc.RegisterSummary(MetricConfig{
+ Name: "service_test_summary",
+ Help: "Test summary via service",
+ Labels: []string{"label1"},
+ })
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+
+ if _, exists := svc.Metrics.summaries["test-service_service_test_summary"]; !exists {
+ t.Error("expected summary to be registered")
+ }
+ })
+}
diff --git a/middleware.go b/middleware.go
index f23f040..14fb8bf 100644
--- a/middleware.go
+++ b/middleware.go
@@ -14,6 +14,8 @@ const (
LoggerKey ContextKey = "logger"
// MetricsKey is the context key for metrics
MetricsKey ContextKey = "metrics"
+ // HealthCheckerKey is the context key for the health checker
+ HealthCheckerKey ContextKey = "health_checker"
)
// Middleware represents a middleware function
@@ -42,6 +44,7 @@ func GetLogger(r *http.Request) *slog.Logger {
// Return a default logger if none is found
return slog.Default()
}
+
return logger
}
@@ -76,10 +79,25 @@ func RequestLoggingMiddleware(logger *slog.Logger) Middleware {
}
}
+// HealthCheckerMiddleware injects the health checker into the request context
+func HealthCheckerMiddleware(healthChecker *HealthChecker) Middleware {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Add health checker to context
+ ctx := context.WithValue(r.Context(), HealthCheckerKey, healthChecker)
+ r = r.WithContext(ctx)
+
+ // Call the next handler
+ next.ServeHTTP(w, r)
+ })
+ }
+}
+
// applyMiddleware applies multiple middleware functions to a handler
func applyMiddleware(h http.Handler, middlewares ...Middleware) http.Handler {
for i := len(middlewares) - 1; i >= 0; i-- {
h = middlewares[i](h)
}
+
return h
}
diff --git a/service.go b/service.go
index 484dc20..12f9e13 100644
--- a/service.go
+++ b/service.go
@@ -1,16 +1,23 @@
package service
import (
+ "errors"
"log/slog"
"net/http"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/hellofresh/health-go/v5"
)
// Service represents the main service instance
type Service struct {
- Name string
- Config *Config
- Logger *slog.Logger
- Metrics *MetricsCollector
+ Name string
+ Config *Config
+ Logger *slog.Logger
+ Metrics *MetricsCollector
+ HealthChecker *HealthChecker
server *http.Server
metricsServer *http.Server
@@ -27,12 +34,21 @@ func New(name string, config *Config) *Service {
// Create metrics collector
metrics := NewMetricsCollector(name)
+ // Create health checker
+ healthChecker, err := NewHealthChecker(name, config.Version)
+ if err != nil {
+ config.Logger.Error("failed to create health checker", "error", err)
+ // Continue without health checker - it's not critical for basic operation
+ healthChecker = nil
+ }
+
svc := &Service{
- Name: name,
- Config: config,
- Logger: config.Logger,
- Metrics: metrics,
- mux: http.NewServeMux(),
+ Name: name,
+ Config: config,
+ Logger: config.Logger,
+ Metrics: metrics,
+ HealthChecker: healthChecker,
+ mux: http.NewServeMux(),
}
// Add default middleware (order matters: metrics should be first to capture all requests)
@@ -43,6 +59,11 @@ func New(name string, config *Config) *Service {
RequestLoggingMiddleware(config.Logger),
}
+ // Add health checker middleware if available
+ if healthChecker != nil {
+ svc.middlewares = append(svc.middlewares, HealthCheckerMiddleware(healthChecker))
+ }
+
return svc
}
@@ -65,24 +86,88 @@ func (s *Service) Use(middleware Middleware) {
s.middlewares = append(s.middlewares, middleware)
}
-// Start starts the service and metrics server
+// Start starts the service with graceful shutdown handling
func (s *Service) Start() error {
- // Start metrics server in a goroutine
+ // Create a channel to receive OS signals
+ quit := make(chan os.Signal, 1)
+ signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
+
+ // Start the servers in goroutines
+ serverErrors := make(chan error, 2)
+
+ // Start metrics server
go func() {
- if err := s.startMetricsServer(); err != nil && err != http.ErrServerClosed {
+ if err := s.startMetricsServer(); err != nil && !errors.Is(err, http.ErrServerClosed) {
s.Logger.Error("metrics server error", "error", err)
+
+ serverErrors <- err
}
}()
// Start main HTTP server
- s.server = &http.Server{
- Addr: s.Config.Addr,
- Handler: s.mux,
- ReadTimeout: s.Config.ReadTimeout,
- WriteTimeout: s.Config.WriteTimeout,
- IdleTimeout: s.Config.IdleTimeout,
+ go func() {
+ s.server = &http.Server{
+ Addr: s.Config.Addr,
+ Handler: s.mux,
+ ReadTimeout: s.Config.ReadTimeout,
+ WriteTimeout: s.Config.WriteTimeout,
+ IdleTimeout: s.Config.IdleTimeout,
+ }
+
+ s.Logger.Info("starting service", "name", s.Name, "addr", s.Config.Addr)
+
+ if err := s.server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
+ s.Logger.Error("server error", "error", err)
+
+ serverErrors <- err
+ }
+ }()
+
+ // Wait for either a signal or a server error
+ select {
+ case <-quit:
+ s.Logger.Info("received shutdown signal")
+ case err := <-serverErrors:
+ s.Logger.Error("server error, shutting down", "error", err)
+ return err
}
- s.Logger.Info("starting service", "name", s.Name, "addr", s.Config.Addr)
- return s.server.ListenAndServe()
+ // Perform graceful shutdown
+ return s.gracefulShutdown()
+}
+
+// RegisterHealthCheck adds a health check to the service
+func (s *Service) RegisterHealthCheck(config health.Config) error {
+ if s.HealthChecker != nil {
+ return s.HealthChecker.Register(config)
+ }
+
+ s.Logger.Warn("health checker not available, skipping health check registration", "name", config.Name)
+
+ return nil
+}
+
+// RegisterCounter registers a new counter metric
+func (s *Service) RegisterCounter(config MetricConfig) error {
+ return s.Metrics.RegisterCounter(config)
+}
+
+// RegisterGauge registers a new gauge metric
+func (s *Service) RegisterGauge(config MetricConfig) error {
+ return s.Metrics.RegisterGauge(config)
+}
+
+// RegisterHistogram registers a new histogram metric
+func (s *Service) RegisterHistogram(config MetricConfig) error {
+ return s.Metrics.RegisterHistogram(config)
+}
+
+// RegisterSummary registers a new summary metric
+func (s *Service) RegisterSummary(config MetricConfig) error {
+ return s.Metrics.RegisterSummary(config)
+}
+
+// GetHealthChecker returns the health checker instance
+func (s *Service) GetHealthChecker() *HealthChecker {
+ return s.HealthChecker
}
diff --git a/service_test.go b/service_test.go
index c53f8eb..22fb3ff 100644
--- a/service_test.go
+++ b/service_test.go
@@ -3,14 +3,17 @@ package service
import (
"net/http"
"net/http/httptest"
- "os"
"strings"
"testing"
"time"
)
func TestNew(t *testing.T) {
+ t.Parallel()
+
t.Run("with config", func(t *testing.T) {
+ t.Parallel()
+
config := DefaultConfig()
config.Addr = ":8081"
@@ -34,6 +37,8 @@ func TestNew(t *testing.T) {
})
t.Run("with nil config", func(t *testing.T) {
+ t.Parallel()
+
svc := New("test", nil)
if svc.Config == nil {
@@ -47,6 +52,8 @@ func TestNew(t *testing.T) {
}
func TestDefaultConfig(t *testing.T) {
+ t.Parallel()
+
config := DefaultConfig()
if config.Addr != ":8080" {
@@ -72,14 +79,9 @@ func TestDefaultConfig(t *testing.T) {
func TestLoadFromEnv(t *testing.T) {
// Set environment variables
- os.Setenv("ADDR", ":8888")
- os.Setenv("METRICS_ADDR", ":9999")
- os.Setenv("METRICS_PATH", "/custom-metrics")
- defer func() {
- os.Unsetenv("ADDR")
- os.Unsetenv("METRICS_ADDR")
- os.Unsetenv("METRICS_PATH")
- }()
+ t.Setenv("ADDR", ":8888")
+ t.Setenv("METRICS_ADDR", ":9999")
+ t.Setenv("METRICS_PATH", "/custom-metrics")
config, err := LoadFromEnv()
if err != nil {
@@ -100,16 +102,18 @@ func TestLoadFromEnv(t *testing.T) {
}
func TestHandleFunc(t *testing.T) {
+ t.Parallel()
+
svc := New("test", nil)
// Add a simple handler
- svc.HandleFunc("/test", func(w http.ResponseWriter, r *http.Request) {
+ svc.HandleFunc("/test", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte("test response"))
})
// Create a test request
- req := httptest.NewRequest("GET", "/test", nil)
+ req := httptest.NewRequest(http.MethodGet, "/test", nil)
recorder := httptest.NewRecorder()
// Serve the request
@@ -125,6 +129,8 @@ func TestHandleFunc(t *testing.T) {
}
func TestGetLogger(t *testing.T) {
+ t.Parallel()
+
svc := New("test", nil)
// Test with a request that has logger middleware applied
@@ -133,10 +139,11 @@ func TestGetLogger(t *testing.T) {
if logger == nil {
t.Error("logger should not be nil")
}
+
w.WriteHeader(http.StatusOK)
}), LoggerMiddleware(svc.Logger))
- req := httptest.NewRequest("GET", "/test", nil)
+ req := httptest.NewRequest(http.MethodGet, "/test", nil)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -147,6 +154,8 @@ func TestGetLogger(t *testing.T) {
}
func TestGetMetrics(t *testing.T) {
+ t.Parallel()
+
svc := New("test", nil)
// Test with a request that has metrics middleware applied
@@ -155,10 +164,11 @@ func TestGetMetrics(t *testing.T) {
if metrics == nil {
t.Error("metrics should not be nil")
}
+
w.WriteHeader(http.StatusOK)
}), MetricsMiddleware(svc.Metrics))
- req := httptest.NewRequest("GET", "/test", nil)
+ req := httptest.NewRequest(http.MethodGet, "/test", nil)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -169,14 +179,16 @@ func TestGetMetrics(t *testing.T) {
}
func TestRecoveryMiddleware(t *testing.T) {
+ t.Parallel()
+
svc := New("test", nil)
// Create a handler that panics
- handler := applyMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ handler := applyMiddleware(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {
panic("test panic")
}), RecoveryMiddleware(svc.Logger))
- req := httptest.NewRequest("GET", "/test", nil)
+ req := httptest.NewRequest(http.MethodGet, "/test", nil)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -191,6 +203,8 @@ func TestRecoveryMiddleware(t *testing.T) {
}
func TestMetricsMiddleware(t *testing.T) {
+ t.Parallel()
+
svc := New("test", nil)
// Create a simple handler
@@ -199,7 +213,7 @@ func TestMetricsMiddleware(t *testing.T) {
w.Write([]byte("test"))
}), MetricsMiddleware(svc.Metrics))
- req := httptest.NewRequest("GET", "/test", nil)
+ req := httptest.NewRequest(http.MethodGet, "/test", nil)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -207,23 +221,23 @@ func TestMetricsMiddleware(t *testing.T) {
if recorder.Code != http.StatusOK {
t.Errorf("expected status 200, got %d", recorder.Code)
}
-
- // Note: In a real test, you'd check if the metrics were actually recorded
- // This would require exposing the metrics or using a metrics registry
}
func TestShutdownHooks(t *testing.T) {
+ t.Parallel()
+
svc := New("test", nil)
hookCalled := false
+
svc.AddShutdownHook(func() error {
hookCalled = true
return nil
})
// Create a minimal server setup
- svc.server = &http.Server{Addr: ":0"}
- svc.metricsServer = &http.Server{Addr: ":0"}
+ svc.server = &http.Server{Addr: ":0"} //nolint:gosec
+ svc.metricsServer = &http.Server{Addr: ":0"} //nolint:gosec
// Test graceful shutdown
err := svc.gracefulShutdown()
@@ -237,6 +251,8 @@ func TestShutdownHooks(t *testing.T) {
}
func TestUse(t *testing.T) {
+ t.Parallel()
+
svc := New("test", nil)
initialCount := len(svc.middlewares)
@@ -254,11 +270,11 @@ func TestUse(t *testing.T) {
}
// Test that the custom middleware is applied
- svc.HandleFunc("/test", func(w http.ResponseWriter, r *http.Request) {
+ svc.HandleFunc("/test", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
})
- req := httptest.NewRequest("GET", "/test", nil)
+ req := httptest.NewRequest(http.MethodGet, "/test", nil)
recorder := httptest.NewRecorder()
svc.mux.ServeHTTP(recorder, req)
@@ -269,6 +285,8 @@ func TestUse(t *testing.T) {
}
func TestIntegration(t *testing.T) {
+ t.Parallel()
+
// Create a service with custom configuration
config := DefaultConfig()
config.Addr = ":0" // Use random port
@@ -286,7 +304,7 @@ func TestIntegration(t *testing.T) {
})
// Test the handler
- req := httptest.NewRequest("GET", "/hello", nil)
+ req := httptest.NewRequest(http.MethodGet, "/hello", nil)
recorder := httptest.NewRecorder()
svc.mux.ServeHTTP(recorder, req)
@@ -303,6 +321,8 @@ func TestIntegration(t *testing.T) {
}
func TestStartMetricsServer(t *testing.T) {
+ t.Parallel()
+
svc := New("test", nil)
// Test that metrics server can be started (we'll use a mock)
@@ -329,10 +349,11 @@ func BenchmarkHandleFunc(b *testing.B) {
svc.HandleFunc("/benchmark", handler)
- req := httptest.NewRequest("GET", "/benchmark", nil)
+ req := httptest.NewRequest(http.MethodGet, "/benchmark", nil)
b.ResetTimer()
- for i := 0; i < b.N; i++ {
+
+ for range b.N {
recorder := httptest.NewRecorder()
svc.mux.ServeHTTP(recorder, req)
}
@@ -348,10 +369,11 @@ func BenchmarkMiddleware(b *testing.B) {
wrappedHandler := applyMiddleware(handler, svc.middlewares...)
- req := httptest.NewRequest("GET", "/benchmark", nil)
+ req := httptest.NewRequest(http.MethodGet, "/benchmark", nil)
b.ResetTimer()
- for i := 0; i < b.N; i++ {
+
+ for range b.N {
recorder := httptest.NewRecorder()
wrappedHandler.ServeHTTP(recorder, req)
}
diff --git a/shutdown.go b/shutdown.go
index 4a44ef2..7d53f84 100644
--- a/shutdown.go
+++ b/shutdown.go
@@ -2,59 +2,8 @@ package service
import (
"context"
- "net/http"
- "os"
- "os/signal"
- "syscall"
)
-// StartWithGracefulShutdown starts the service with graceful shutdown handling
-func (s *Service) StartWithGracefulShutdown() error {
- // Create a channel to receive OS signals
- quit := make(chan os.Signal, 1)
- signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
-
- // Start the servers in goroutines
- serverErrors := make(chan error, 2)
-
- // Start metrics server
- go func() {
- if err := s.startMetricsServer(); err != nil && err != http.ErrServerClosed {
- s.Logger.Error("metrics server error", "error", err)
- serverErrors <- err
- }
- }()
-
- // Start main HTTP server
- go func() {
- s.server = &http.Server{
- Addr: s.Config.Addr,
- Handler: s.mux,
- ReadTimeout: s.Config.ReadTimeout,
- WriteTimeout: s.Config.WriteTimeout,
- IdleTimeout: s.Config.IdleTimeout,
- }
-
- s.Logger.Info("starting service", "name", s.Name, "addr", s.Config.Addr)
- if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
- s.Logger.Error("server error", "error", err)
- serverErrors <- err
- }
- }()
-
- // Wait for either a signal or a server error
- select {
- case <-quit:
- s.Logger.Info("received shutdown signal")
- case err := <-serverErrors:
- s.Logger.Error("server error, shutting down", "error", err)
- return err
- }
-
- // Perform graceful shutdown
- return s.gracefulShutdown()
-}
-
// gracefulShutdown performs graceful shutdown of the service
func (s *Service) gracefulShutdown() error {
s.Logger.Info("starting graceful shutdown")
@@ -66,9 +15,9 @@ func (s *Service) gracefulShutdown() error {
// Execute shutdown hooks
for i, hook := range s.Config.ShutdownHooks {
s.Logger.Info("executing shutdown hook", "index", i)
+
if err := hook(); err != nil {
s.Logger.Error("shutdown hook failed", "index", i, "error", err)
- // Continue with other hooks even if one fails
}
}
@@ -78,6 +27,7 @@ func (s *Service) gracefulShutdown() error {
// Shutdown main HTTP server
if s.server != nil {
s.Logger.Info("shutting down HTTP server")
+
if err := s.server.Shutdown(ctx); err != nil {
s.Logger.Error("HTTP server shutdown error", "error", err)
shutdownErrors = append(shutdownErrors, err)
@@ -87,6 +37,7 @@ func (s *Service) gracefulShutdown() error {
// Shutdown metrics server
if s.metricsServer != nil {
s.Logger.Info("shutting down metrics server")
+
if err := s.metricsServer.Shutdown(ctx); err != nil {
s.Logger.Error("metrics server shutdown error", "error", err)
shutdownErrors = append(shutdownErrors, err)
@@ -99,6 +50,7 @@ func (s *Service) gracefulShutdown() error {
}
s.Logger.Info("graceful shutdown completed")
+
return nil
}