diff --git a/append_lifecycle.go b/append_lifecycle.go index f09004b52..bb55f2740 100644 --- a/append_lifecycle.go +++ b/append_lifecycle.go @@ -24,6 +24,9 @@ import ( "sync/atomic" "time" + "log/slog" + "os" + f_log "github.com/transparency-dev/formats/log" "github.com/transparency-dev/merkle/rfc6962" "github.com/transparency-dev/tessera/api/layout" @@ -34,7 +36,6 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "golang.org/x/mod/sumdb/note" - "k8s.io/klog/v2" ) const ( @@ -89,7 +90,8 @@ func init() { metric.WithDescription("Number of calls to the appender lifecycle Add function"), metric.WithUnit("{call}")) if err != nil { - klog.Exitf("Failed to create appenderAddsTotal metric: %v", err) + slog.Error("Failed to create appenderAddsTotal metric", slog.Any("error", err)) + os.Exit(1) } appenderAddHistogram, err = meter.Int64Histogram( @@ -98,14 +100,16 @@ func init() { metric.WithUnit("ms"), metric.WithExplicitBucketBoundaries(histogramBuckets...)) if err != nil { - klog.Exitf("Failed to create appenderAddDuration metric: %v", err) + slog.Error("Failed to create appenderAddDuration metric", slog.Any("error", err)) + os.Exit(1) } appenderHighestIndex, err = meter.Int64Gauge( "tessera.appender.index", metric.WithDescription("Highest index assigned by appender lifecycle Add function")) if err != nil { - klog.Exitf("Failed to create appenderHighestIndex metric: %v", err) + slog.Error("Failed to create appenderHighestIndex metric", slog.Any("error", err)) + os.Exit(1) } appenderIntegratedSize, err = meter.Int64Gauge( @@ -113,7 +117,8 @@ metric.WithDescription("Size of the integrated (but not necessarily published) tree"), metric.WithUnit("{entry}")) if err != nil { - klog.Exitf("Failed to create appenderIntegratedSize metric: %v", err) + slog.Error("Failed to create appenderIntegratedSize metric", slog.Any("error", err)) + os.Exit(1) } appenderIntegrateLatency, err = meter.Int64Histogram( @@ -122,7 +127,8 @@ 
metric.WithUnit("ms"), metric.WithExplicitBucketBoundaries(histogramBuckets...)) if err != nil { - klog.Exitf("Failed to create appenderIntegrateLatency metric: %v", err) + slog.Error("Failed to create appenderIntegrateLatency metric", slog.Any("error", err)) + os.Exit(1) } appenderDeadlineRemaining, err = meter.Int64Histogram( @@ -131,14 +137,16 @@ func init() { metric.WithUnit("ms"), metric.WithExplicitBucketBoundaries(histogramBuckets...)) if err != nil { - klog.Exitf("Failed to create appenderDeadlineRemaining metric: %v", err) + slog.Error("Failed to create appenderDeadlineRemaining metric", slog.Any("error", err)) + os.Exit(1) } appenderNextIndex, err = meter.Int64Gauge( "tessera.appender.next_index", metric.WithDescription("The next available index to be assigned to entries")) if err != nil { - klog.Exitf("Failed to create appenderNextIndex metric: %v", err) + slog.Error("Failed to create appenderNextIndex metric", slog.Any("error", err)) + os.Exit(1) } appenderSignedSize, err = meter.Int64Gauge( @@ -146,7 +154,8 @@ func init() { metric.WithDescription("Size of the latest signed checkpoint"), metric.WithUnit("{entry}")) if err != nil { - klog.Exitf("Failed to create appenderSignedSize metric: %v", err) + slog.Error("Failed to create appenderSignedSize metric", slog.Any("error", err)) + os.Exit(1) } appenderWitnessedSize, err = meter.Int64Gauge( @@ -154,7 +163,8 @@ func init() { metric.WithDescription("Size of the latest successfully witnessed checkpoint"), metric.WithUnit("{entry}")) if err != nil { - klog.Exitf("Failed to create appenderWitnessedSize metric: %v", err) + slog.Error("Failed to create appenderWitnessedSize metric", slog.Any("error", err)) + os.Exit(1) } followerEntriesProcessed, err = meter.Int64Gauge( @@ -162,7 +172,8 @@ func init() { metric.WithDescription("Number of entries processed"), metric.WithUnit("{entry}")) if err != nil { - klog.Exitf("Failed to create followerEntriesProcessed metric: %v", err) + slog.Error("Failed to create 
followerEntriesProcessed metric", slog.Any("error", err)) + os.Exit(1) } followerLag, err = meter.Int64Gauge( @@ -170,7 +181,8 @@ func init() { metric.WithDescription("Number of unprocessed entries in the current integrated tree"), metric.WithUnit("{entry}")) if err != nil { - klog.Exitf("Failed to create followerLag metric: %v", err) + slog.Error("Failed to create followerLag metric", slog.Any("error", err)) + os.Exit(1) } appenderWitnessRequests, err = meter.Int64Counter( @@ -178,7 +190,8 @@ func init() { metric.WithDescription("Number of attempts to witness a log checkpoint"), metric.WithUnit("{call}")) if err != nil { - klog.Exitf("Failed to create appenderWitnessRequests metric: %v", err) + slog.Error("Failed to create appenderWitnessRequests metric", slog.Any("error", err)) + os.Exit(1) } appenderWitnessHistogram, err = meter.Int64Histogram( @@ -187,7 +200,8 @@ func init() { metric.WithUnit("ms"), metric.WithExplicitBucketBoundaries(histogramBuckets...)) if err != nil { - klog.Exitf("Failed to create appenderWitnessHistogram metric: %v", err) + slog.Error("Failed to create appenderWitnessHistogram metric", slog.Any("error", err)) + os.Exit(1) } } @@ -338,12 +352,12 @@ func followerStats(ctx context.Context, f Follower, size func(context.Context) ( n, err := f.EntriesProcessed(ctx) if err != nil { - klog.Errorf("followerStats: follower %q EntriesProcessed(): %v", name, err) + slog.Error("followerStats: error in EntriesProcessed", slog.String("name", name), slog.Any("error", err)) continue } s, err := size(ctx) if err != nil { - klog.Errorf("followerStats: follower %q size(): %v", name, err) + slog.Error("followerStats: error in follower size", slog.String("name", name), slog.Any("error", err)) } attrs := metric.WithAttributes(followerNameKey.String(name)) followerEntriesProcessed.Record(ctx, otel.Clamp64(n), attrs) @@ -401,7 +415,7 @@ func (i *integrationStats) latency(size uint64) (time.Duration, bool) { // This is a long running function, exitingly only when 
the provided context is done. func (i *integrationStats) updateStats(ctx context.Context, r LogReader) { if r == nil { - klog.Warning("updateStates: nil logreader provided, not updating stats") + slog.Warn("updateStats: nil logreader provided, not updating stats") return } t := time.NewTicker(100 * time.Millisecond) @@ -413,7 +427,7 @@ func (i *integrationStats) updateStats(ctx context.Context, r LogReader) { } s, err := r.IntegratedSize(ctx) if err != nil { - klog.Errorf("IntegratedSize: %v", err) + slog.Error("Error calling IntegratedSize", slog.Any("error", err)) continue } appenderIntegratedSize.Record(ctx, otel.Clamp64(s)) @@ -422,7 +436,7 @@ } i, err := r.NextIndex(ctx) if err != nil { - klog.Errorf("NextIndex: %v", err) + slog.Error("Error calling NextIndex", slog.Any("error", err)) } appenderNextIndex.Record(ctx, otel.Clamp64(i)) } @@ -545,7 +559,7 @@ func (t *terminator) Shutdown(ctx context.Context) error { if err != nil { return err } - klog.V(1).Infof("Shutting down, waiting for checkpoint committing to size %d (current checkpoint is %d)", maxIndex, size) + slog.Debug("Shutting down, waiting for checkpoint", slog.Uint64("goal", maxIndex), slog.Uint64("current", size)) if size > maxIndex { return nil } @@ -660,7 +674,7 @@ func (o AppendOptions) CheckpointPublisher(lr LogReader, httpClient *http.Client var oldSize uint64 oldCP, err := lr.ReadCheckpoint(ctx) if err != nil { - klog.Infof("Failed to fetch old checkpoint: %v", err) + slog.Info("Failed to fetch old checkpoint", slog.Any("error", err)) } else { _, oldSize, _, err = parse.CheckpointUnsafe(oldCP) if err != nil { @@ -682,7 +696,7 @@ func (o AppendOptions) CheckpointPublisher(lr LogReader, httpClient *http.Client appenderWitnessRequests.Add(ctx, 1, metric.WithAttributes(attribute.String("error.type", "failed"))) return nil, err } - klog.Warningf("WitnessGateway: failing-open despite error: %v", err) + 
slog.Warn("WitnessGateway: failing-open despite error", slog.Any("error", err)) witAttr = append(witAttr, attribute.String("error.type", "failed_open")) } @@ -746,7 +760,8 @@ func (o *AppendOptions) WithCheckpointSigner(s note.Signer, additionalSigners .. origin := s.Name() for _, signer := range additionalSigners { if origin != signer.Name() { - klog.Exitf("WithCheckpointSigner: additional signer name (%q) does not match primary signer name (%q)", signer.Name(), origin) + slog.Error("WithCheckpointSigner: additional signer name does not match primary signer name", slog.String("name", signer.Name()), slog.String("origin", origin)) + os.Exit(1) } } o.newCP = func(ctx context.Context, size uint64, hash []byte) ([]byte, error) { diff --git a/await.go b/await.go index 6366fc28e..9d7e595dd 100644 --- a/await.go +++ b/await.go @@ -22,10 +22,11 @@ import ( "sync" "time" + "log/slog" + "github.com/transparency-dev/tessera/internal/otel" "github.com/transparency-dev/tessera/internal/parse" "go.opentelemetry.io/otel/trace" - "k8s.io/klog/v2" ) // NewPublicationAwaiter provides an PublicationAwaiter that can be cancelled @@ -123,7 +124,7 @@ func (a *PublicationAwaiter) pollLoop(ctx context.Context, readCheckpoint func(c select { case <-ctx.Done(): span.AddEvent("context.done") - klog.V(2).Info("PublicationAwaiter exiting due to context completion") + slog.Debug("PublicationAwaiter exiting due to context completion") cp, cpSize, cpErr = nil, 0, ctx.Err() ctxDone = true case <-time.After(pollPeriod): diff --git a/client/fetcher.go b/client/fetcher.go index d036c8a2c..8ab088c57 100644 --- a/client/fetcher.go +++ b/client/fetcher.go @@ -24,9 +24,10 @@ import ( "path" "strings" + "log/slog" + "github.com/transparency-dev/tessera/api/layout" "github.com/transparency-dev/tessera/internal/fetcher" - "k8s.io/klog/v2" ) // NewHTTPFetcher creates a new HTTPFetcher for the log rooted at the given URL, using @@ -88,7 +89,7 @@ func (h HTTPFetcher) fetch(ctx context.Context, p string) 
([]byte, error) { defer func() { if err := r.Body.Close(); err != nil { - klog.Errorf("resp.Body.Close(): %v", err) + slog.Error("resp.Body.Close", slog.Any("error", err)) } }() return io.ReadAll(r.Body) diff --git a/client/stream.go b/client/stream.go index 6f87a3f41..2b4f52068 100644 --- a/client/stream.go +++ b/client/stream.go @@ -20,8 +20,9 @@ import ( "fmt" "iter" + "log/slog" + "github.com/transparency-dev/tessera/api/layout" - "k8s.io/klog/v2" ) // TreeSizeFunc is a function which knows how to return the current tree size of a log. @@ -86,7 +87,7 @@ func EntryBundles(ctx context.Context, numWorkers uint, getSize TreeSizeFunc, ge tokens <- struct{}{} } - klog.V(1).Infof("stream.EntryBundles: streaming [%d, %d)", fromEntry, fromEntry+N) + slog.Debug("stream.EntryBundles: streaming", slog.Uint64("from", fromEntry), slog.Uint64("N", N)) // For each bundle, pop a future into the bundles channel and kick off an async request // to resolve it. @@ -114,7 +115,7 @@ func EntryBundles(ctx context.Context, numWorkers uint, getSize TreeSizeFunc, ge bundles <- f } - klog.V(1).Infof("stream.EntryBundles: exiting") + slog.Debug("stream.EntryBundles: exiting") }() return func(yield func(Bundle, error) bool) { @@ -131,7 +132,7 @@ func EntryBundles(ctx context.Context, numWorkers uint, getSize TreeSizeFunc, ge return } } - klog.V(1).Infof("stream.EntryBundles: iter done") + slog.Debug("stream.EntryBundles: iter done") } } diff --git a/cmd/conformance/aws/main.go b/cmd/conformance/aws/main.go index 8e3966df2..9a6ae22c9 100644 --- a/cmd/conformance/aws/main.go +++ b/cmd/conformance/aws/main.go @@ -22,8 +22,11 @@ import ( "fmt" "io" "net/http" + "os" "time" + "log/slog" + aaws "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -34,7 +37,6 @@ import ( "golang.org/x/mod/sumdb/note" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" - "k8s.io/klog/v2" ) var ( @@ -54,6 +56,7 @@ var ( signer = 
flag.String("signer", "", "Note signer to use to sign checkpoints") publishInterval = flag.Duration("publish_interval", 3*time.Second, "How frequently to publish updated checkpoints") traceFraction = flag.Float64("trace_fraction", 0, "Fraction of open-telemetry span traces to sample") + slogLevel = flag.Int("slog_level", 0, "The cut-off threshold for structured logging. Default is 0 (INFO). See https://pkg.go.dev/log/slog#Level for other levels.") additionalSigners = []string{} antispamEnable = flag.Bool("antispam", false, "EXPERIMENTAL: Set to true to enable persistent antispam storage") @@ -68,9 +71,9 @@ func init() { } func main() { - klog.InitFlags(nil) flag.Parse() ctx := context.Background() + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.Level(*slogLevel)}))) shutdownOTel := initOTel(ctx, *traceFraction) defer shutdownOTel(ctx) @@ -80,7 +83,8 @@ func main() { awsCfg := storageConfigFromFlags() driver, err := aws.New(ctx, awsCfg) if err != nil { - klog.Exitf("Failed to create new AWS storage: %v", err) + slog.Error("Failed to create new AWS storage", slog.Any("error", err)) + os.Exit(1) } var antispam tessera.Antispam // Persistent antispam is currently experimental, so there's no documentation yet! @@ -88,7 +92,8 @@ func main() { asOpts := aws_as.AntispamOpts{} // Use defaults antispam, err = aws_as.NewAntispam(ctx, antispamMysqlConfig().FormatDSN(), asOpts) if err != nil { - klog.Exitf("Failed to create new AWS antispam storage: %v", err) + slog.Error("Failed to create new AWS antispam storage", slog.Any("error", err)) + os.Exit(1) } } appender, shutdown, _, err := tessera.NewAppender(ctx, driver, tessera.NewAppendOptions(). @@ -98,7 +103,8 @@ func main() { WithPushback(10*4096). WithAntispam(tessera.DefaultAntispamInMemorySize, antispam)) if err != nil { - klog.Exit(err) + slog.Error("Failed to create new appender", slog.Any("error", err)) + os.Exit(1) } // Expose a HTTP handler for the conformance test writes. 
@@ -133,14 +139,17 @@ func main() { ReadHeaderTimeout: 5 * time.Second, } if err := http2.ConfigureServer(h1s, h2s); err != nil { - klog.Exitf("http2.ConfigureServer: %v", err) + slog.Error("http2.ConfigureServer", slog.Any("error", err)) + os.Exit(1) } if err := h1s.ListenAndServe(); err != nil { if err := shutdown(ctx); err != nil { - klog.Exit(err) + slog.Error("Failed to cleanly shutdown after ListenAndServe", slog.Any("error", err)) + os.Exit(1) } - klog.Exitf("ListenAndServe: %v", err) + slog.Error("ListenAndServe", slog.Any("error", err)) + os.Exit(1) } } @@ -148,23 +157,29 @@ func main() { // provided via flags. func storageConfigFromFlags() aws.Config { if *bucket == "" { - klog.Exit("--bucket must be set") + slog.Error("--bucket must be set") + os.Exit(1) } if *dbName == "" { - klog.Exit("--db_name must be set") + slog.Error("--db_name must be set") + os.Exit(1) } if *dbHost == "" { - klog.Exit("--db_host must be set") + slog.Error("--db_host must be set") + os.Exit(1) } if *dbPort == 0 { - klog.Exit("--db_port must be set") + slog.Error("--db_port must be set") + os.Exit(1) } if *dbUser == "" { - klog.Exit("--db_user must be set") + slog.Error("--db_user must be set") + os.Exit(1) } // Empty password isn't an option with AuroraDB MySQL. 
if *dbPassword == "" { - klog.Exit("--db_password must be set") + slog.Error("--db_password must be set") + os.Exit(1) } c := mysql.Config{ @@ -206,20 +221,25 @@ func storageConfigFromFlags() aws.Config { func antispamMysqlConfig() *mysql.Config { if *antispamDb == "" { - klog.Exit("--antispam_db_name must be set") + slog.Error("--antispam_db_name must be set") + os.Exit(1) } if *dbHost == "" { - klog.Exit("--db_host must be set") + slog.Error("--db_host must be set") + os.Exit(1) } if *dbPort == 0 { - klog.Exit("--db_port must be set") + slog.Error("--db_port must be set") + os.Exit(1) } if *dbUser == "" { - klog.Exit("--db_user must be set") + slog.Error("--db_user must be set") + os.Exit(1) } // Empty password isn't an option with AuroraDB MySQL. if *dbPassword == "" { - klog.Exit("--db_password must be set") + slog.Error("--db_password must be set") + os.Exit(1) } return &mysql.Config{ @@ -236,14 +256,16 @@ func antispamMysqlConfig() *mysql.Config { func signerFromFlags() (note.Signer, []note.Signer) { s, err := note.NewSigner(*signer) if err != nil { - klog.Exitf("Failed to create new signer: %v", err) + slog.Error("Failed to create new signer", slog.Any("error", err)) + os.Exit(1) } var a []note.Signer for _, as := range additionalSigners { s, err := note.NewSigner(as) if err != nil { - klog.Exitf("Failed to create additional signer: %v", err) + slog.Error("Failed to create additional signer", slog.Any("error", err)) + os.Exit(1) } a = append(a, s) } diff --git a/cmd/conformance/aws/otel.go b/cmd/conformance/aws/otel.go index 70ec17b0c..e09d56ec9 100644 --- a/cmd/conformance/aws/otel.go +++ b/cmd/conformance/aws/otel.go @@ -17,6 +17,9 @@ package main import ( "context" "errors" + "os" + + "log/slog" ec2 "go.opentelemetry.io/contrib/detectors/aws/ec2/v2" "go.opentelemetry.io/contrib/detectors/aws/ecs" @@ -28,7 +31,6 @@ import ( "go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.4.0" - 
"k8s.io/klog/v2" ) // initOTel initialises the open telemetry support for metrics and tracing. @@ -49,7 +51,7 @@ func initOTel(ctx context.Context, traceFraction float64) func(context.Context) } shutdownFuncs = nil if err != nil { - klog.Errorf("OTel shutdown: %v", err) + slog.Error("OTel shutdown", slog.Any("error", err)) } } @@ -68,7 +70,8 @@ func initOTel(ctx context.Context, traceFraction float64) func(context.Context) resource.WithDetectors(ec2ResourceDetector, ecsResourceDetector), ) if err != nil { - klog.Exitf("Failed to detect resources: %v", err) + slog.Error("Failed to detect resources", slog.Any("error", err)) + os.Exit(1) } // Code below is mostly taken from the OTEL AWS documentation: https://aws-otel.github.io/docs/getting-started/go-sdk/manual-instr @@ -76,7 +79,8 @@ func initOTel(ctx context.Context, traceFraction float64) func(context.Context) // Create and start new OTLP metric exporter metricExporter, err := otlpmetricgrpc.New(ctx, otlpmetricgrpc.WithInsecure(), otlpmetricgrpc.WithEndpoint("localhost:4317")) if err != nil { - klog.Exitf("Failed to create new OTLP metric exporter: %v", err) + slog.Error("Failed to create new OTLP metric exporter", slog.Any("error", err)) + os.Exit(1) } mp := metric.NewMeterProvider( metric.WithReader(metric.NewPeriodicReader(metricExporter)), @@ -88,7 +92,8 @@ func initOTel(ctx context.Context, traceFraction float64) func(context.Context) // Create and start new OTLP trace exporter traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithEndpoint("localhost:4317")) if err != nil { - klog.Exitf("Failed to create new OTLP trace exporter: %v", err) + slog.Error("Failed to create new OTLP trace exporter", slog.Any("error", err)) + os.Exit(1) } idg := xray.NewIDGenerator() diff --git a/cmd/conformance/gcp/main.go b/cmd/conformance/gcp/main.go index 52331918b..10e722802 100644 --- a/cmd/conformance/gcp/main.go +++ b/cmd/conformance/gcp/main.go @@ -34,7 +34,6 @@ import ( 
"golang.org/x/mod/sumdb/note" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" - "k8s.io/klog/v2" ) var ( @@ -46,6 +45,7 @@ var ( traceFraction = flag.Float64("trace_fraction", 0.01, "Fraction of open-telemetry span traces to sample") projectID = flag.String("project", "", "GCP Project ID for Cloud Logging traces (optional)") additionalSigners = []string{} + slogLevel = flag.Int("slog_level", 0, "The cut-off threshold for structured logging. Default is 0 (INFO). See https://pkg.go.dev/log/slog#Level for other levels.") ) func init() { @@ -56,12 +56,10 @@ func init() { } func main() { - // We use slogging, but not exclusively. Keep klog until everything is replaced. - klog.InitFlags(nil) flag.Parse() ctx := context.Background() - handler := slog.NewJSONHandler(os.Stderr, nil) + handler := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.Level(*slogLevel)}) slog.SetDefault(slog.New(logger.NewGCPContextHandler(handler, *projectID))) shutdownOTel := initOTel(ctx, *traceFraction) diff --git a/cmd/conformance/posix/README.md b/cmd/conformance/posix/README.md index 914a51289..0faf61821 100644 --- a/cmd/conformance/posix/README.md +++ b/cmd/conformance/posix/README.md @@ -20,7 +20,6 @@ Then, start the personality: go run ./cmd/conformance/posix \ --storage_dir=${LOG_DIR} \ --listen=:2025 \ - --v=2 ``` ## Add entries to the log diff --git a/cmd/conformance/posix/docker/compose.yaml b/cmd/conformance/posix/docker/compose.yaml index 898ac4885..f0c6e8dc9 100644 --- a/cmd/conformance/posix/docker/compose.yaml +++ b/cmd/conformance/posix/docker/compose.yaml @@ -18,8 +18,7 @@ services: command: [ "--storage_dir=/tmp/tessera-posix-log", "--listen=:2025", - "--alsologtostderr", - "--v=2", + "--slog_level=-4" ] volumes: - /tmp/tessera-posix-log:/tmp/tessera-posix-log diff --git a/cmd/conformance/posix/main.go b/cmd/conformance/posix/main.go index e7eaea000..3eda74db0 100644 --- a/cmd/conformance/posix/main.go +++ b/cmd/conformance/posix/main.go @@ -32,10 +32,11 
@@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" + "log/slog" + "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/storage/posix" badger_as "github.com/transparency-dev/tessera/storage/posix/antispam" - "k8s.io/klog/v2" ) var ( @@ -44,6 +45,7 @@ var ( privKeyFile = flag.String("private_key", "", "Location of private key file. If unset, uses the contents of the LOG_PRIVATE_KEY environment variable.") persistentAntispam = flag.Bool("antispam", false, "EXPERIMENTAL: Set to true to enable Badger-based persistent antispam storage") additionalPrivateKeyFiles = []string{} + slogLevel = flag.Int("slog_level", 0, "The cut-off threshold for structured logging. Default is 0 (INFO). See https://pkg.go.dev/log/slog#Level for other levels.") ) func init() { @@ -61,9 +63,9 @@ func addCacheHeaders(value string, fs http.Handler) http.HandlerFunc { } func main() { - klog.InitFlags(nil) flag.Parse() ctx := context.Background() + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.Level(*slogLevel)}))) // Gather the info needed for reading/writing checkpoints s, a := getSignersOrDie() @@ -71,7 +73,8 @@ func main() { // Create the Tessera POSIX storage, using the directory from the --storage_dir flag driver, err := posix.New(ctx, posix.Config{Path: *storageDir}) if err != nil { - klog.Exitf("Failed to construct storage: %v", err) + slog.Error("Failed to construct storage", slog.Any("error", err)) + os.Exit(1) } var antispam tessera.Antispam // Persistent antispam is currently experimental, so there's no terraform or documentation yet! 
@@ -79,7 +82,8 @@ func main() { asOpts := badger_as.AntispamOpts{} antispam, err = badger_as.NewAntispam(ctx, filepath.Join(*storageDir, ".state", "antispam"), asOpts) if err != nil { - klog.Exitf("Failed to create new Badger antispam storage: %v", err) + slog.Error("Failed to create new Badger antispam storage", slog.Any("error", err)) + os.Exit(1) } } @@ -90,7 +94,8 @@ func main() { WithBatching(256, time.Second). WithAntispam(tessera.DefaultAntispamInMemorySize, antispam)) if err != nil { - klog.Exit(err) + slog.Error("Failed to create new appender", slog.Any("error", err)) + os.Exit(1) } // Define a handler for /add that accepts POST requests and adds the POST body to the log @@ -107,7 +112,7 @@ func main() { return } if _, err := fmt.Fprintf(w, "%d", idx.Index); err != nil { - klog.Errorf("/add: %v", err) + slog.Error("/add", slog.Any("error", err)) return } }) @@ -118,8 +123,7 @@ func main() { http.Handle("GET /tile/", addCacheHeaders("max-age=31536000, immutable", fs)) http.Handle("GET /entries/", fs) - // TODO(mhutchinson): Change the listen flag to just a port, or fix up this address formatting - klog.Infof("Environment variables useful for accessing this log:\n"+ + fmt.Printf("Environment variables useful for accessing this log:\n"+ "export WRITE_URL=http://localhost%s/ \n"+ "export READ_URL=http://localhost%s/ \n", *listen, *listen) // Run the HTTP server with the single handler and block until this is terminated @@ -130,14 +134,17 @@ func main() { ReadHeaderTimeout: 5 * time.Second, } if err := http2.ConfigureServer(h1s, h2s); err != nil { - klog.Exitf("http2.ConfigureServer: %v", err) + slog.Error("http2.ConfigureServer", slog.Any("error", err)) + os.Exit(1) } if err := h1s.ListenAndServe(); err != nil { if err := shutdown(ctx); err != nil { - klog.Exit(err) + slog.Error("Failed to cleanly shutdown after ListenAndServe", slog.Any("error", err)) + os.Exit(1) } - klog.Exitf("ListenAndServe: %v", err) + slog.Error("ListenAndServe", slog.Any("error", err)) 
+ os.Exit(1) } } @@ -147,11 +154,13 @@ func getSignersOrDie() (note.Signer, []note.Signer) { for _, p := range additionalPrivateKeyFiles { kr, err := getKeyFile(p) if err != nil { - klog.Exitf("Unable to get additional private key from %q: %v", p, err) + slog.Error("Unable to get additional private key", slog.String("file", p), slog.Any("error", err)) + os.Exit(1) } k, err := note.NewSigner(kr) if err != nil { - klog.Exitf("Failed to instantiate signer from %q: %v", p, err) + slog.Error("Failed to instantiate signer", slog.String("file", p), slog.Any("error", err)) + os.Exit(1) } a = append(a, k) } @@ -165,17 +174,20 @@ func getSignerOrDie() note.Signer { if len(*privKeyFile) > 0 { privKey, err = getKeyFile(*privKeyFile) if err != nil { - klog.Exitf("Unable to get private key: %q", err) + slog.Error("Unable to get private key", slog.Any("error", err)) + os.Exit(1) } } else { privKey = os.Getenv("LOG_PRIVATE_KEY") if len(privKey) == 0 { - klog.Exit("Supply private key file path using --private_key or set LOG_PRIVATE_KEY environment variable") + slog.Error("Supply private key file path using --private_key or set LOG_PRIVATE_KEY environment variable") + os.Exit(1) } } s, err := note.NewSigner(privKey) if err != nil { - klog.Exitf("Failed to instantiate signer: %q", err) + slog.Error("Failed to instantiate signer", slog.Any("error", err)) + os.Exit(1) } return s } diff --git a/cmd/examples/posix-oneshot/main.go b/cmd/examples/posix-oneshot/main.go index 5fcb61554..878b301a3 100644 --- a/cmd/examples/posix-oneshot/main.go +++ b/cmd/examples/posix-oneshot/main.go @@ -29,9 +29,10 @@ import ( "golang.org/x/mod/sumdb/note" + "log/slog" + "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/storage/posix" - "k8s.io/klog/v2" ) var ( @@ -41,6 +42,7 @@ var ( witnessPolicyFile = flag.String("witness_policy_file", "", "(Optional) Path to the file containing the witness policy in the format describe at 
https://git.glasklar.is/sigsum/core/sigsum-go/-/blob/main/doc/policy.md") witnessTimeout = flag.Duration("witness_timeout", tessera.DefaultWitnessTimeout, "Maximum time to wait for witness responses.") witnessFailOpen = flag.Bool("witness_fail_open", false, "Still publish a checkpoint even if witness policy could not be met") + slogLevel = flag.Int("slog_level", 0, "The cut-off threshold for structured logging. Default is 0 (INFO). See https://pkg.go.dev/log/slog#Level for other levels.") ) // entryInfo binds the actual bytes to be added as a leaf with a @@ -53,11 +55,11 @@ type entryInfo struct { } func main() { - klog.InitFlags(nil) flag.Parse() ctx := context.Background() + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.Level(*slogLevel)}))) - klog.V(1).Infof("Initialising driver") + slog.Debug("Initialising driver") // Gather the info needed for reading/writing checkpoints s := getSignerOrDie() @@ -72,10 +74,11 @@ func main() { }, ) if err != nil { - klog.Exitf("Failed to construct storage: %v", err) + slog.Error("Failed to construct storage", slog.Any("error", err)) + os.Exit(1) } - klog.V(1).Infof("Reading entries") + slog.Debug("Reading entries") // Evaluate the glob provided by the --entries flag to determine the files containing leaves filesToAdd := readEntriesOrDie() batchSize := uint(len(filesToAdd)) @@ -84,7 +87,7 @@ func main() { batchSize = 1 } - klog.V(1).Infof("Configuring options") + slog.Debug("Configuring options") opts := tessera.NewAppendOptions(). WithCheckpointSigner(s). 
// Hint to Tessera the number of entries we're about to add via the batchSize parameter below, @@ -99,11 +102,13 @@ func main() { if *witnessPolicyFile != "" { f, err := os.ReadFile(*witnessPolicyFile) if err != nil { - klog.Exitf("failed to read witness policy file %q: %v", *witnessPolicyFile, err) + slog.Error("Failed to read witness policy file", slog.String("witnesspolicyfile", *witnessPolicyFile), slog.Any("error", err)) + os.Exit(1) } wg, err := tessera.NewWitnessGroupFromPolicy(f) if err != nil { - klog.Exitf("failed to create witness group from policy: %v", err) + slog.Error("Failed to create witness group from policy", slog.Any("error", err)) + os.Exit(1) } wOpts := &tessera.WitnessOptions{ @@ -113,49 +118,53 @@ func main() { opts.WithWitnesses(wg, wOpts) } - klog.V(1).Infof("Creating appender") + slog.Debug("Creating appender") appender, shutdown, r, err := tessera.NewAppender(ctx, driver, opts) if err != nil { - klog.Exit(err) + slog.Error("Failed to create new appender", slog.Any("error", err)) + os.Exit(1) } - klog.V(1).Infof("Creating awaiter") + slog.Debug("Creating awaiter") // We don't want to exit until our entries have been integrated into the tree, so we'll use Tessera's // PublicationAwaiter to help with that. await := tessera.NewPublicationAwaiter(ctx, r.ReadCheckpoint, 100*time.Millisecond) - klog.V(1).Infof("Adding entries") + slog.Debug("Adding entries") // Add each of the leaves in order, and store the futures in a slice // that we will check once all leaves are sent to storage. 
indexFutures := make([]entryInfo, 0, len(filesToAdd)) for _, fp := range filesToAdd { b, err := os.ReadFile(fp) if err != nil { - klog.Exitf("Failed to read entry file %q: %q", fp, err) + slog.Error("Failed to read entry file", slog.String("fp", fp), slog.Any("error", err)) + os.Exit(1) } f := appender.Add(ctx, tessera.NewEntry(b)) indexFutures = append(indexFutures, entryInfo{name: fp, f: f}) } - klog.V(1).Infof("Awaiting entries") + slog.Debug("Awaiting entries") // Two options to ensure all work is done: // 1) Check each of the futures to ensure that the leaves are sequenced. for _, entry := range indexFutures { seq, _, err := await.Await(ctx, entry.f) if err != nil { - klog.Exitf("Failed to sequence %q: %q", entry.name, err) + slog.Error("Failed to sequence", slog.String("name", entry.name), slog.Any("error", err)) + os.Exit(1) } - klog.Infof("%d: %v", seq.Index, entry.name) + slog.Info("Integrated entry", slog.Uint64("index", seq.Index), slog.String("name", entry.name)) } - klog.V(1).Infof("Futures resolved") - klog.V(1).Infof("Shutting down") + slog.Debug("Futures resolved") + slog.Debug("Shutting down") // 2) shutdown the appender if err := shutdown(ctx); err != nil { - klog.Exitf("Failed to shut down cleanly: %v", err) + slog.Error("Failed to shut down cleanly", slog.Any("error", err)) + os.Exit(1) } - klog.V(1).Infof("Finished") + slog.Debug("Finished") } // Read log private key from file or environment variable @@ -165,17 +174,20 @@ func getSignerOrDie() note.Signer { if len(*privKeyFile) > 0 { privKey, err = getKeyFile(*privKeyFile) if err != nil { - klog.Exitf("Unable to get private key: %q", err) + slog.Error("Unable to get private key", slog.Any("error", err)) + os.Exit(1) } } else { privKey = os.Getenv("LOG_PRIVATE_KEY") if len(privKey) == 0 { - klog.Exit("Supply private key file path using --private_key or set LOG_PRIVATE_KEY environment variable") + slog.Error("Supply private key file path using --private_key or set LOG_PRIVATE_KEY environment 
variable") + os.Exit(1) } } s, err := note.NewSigner(privKey) if err != nil { - klog.Exitf("Failed to instantiate signer: %q", err) + slog.Error("Failed to instantiate signer", slog.Any("error", err)) + os.Exit(1) } return s } @@ -191,8 +203,9 @@ func getKeyFile(path string) (string, error) { func readEntriesOrDie() []string { toAdd, err := filepath.Glob(*entries) if err != nil { - klog.Exitf("Failed to glob entries %q: %q", *entries, err) + slog.Error("Failed to glob entries", slog.String("entries", *entries), slog.Any("error", err)) + os.Exit(1) } - klog.V(1).Infof("toAdd: %v", toAdd) + slog.Debug("toAdd", slog.Any("files", toAdd)) return toAdd } diff --git a/cmd/experimental/migrate/aws/main.go b/cmd/experimental/migrate/aws/main.go index a46e30c2d..06fa9a23d 100644 --- a/cmd/experimental/migrate/aws/main.go +++ b/cmd/experimental/migrate/aws/main.go @@ -21,6 +21,9 @@ import ( "flag" "fmt" "net/url" + "os" + + "log/slog" aaws "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/credentials" @@ -30,7 +33,6 @@ import ( "github.com/transparency-dev/tessera/client" "github.com/transparency-dev/tessera/internal/parse" "github.com/transparency-dev/tessera/storage/aws" - "k8s.io/klog/v2" ) var ( @@ -48,50 +50,59 @@ var ( sourceURL = flag.String("source_url", "", "Base URL for the source log.") numWorkers = flag.Uint("num_workers", 30, "Number of migration worker goroutines.") + slogLevel = flag.Int("slog_level", 0, "The cut-off threshold for structured logging. Default is 0 (INFO). 
See https://pkg.go.dev/log/slog#Level for other levels.") ) func main() { - klog.InitFlags(nil) flag.Parse() ctx := context.Background() + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.Level(*slogLevel)}))) if *sourceURL == "" { - klog.Exit("Missing parameter: --source_url") + slog.Error("Missing parameter: --source_url") + os.Exit(1) } srcURL, err := url.Parse(*sourceURL) if err != nil { - klog.Exitf("Invalid --source_url %q: %v", *sourceURL, err) + slog.Error("Invalid --source_url", slog.String("param", *sourceURL), slog.Any("error", err)) + os.Exit(1) } src, err := client.NewHTTPFetcher(srcURL, nil) if err != nil { - klog.Exitf("Failed to create HTTP fetcher: %v", err) + slog.Error("Failed to create HTTP fetcher", slog.Any("error", err)) + os.Exit(1) } sourceCP, err := src.ReadCheckpoint(ctx) if err != nil { - klog.Exitf("fetch initial source checkpoint: %v", err) + slog.Error("fetch initial source checkpoint", slog.Any("error", err)) + os.Exit(1) } // TODO(mhutchinson): parse this safely. 
_, sourceSize, sourceRoot, err := parse.CheckpointUnsafe(sourceCP) if err != nil { - klog.Exitf("Failed to parse checkpoint: %v", err) + slog.Error("Failed to parse checkpoint", slog.Any("error", err)) + os.Exit(1) } // Create our Tessera storage backend: awsCfg := storageConfigFromFlags() driver, err := aws.New(ctx, awsCfg) if err != nil { - klog.Exitf("Failed to create new AWS storage: %v", err) + slog.Error("Failed to create new AWS storage", slog.Any("error", err)) + os.Exit(1) } opts := tessera.NewMigrationOptions() m, err := tessera.NewMigrationTarget(ctx, driver, opts) if err != nil { - klog.Exitf("Failed to create MigrationTarget: %v", err) + slog.Error("Failed to create MigrationTarget", slog.Any("error", err)) + os.Exit(1) } - klog.Infof("Starting Migrate() with workers=%d, sourceSize=%d, migrating from %q", *numWorkers, sourceSize, *sourceURL) + slog.Info("Starting Migrate() with workers=, sourceSize=, migrating from", slog.Any("numworkers", *numWorkers), slog.Uint64("sourcesize", sourceSize), slog.String("sourceurl", *sourceURL)) if err := m.Migrate(context.Background(), *numWorkers, sourceSize, sourceRoot, src.ReadEntryBundle); err != nil { - klog.Exitf("Migrate failed: %v", err) + slog.Error("Migrate failed", slog.Any("error", err)) + os.Exit(1) } } @@ -99,23 +110,29 @@ func main() { // provided via flags. func storageConfigFromFlags() aws.Config { if *bucket == "" { - klog.Exit("--bucket must be set") + slog.Error("--bucket must be set") + os.Exit(1) } if *dbName == "" { - klog.Exit("--db_name must be set") + slog.Error("--db_name must be set") + os.Exit(1) } if *dbHost == "" { - klog.Exit("--db_host must be set") + slog.Error("--db_host must be set") + os.Exit(1) } if *dbPort == 0 { - klog.Exit("--db_port must be set") + slog.Error("--db_port must be set") + os.Exit(1) } if *dbUser == "" { - klog.Exit("--db_user must be set") + slog.Error("--db_user must be set") + os.Exit(1) } // Empty passord isn't an option with AuroraDB MySQL. 
if *dbPassword == "" { - klog.Exit("--db_password must be set") + slog.Error("--db_password must be set") + os.Exit(1) } c := mysql.Config{ diff --git a/cmd/experimental/migrate/gcp/main.go b/cmd/experimental/migrate/gcp/main.go index 18fbf63c7..65598e995 100644 --- a/cmd/experimental/migrate/gcp/main.go +++ b/cmd/experimental/migrate/gcp/main.go @@ -22,14 +22,16 @@ import ( "flag" "fmt" "net/url" + "os" "strconv" "strings" + "log/slog" + "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/client" "github.com/transparency-dev/tessera/storage/gcp" gcp_as "github.com/transparency-dev/tessera/storage/gcp/antispam" - "k8s.io/klog/v2" ) var ( @@ -39,40 +41,47 @@ var ( sourceURL = flag.String("source_url", "", "Base URL for the source log.") numWorkers = flag.Uint("num_workers", 30, "Number of migration worker goroutines.") persistentAntispam = flag.Bool("antispam", false, "EXPERIMENTAL: Set to true to enable GCP-based persistent antispam storage") + slogLevel = flag.Int("slog_level", 0, "The cut-off threshold for structured logging. Default is 0 (INFO). 
See https://pkg.go.dev/log/slog#Level for other levels.") ) func main() { - klog.InitFlags(nil) flag.Parse() ctx := context.Background() + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.Level(*slogLevel)}))) srcURL, err := url.Parse(*sourceURL) if err != nil { - klog.Exitf("Invalid --source_url %q: %v", *sourceURL, err) + slog.Error("Invalid --source_url", slog.String("param", *sourceURL), slog.Any("error", err)) + os.Exit(1) } src, err := client.NewHTTPFetcher(srcURL, nil) if err != nil { - klog.Exitf("Failed to create HTTP fetcher: %v", err) + slog.Error("Failed to create HTTP fetcher", slog.Any("error", err)) + os.Exit(1) } sourceCP, err := src.ReadCheckpoint(ctx) if err != nil { - klog.Exitf("fetch initial source checkpoint: %v", err) + slog.Error("fetch initial source checkpoint", slog.Any("error", err)) + os.Exit(1) } bits := strings.Split(string(sourceCP), "\n") sourceSize, err := strconv.ParseUint(bits[1], 10, 64) if err != nil { - klog.Exitf("invalid CP size %q: %v", bits[1], err) + slog.Error("invalid CP size", slog.String("size", bits[1]), slog.Any("error", err)) + os.Exit(1) } sourceRoot, err := base64.StdEncoding.DecodeString(bits[2]) if err != nil { - klog.Exitf("invalid checkpoint roothash %q: %v", bits[2], err) + slog.Error("invalid checkpoint roothash", slog.String("hash", bits[2]), slog.Any("error", err)) + os.Exit(1) } // Create our Tessera storage backend: gcpCfg := storageConfigFromFlags() driver, err := gcp.New(ctx, gcpCfg) if err != nil { - klog.Exitf("Failed to create new GCP storage driver: %v", err) + slog.Error("Failed to create new GCP storage driver", slog.Any("error", err)) + os.Exit(1) } opts := tessera.NewMigrationOptions() @@ -85,18 +94,21 @@ func main() { } antispam, err = gcp_as.NewAntispam(ctx, fmt.Sprintf("%s-antispam", *spanner), asOpts) if err != nil { - klog.Exitf("Failed to create new GCP antispam storage: %v", err) + slog.Error("Failed to create new GCP antispam storage", 
slog.Any("error", err)) + os.Exit(1) } opts.WithAntispam(antispam) } m, err := tessera.NewMigrationTarget(ctx, driver, opts) if err != nil { - klog.Exitf("Failed to create MigrationTarget: %v", err) + slog.Error("Failed to create MigrationTarget", slog.Any("error", err)) + os.Exit(1) } if err := m.Migrate(context.Background(), *numWorkers, sourceSize, sourceRoot, src.ReadEntryBundle); err != nil { - klog.Exitf("Migrate failed: %v", err) + slog.Error("Migrate failed", slog.Any("error", err)) + os.Exit(1) } } @@ -104,10 +116,12 @@ func main() { // provided via flags. func storageConfigFromFlags() gcp.Config { if *bucket == "" { - klog.Exit("--bucket must be set") + slog.Error("--bucket must be set") + os.Exit(1) } if *spanner == "" { - klog.Exit("--spanner must be set") + slog.Error("--spanner must be set") + os.Exit(1) } return gcp.Config{ Bucket: *bucket, diff --git a/cmd/experimental/migrate/posix/main.go b/cmd/experimental/migrate/posix/main.go index eb1059011..af49af18f 100644 --- a/cmd/experimental/migrate/posix/main.go +++ b/cmd/experimental/migrate/posix/main.go @@ -21,59 +21,70 @@ import ( "encoding/base64" "flag" "net/url" + "os" "strconv" "strings" + "log/slog" + "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/client" "github.com/transparency-dev/tessera/storage/posix" - "k8s.io/klog/v2" ) var ( storageDir = flag.String("storage_dir", "", "Root directory to store log data.") sourceURL = flag.String("source_url", "", "Base URL for the source log.") numWorkers = flag.Uint("num_workers", 30, "Number of migration worker goroutines.") + slogLevel = flag.Int("slog_level", 0, "The cut-off threshold for structured logging. Default is 0 (INFO). 
See https://pkg.go.dev/log/slog#Level for other levels.") ) func main() { - klog.InitFlags(nil) flag.Parse() ctx := context.Background() + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.Level(*slogLevel)}))) srcURL, err := url.Parse(*sourceURL) if err != nil { - klog.Exitf("Invalid --source_url %q: %v", *sourceURL, err) + slog.Error("Invalid --source_url", slog.String("param", *sourceURL), slog.Any("error", err)) + os.Exit(1) } src, err := client.NewHTTPFetcher(srcURL, nil) if err != nil { - klog.Exitf("Failed to create HTTP fetcher: %v", err) + slog.Error("Failed to create HTTP fetcher", slog.Any("error", err)) + os.Exit(1) } sourceCP, err := src.ReadCheckpoint(ctx) if err != nil { - klog.Exitf("fetch initial source checkpoint: %v", err) + slog.Error("fetch initial source checkpoint", slog.Any("error", err)) + os.Exit(1) } bits := strings.Split(string(sourceCP), "\n") sourceSize, err := strconv.ParseUint(bits[1], 10, 64) if err != nil { - klog.Exitf("invalid CP size %q: %v", bits[1], err) + slog.Error("invalid CP size", slog.String("size", bits[1]), slog.Any("error", err)) + os.Exit(1) } sourceRoot, err := base64.StdEncoding.DecodeString(bits[2]) if err != nil { - klog.Exitf("invalid checkpoint roothash %q: %v", bits[2], err) + slog.Error("invalid checkpoint roothash", slog.String("hash", bits[2]), slog.Any("error", err)) + os.Exit(1) } driver, err := posix.New(ctx, posix.Config{Path: *storageDir}) if err != nil { - klog.Exitf("Failed to create new POSIX storage driver: %v", err) + slog.Error("Failed to create new POSIX storage driver", slog.Any("error", err)) + os.Exit(1) } // Create our Tessera migration target instance m, err := tessera.NewMigrationTarget(ctx, driver, tessera.NewMigrationOptions()) if err != nil { - klog.Exitf("Failed to create new POSIX storage: %v", err) + slog.Error("Failed to create new POSIX storage", slog.Any("error", err)) + os.Exit(1) } if err := m.Migrate(context.Background(), *numWorkers, 
sourceSize, sourceRoot, src.ReadEntryBundle); err != nil { - klog.Exitf("Migrate failed: %v", err) + slog.Error("Migrate failed", slog.Any("error", err)) + os.Exit(1) } } diff --git a/cmd/experimental/mirror/posix/main.go b/cmd/experimental/mirror/posix/main.go index 447d84cfe..c40ee1950 100644 --- a/cmd/experimental/mirror/posix/main.go +++ b/cmd/experimental/mirror/posix/main.go @@ -24,31 +24,34 @@ import ( "path/filepath" "time" + "log/slog" + "github.com/transparency-dev/tessera/api/layout" "github.com/transparency-dev/tessera/client" mirror "github.com/transparency-dev/tessera/cmd/experimental/mirror/internal" - "k8s.io/klog/v2" ) var ( storageDir = flag.String("storage_dir", "", "Root directory to store log data.") sourceURL = flag.String("source_url", "", "Base URL for the source log.") numWorkers = flag.Uint("num_workers", 30, "Number of migration worker goroutines.") + slogLevel = flag.Int("slog_level", 0, "The cut-off threshold for structured logging. Default is 0 (INFO). See https://pkg.go.dev/log/slog#Level for other levels.") ) func main() { - klog.InitFlags(nil) - klog.CopyStandardLogTo("INFO") flag.Parse() ctx := context.Background() + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.Level(*slogLevel)}))) srcURL, err := url.Parse(*sourceURL) if err != nil { - klog.Exitf("Invalid --source_url %q: %v", *sourceURL, err) + slog.Error("Invalid --source_url", slog.String("param", *sourceURL), slog.Any("error", err)) + os.Exit(1) } src, err := client.NewHTTPFetcher(srcURL, nil) if err != nil { - klog.Exitf("Failed to create HTTP fetcher: %v", err) + slog.Error("Failed to create HTTP fetcher", slog.Any("error", err)) + os.Exit(1) } m := &mirror.Mirror{ @@ -71,11 +74,12 @@ func main() { }() if err := m.Run(ctx); err != nil { - klog.Exitf("Failed to mirror log: %v", err) + slog.Error("Failed to mirror log", slog.Any("error", err)) + os.Exit(1) } printProgress(m.Progress) - klog.Info("Log mirrored successfully.") + 
slog.Info("Log mirrored successfully.") } func printProgress(f func() (uint64, uint64)) { @@ -85,7 +89,7 @@ func printProgress(f func() (uint64, uint64)) { if total == done && done == 0 { p = 100.0 } - klog.Infof("Progress: %d of %d resources (%0.2f%%)", done, total, p) + slog.Info("Progress", slog.Uint64("done", done), slog.Uint64("total", total), slog.Float64("percent", p)) } type posixTarget struct { diff --git a/cmd/fsck/internal/tui/app.go b/cmd/fsck/internal/tui/app.go index baa6127f0..79f421518 100644 --- a/cmd/fsck/internal/tui/app.go +++ b/cmd/fsck/internal/tui/app.go @@ -18,13 +18,12 @@ package tui import ( "bufio" "context" - "flag" "io" + "log/slog" "time" "github.com/transparency-dev/tessera/cmd/fsck/tui" "github.com/transparency-dev/tessera/fsck" - "k8s.io/klog/v2" tea "github.com/charmbracelet/bubbletea" ) @@ -35,10 +34,8 @@ func RunApp(ctx context.Context, f *fsck.Fsck) error { p := tea.NewProgram(m) // Redirect logging so as to appear above the UI - _ = flag.Set("logtostderr", "false") - _ = flag.Set("alsologtostderr", "false") r, w := io.Pipe() - klog.SetOutput(w) + slog.SetDefault(slog.New(slog.NewTextHandler(w, &slog.HandlerOptions{Level: slog.LevelInfo}))) go func() { s := bufio.NewScanner(r) for s.Scan() { diff --git a/cmd/fsck/main.go b/cmd/fsck/main.go index bc4f8b8dc..ab4e1abc5 100644 --- a/cmd/fsck/main.go +++ b/cmd/fsck/main.go @@ -23,6 +23,8 @@ import ( "os" "time" + "log/slog" + f_note "github.com/transparency-dev/formats/note" "github.com/transparency-dev/merkle/rfc6962" "github.com/transparency-dev/tessera/api" @@ -31,7 +33,6 @@ import ( "github.com/transparency-dev/tessera/fsck" "golang.org/x/mod/sumdb/note" "golang.org/x/time/rate" - "k8s.io/klog/v2" ) var ( @@ -42,15 +43,17 @@ var ( pubKey = flag.String("public_key", "", "Path to a file containing the log's public key") qps = flag.Float64("qps", 0, "Max QPS to send to the target log. 
Set to zero for unlimited") ui = flag.Bool("ui", true, "Set to true to use a TUI to display progress, or false for logging") + slogLevel = flag.Int("slog_level", 0, "The cut-off threshold for structured logging. Default is 0 (INFO). See https://pkg.go.dev/log/slog#Level for other levels.") ) func main() { - klog.InitFlags(nil) flag.Parse() + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.Level(*slogLevel)}))) ctx, cancel := context.WithCancel(context.Background()) logURL, err := url.Parse(*storageURL) if err != nil { - klog.Exitf("Invalid --storage_url %q: %v", *storageURL, err) + slog.Error("Invalid --storage_url", slog.String("param", *storageURL), slog.Any("error", err)) + os.Exit(1) } var src fsck.Fetcher @@ -61,7 +64,8 @@ func main() { } else { httpSrc, err := client.NewHTTPFetcher(logURL, nil) if err != nil { - klog.Exitf("Failed to create HTTP fetcher: %v", err) + slog.Error("Failed to create HTTP fetcher", slog.Any("error", err)) + os.Exit(1) } if *bearerToken != "" { httpSrc.SetAuthorizationHeader(fmt.Sprintf("Bearer %s", *bearerToken)) @@ -88,9 +92,9 @@ func main() { // Only log the error if we're using the TUI, otherwise it will be double logged in headless mode. if err != nil && *ui { - klog.Errorf("fsck failed: %v", err) + slog.Error("fsck failed", slog.Any("error", err)) } - klog.V(1).Infof("Completed ranges:\n%s", f.Status()) + slog.Debug("Completed ranges", slog.Any("status", f.Status())) checkResult <- err close(checkResult) @@ -98,7 +102,7 @@ func main() { if *ui { if err := tui.RunApp(ctx, f); err != nil { - klog.Errorf("App exited: %v", err) + slog.Error("App exited", slog.Any("error", err)) } // User may have exited the UI, cancel the context to signal to everything else. 
cancel() @@ -108,11 +112,12 @@ func main() { case err := <-checkResult: cancel() if err != nil { - klog.Exitf("fsck failed: %v", err) + slog.Error("fsck failed", slog.Any("error", err)) + os.Exit(1) } return case <-time.After(time.Second): - klog.V(1).Infof("Ranges:\n%s", f.Status()) + slog.Debug("Ranges", slog.Any("status", f.Status())) } } } @@ -134,15 +139,18 @@ func defaultMerkleLeafHasher(bundle []byte) ([][]byte, error) { func verifierFromFlags() note.Verifier { if *pubKey == "" { - klog.Exit("Must provide the --public_key flag") + slog.Error("Must provide the --public_key flag") + os.Exit(1) } b, err := os.ReadFile(*pubKey) if err != nil { - klog.Exitf("Failed to read verifier from %q: %v", *pubKey, err) + slog.Error("Failed to read verifier from", slog.String("pubkey", *pubKey), slog.Any("error", err)) + os.Exit(1) } v, err := f_note.NewVerifier(string(b)) if err != nil { - klog.Exitf("Invalid verifier in %q: %v", *pubKey, err) + slog.Error("Invalid verifier in", slog.String("pubkey", *pubKey), slog.Any("error", err)) + os.Exit(1) } return v } diff --git a/deployment/live/aws/codelab/README.md b/deployment/live/aws/codelab/README.md index d77c156d8..6975e537c 100644 --- a/deployment/live/aws/codelab/README.md +++ b/deployment/live/aws/codelab/README.md @@ -144,7 +144,6 @@ endpoint. --db_name=tessera \ --db_host=$LOG_RDS_DB \ --signer=$(cat /home/ec2-user/tessera-keys/$LOG_NAME.sec) \ - --v=3 ``` 1. 
🎉 **Congratulations** 🎉 @@ -191,7 +190,6 @@ The instructions to do this for the codelab are largely the same, except: --db_name=tessera \ --db_host=$LOG_RDS_DB \ --signer=$(cat /home/ec2-user/tessera-keys/$LOG_NAME.sec) \ - --v=3 \ --antispam=true \ --antispam_db_name=antispam_db ``` diff --git a/deployment/modules/aws/conformance/main.tf b/deployment/modules/aws/conformance/main.tf index 4c82dc7dd..32a9e31b6 100644 --- a/deployment/modules/aws/conformance/main.tf +++ b/deployment/modules/aws/conformance/main.tf @@ -161,7 +161,7 @@ resource "aws_ecs_task_definition" "conformance" { "--db_host=${module.storage.log_rds_db.endpoint}", "--antispam=${var.antispam}", "--antispam_db_name=${var.antispam_db_name}", - "-v=2" + "--slog_level=-4" ], "logConfiguration" : { "logDriver" : "awslogs", @@ -266,9 +266,8 @@ resource "aws_ecs_task_definition" "hammer" { "--log_public_key=${var.verifier}", "--log_url=https://${module.storage.log_bucket.bucket_regional_domain_name}", "--write_log_url=http://${aws_service_discovery_service.conformance_discovery.name}.${aws_service_discovery_private_dns_namespace.internal.name}:${local.port}", - "-v=3", + "--slog_level=-4", "--show_ui=false", - "--logtostderr", "--num_writers=1100", "--max_write_ops=1500", "--leaf_min_size=1024", diff --git a/deployment/modules/gcp/cloudbuild/main.tf b/deployment/modules/gcp/cloudbuild/main.tf index 0c354bbd5..941818910 100644 --- a/deployment/modules/gcp/cloudbuild/main.tf +++ b/deployment/modules/gcp/cloudbuild/main.tf @@ -116,7 +116,7 @@ resource "google_cloudbuild_trigger" "docker" { ## them in files under /workspace. These are needed for later steps. 
step { id = "terraform_outputs" - name = "alpine/terragrunt:1.9.8" + name = "alpine/terragrunt:1.9.8" script = <= goal { elapsed := time.Since(startTime) - klog.Infof("Reached tree size goal of %d after %s; exiting", goal, elapsed) + slog.Info("Reached tree size goal; exiting", slog.Uint64("size", goal), slog.Duration("elapsed", elapsed)) cancel() return } @@ -167,13 +169,13 @@ func main() { } if *maxRunTime > 0 { go func() { - klog.Infof("Will fail after %s", *maxRunTime) + slog.Info("Configured max runtime", slog.Duration("duration", *maxRunTime)) for { select { case <-ctx.Done(): return case <-time.After(*maxRunTime): - klog.Infof("Max runtime reached; exiting") + slog.Info("Max runtime reached; exiting") exitCode = 1 cancel() return @@ -241,14 +243,16 @@ func mustCreateReaders(us []string) loadtest.LogReader { } rURL, err := url.Parse(u) if err != nil { - klog.Exitf("Invalid log reader URL %q: %v", u, err) + slog.Error("Invalid log reader URL", slog.String("u", u), slog.Any("error", err)) + os.Exit(1) } switch rURL.Scheme { case "http", "https": c, err := client.NewHTTPFetcher(rURL, hc) if err != nil { - klog.Exitf("Failed to create HTTP fetcher for %q: %v", u, err) + slog.Error("Failed to create HTTP fetcher", slog.String("u", u), slog.Any("error", err)) + os.Exit(1) } if *bearerToken != "" { c.SetAuthorizationHeader(fmt.Sprintf("Bearer %s", *bearerToken)) @@ -257,7 +261,8 @@ func mustCreateReaders(us []string) loadtest.LogReader { case "file": r = append(r, client.FileFetcher{Root: rURL.Path}) default: - klog.Exitf("Unsupported scheme %s on log URL", rURL.Scheme) + slog.Error("Unsupported scheme on log URL", slog.String("scheme", rURL.Scheme)) + os.Exit(1) } } return loadtest.NewRoundRobinReader(r) @@ -272,7 +277,8 @@ func mustCreateWriters(us []string) loadtest.LeafWriter { u += "add" wURL, err := url.Parse(u) if err != nil { - klog.Exitf("Invalid log writer URL %q: %v", u, err) + slog.Error("Invalid log writer URL", slog.String("u", u), slog.Any("error", 
err)) + os.Exit(1) } w = append(w, httpWriter(wURL, hc, *bearerTokenWrite)) } @@ -281,7 +287,7 @@ func mustCreateWriters(us []string) loadtest.LeafWriter { func httpWriter(u *url.URL, hc *http.Client, bearerToken string) loadtest.LeafWriter { cTrace := &httptrace.ClientTrace{ - GotConn: func(info httptrace.GotConnInfo) { klog.Infof("connection established %#v", info) }, + GotConn: func(info httptrace.GotConnInfo) { slog.Info("connection established %#v") }, } return func(ctx context.Context, newLeaf []byte) (uint64, error) { req, err := http.NewRequest(http.MethodPost, u.String(), bytes.NewReader(newLeaf)) @@ -292,7 +298,7 @@ func httpWriter(u *url.URL, hc *http.Client, bearerToken string) loadtest.LeafWr req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", bearerToken)) } reqCtx := req.Context() - if klog.V(2).Enabled() { + if slog.Default().Enabled(ctx, slog.LevelDebug) { reqCtx = httptrace.WithClientTrace(req.Context(), cTrace) } resp, err := hc.Do(req.WithContext(reqCtx)) diff --git a/internal/hammer/loadtest/analysis.go b/internal/hammer/loadtest/analysis.go index c5d0b94f6..e6a5b6bb1 100644 --- a/internal/hammer/loadtest/analysis.go +++ b/internal/hammer/loadtest/analysis.go @@ -19,8 +19,9 @@ import ( "errors" "time" + "log/slog" + movingaverage "github.com/RobinUS2/golang-moving-average" - "k8s.io/klog/v2" ) func NewHammerAnalyser(treeSizeFn func() uint64) *HammerAnalyser { @@ -117,11 +118,11 @@ func (a *HammerAnalyser) errorLoop(ctx context.Context) { return case <-tick.C: if pbCount > 0 { - klog.Warningf("%d requests received pushback from log", pbCount) + slog.Warn("received pushback from log", slog.Int("requests", pbCount)) pbCount = 0 } if lastErrCount > 0 { - klog.Warningf("(%d x) %s", lastErrCount, lastErr) + slog.Warn("errors", slog.Int("count", lastErrCount), slog.String("lasterror", lastErr)) lastErrCount = 0 } case err := <-a.ErrChan: @@ -131,7 +132,7 @@ func (a *HammerAnalyser) errorLoop(ctx context.Context) { } es := err.Error() if es != 
lastErr && lastErrCount > 0 { - klog.Warningf("(%d x) %s", lastErrCount, lastErr) + slog.Warn("errors", slog.Int("count", lastErrCount), slog.String("lasterror", lastErr)) lastErr = es lastErrCount = 0 continue diff --git a/internal/hammer/loadtest/hammer.go b/internal/hammer/loadtest/hammer.go index 033beb9cd..45d357199 100644 --- a/internal/hammer/loadtest/hammer.go +++ b/internal/hammer/loadtest/hammer.go @@ -17,11 +17,13 @@ package loadtest import ( "context" "errors" + "fmt" + "os" "time" "github.com/transparency-dev/tessera/client" - "k8s.io/klog/v2" + "log/slog" ) type HammerOpts struct { @@ -99,17 +101,18 @@ func (h *Hammer) updateCheckpointLoop(ctx context.Context) { size := h.tracker.Latest().Size _, _, _, err := h.tracker.Update(ctx) if err != nil { - klog.Warning(err) + slog.Warn("Failed to update LogStateTracker", slog.Any("error", err)) inconsistentErr := client.ErrInconsistency{} if errors.As(err, &inconsistentErr) { - klog.Fatalf("Last Good Checkpoint:\n%s\n\nFirst Bad Checkpoint:\n%s\n\n%v", string(inconsistentErr.SmallerRaw), string(inconsistentErr.LargerRaw), inconsistentErr) + slog.Error(fmt.Sprintf("Last Good Checkpoint:\n%s\n\nFirst Bad Checkpoint:\n%s\n\n%v", string(inconsistentErr.SmallerRaw), string(inconsistentErr.LargerRaw), inconsistentErr), slog.Any("error", inconsistentErr)) + os.Exit(1) } } newSize := h.tracker.Latest().Size if newSize > size { - klog.V(1).Infof("Updated checkpoint from %d to %d", size, newSize) + slog.Debug("Updated checkpoint", slog.Uint64("from", size), slog.Uint64("to", newSize)) } else { - klog.V(2).Infof("Checkpoint size unchanged: %d", newSize) + slog.Debug("Checkpoint size unchanged", slog.Uint64("size", newSize)) } } } diff --git a/internal/hammer/loadtest/tui.go b/internal/hammer/loadtest/tui.go index 34bcdd43a..7b4970453 100644 --- a/internal/hammer/loadtest/tui.go +++ b/internal/hammer/loadtest/tui.go @@ -16,15 +16,15 @@ package loadtest import ( "context" - "flag" "fmt" "strings" "time" + "log/slog" + 
movingaverage "github.com/RobinUS2/golang-moving-average" "github.com/gdamore/tcell/v2" "github.com/rivo/tview" - "k8s.io/klog/v2" ) type tuiController struct { @@ -68,38 +68,29 @@ func NewController(h *Hammer, a *HammerAnalyser) *tuiController { } func (c *tuiController) Run(ctx context.Context) { - // Redirect logs to the view - if err := flag.Set("logtostderr", "false"); err != nil { - klog.Exitf("Failed to set flag: %v", err) - } - if err := flag.Set("alsologtostderr", "false"); err != nil { - klog.Exitf("Failed to set flag: %v", err) - } - klog.SetOutput(c.logView) - go c.updateStatsLoop(ctx, 500*time.Millisecond) c.app.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { switch event.Rune() { case '+': - klog.Info("Increasing the read operations per second") + slog.Info("Increasing the read operations per second") c.hammer.readThrottle.Increase() case '-': - klog.Info("Decreasing the read operations per second") + slog.Info("Decreasing the read operations per second") c.hammer.readThrottle.Decrease() case '>': - klog.Info("Increasing the write operations per second") + slog.Info("Increasing the write operations per second") c.hammer.writeThrottle.Increase() case '<': - klog.Info("Decreasing the write operations per second") + slog.Info("Decreasing the write operations per second") c.hammer.writeThrottle.Decrease() case 'w': - klog.Info("Increasing the number of workers") + slog.Info("Increasing the number of workers") c.hammer.randomReaders.Grow(ctx) c.hammer.fullReaders.Grow(ctx) c.hammer.writers.Grow(ctx) case 'W': - klog.Info("Decreasing the number of workers") + slog.Info("Decreasing the number of workers") c.hammer.randomReaders.Shrink(ctx) c.hammer.fullReaders.Shrink(ctx) c.hammer.writers.Shrink(ctx) diff --git a/internal/hammer/loadtest/workers.go b/internal/hammer/loadtest/workers.go index 7ba1646d8..a2af807c4 100644 --- a/internal/hammer/loadtest/workers.go +++ b/internal/hammer/loadtest/workers.go @@ -22,9 +22,10 @@ import ( "math/rand/v2" 
"time" + "log/slog" + "github.com/transparency-dev/tessera/api/layout" "github.com/transparency-dev/tessera/client" - "k8s.io/klog/v2" ) // LeafWriter is the signature of a function which can write arbitrary data to a log. @@ -85,7 +86,7 @@ func (r *LeafReader) Run(ctx context.Context) { if i >= size { continue } - klog.V(2).Infof("LeafReader getting %d", i) + slog.Debug("LeafReader: getting index", slog.Uint64("i", i)) _, err := r.getLeaf(ctx, i, size) if err != nil { r.errChan <- fmt.Errorf("failed to get leaf %d: %v", i, err) @@ -99,7 +100,7 @@ func (r *LeafReader) getLeaf(ctx context.Context, i uint64, logSize uint64) ([]b return nil, fmt.Errorf("requested leaf %d >= log size %d", i, logSize) } if cached, _ := r.c.get(i); cached != nil { - klog.V(2).Infof("Using cached result for index %d", i) + slog.Debug("Using cached result for index", slog.Uint64("i", i)) return cached, nil } @@ -220,7 +221,7 @@ func (w *LogWriter) Run(ctx context.Context) { case w.leafChan <- lt: default: } - klog.V(2).Infof("Wrote leaf at index %d", index) + slog.Debug("Wrote leaf at index", slog.Uint64("i", index)) newLeaf = w.gen() } } diff --git a/internal/witness/witness.go b/internal/witness/witness.go index 423c865ae..d9ff617e5 100644 --- a/internal/witness/witness.go +++ b/internal/witness/witness.go @@ -24,18 +24,20 @@ import ( "fmt" "io" "net/http" + "os" "strconv" "strings" "sync" "time" + "log/slog" + "github.com/transparency-dev/tessera/client" "github.com/transparency-dev/tessera/internal/otel" "github.com/transparency-dev/tessera/internal/parse" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "golang.org/x/mod/sumdb/note" - "k8s.io/klog/v2" ) type contextKey int @@ -63,7 +65,8 @@ func init() { metric.WithDescription("Number of requests to the witnesses' submit endpoint"), metric.WithUnit("{call}")) if err != nil { - klog.Exitf("Failed to create witnessReqsTotal metric: %v", err) + slog.Error("Failed to create witnessReqsTotal metric", slog.Any("error", 
err)) + os.Exit(1) } witnessReqHistogram, err = meter.Int64Histogram( "tessera.witness.duration", @@ -71,14 +74,16 @@ func init() { metric.WithUnit("ms"), metric.WithExplicitBucketBoundaries(witnessHistogramBuckets...)) if err != nil { - klog.Exitf("Failed to create witnessReqHistogram metric: %v", err) + slog.Error("Failed to create witnessReqHistogram metric", slog.Any("error", err)) + os.Exit(1) } witnessRespsTotal, err = meter.Int64Counter( "tessera.witness.response", metric.WithDescription("Number of responses from the witnesses' submit endpoint"), metric.WithUnit("{call}")) if err != nil { - klog.Exitf("Failed to create witnessRespsTotal metric: %v", err) + slog.Error("Failed to create witnessRespsTotal metric", slog.Any("error", err)) + os.Exit(1) } } @@ -357,7 +362,7 @@ func (w *witness) update(ctx context.Context, cp []byte, size uint64, fetchProof return nil, fmt.Errorf("witness at %q replied with x.tlog.size %d, larger than log size %d", w.url, newWitSize, size) } - klog.Infof("Witness at %q replied with x.tlog.size %d != our hint %d, retrying", w.url, newWitSize, w.size) + slog.Info("Witness replied with x.tlog.size different than our hint. Retrying.", slog.String("url", w.url), slog.Uint64("reply", newWitSize), slog.Uint64("hinted", w.size)) w.size = newWitSize // Witnesses could cause this recursion to go on for longer than expected if they keep triggering this case. // This is why we pass the context with an incrementing value to detect this unlikely case. 
diff --git a/migrate.go b/migrate.go index b6870c6c3..0fe09709a 100644 --- a/migrate.go +++ b/migrate.go @@ -19,11 +19,12 @@ import ( "fmt" "sync/atomic" + "log/slog" + "github.com/cenkalti/backoff/v5" "github.com/transparency-dev/tessera/api/layout" "github.com/transparency-dev/tessera/client" "golang.org/x/sync/errgroup" - "k8s.io/klog/v2" ) type setEntryBundleFunc func(ctx context.Context, index uint64, partial uint8, bundle []byte) error @@ -61,7 +62,7 @@ type bundle struct { // // A call to this function will block until either the copying is done, or an error has occurred. func (c *copier) Copy(ctx context.Context, fromSize uint64, sourceSize uint64) error { - klog.Infof("Starting copy from %d to source size %d", fromSize, sourceSize) + slog.Info("Starting copy", slog.Uint64("from", fromSize), slog.Uint64("to", sourceSize)) if fromSize > sourceSize { return fmt.Errorf("from size %d > source size %d", fromSize, sourceSize) @@ -91,7 +92,7 @@ func (c *copier) BundlesCopied() uint64 { // populateWork sends entries to the `todo` work channel. // Each entry corresponds to an individual entryBundle which needs to be copied. 
func (m *copier) populateWork(from, treeSize uint64) { - klog.Infof("Spans for entry range [%d, %d)", from, treeSize) + slog.Info("Spans for entry range", slog.Uint64("from", from), slog.Uint64("to", treeSize)) defer close(m.todo) for ri := range layout.Range(from, treeSize-from, treeSize) { @@ -109,12 +110,12 @@ func (m *copier) worker(ctx context.Context) error { d, err := m.getEntryBundle(ctx, b.Index, uint8(b.Partial)) if err != nil { wErr := fmt.Errorf("failed to fetch entrybundle %d (p=%d): %v", b.Index, b.Partial, err) - klog.Infof("%v", wErr) + slog.Info("Fetch error", slog.Any("error", wErr)) return 0, wErr } if err := m.setEntryBundle(ctx, b.Index, b.Partial, d); err != nil { wErr := fmt.Errorf("failed to store entrybundle %d (p=%d): %v", b.Index, b.Partial, err) - klog.Infof("%v", wErr) + slog.Info("Store error", slog.Any("error", wErr)) return 0, wErr } return 1, nil @@ -122,7 +123,7 @@ func (m *copier) worker(ctx context.Context) error { backoff.WithMaxTries(10), backoff.WithBackOff(backoff.NewExponentialBackOff())) if err != nil { - klog.Infof("retry: %v", err) + slog.Info("Retry error", slog.Any("error", err)) return err } m.bundlesCopied.Add(n) diff --git a/migrate_lifecycle.go b/migrate_lifecycle.go index c3a3ac597..0695f82fe 100644 --- a/migrate_lifecycle.go +++ b/migrate_lifecycle.go @@ -21,11 +21,12 @@ import ( "strings" "time" + "log/slog" + "github.com/transparency-dev/tessera/api/layout" "github.com/transparency-dev/tessera/client" "github.com/transparency-dev/tessera/internal/migrate" "golang.org/x/sync/errgroup" - "k8s.io/klog/v2" ) // NewMigrationTarget returns a MigrationTarget, which allows a personality to "import" a C2SP @@ -132,7 +133,7 @@ func (mt *MigrationTarget) Migrate(ctx context.Context, numWorkers uint, sourceS } s, err := mt.writer.IntegratedSize(ctx) if err != nil { - klog.Warningf("Size: %v", err) + slog.Warn("Size", slog.Any("error", err)) } info := []string{} @@ -142,12 +143,12 @@ func (mt *MigrationTarget) Migrate(ctx 
context.Context, numWorkers uint, sourceS for _, f := range mt.followers { p, err := f.EntriesProcessed(ctx) if err != nil { - klog.Infof("%s EntriesProcessed(): %v", f.Name(), err) + slog.Info("EntriesProcessed", slog.String("name", f.Name()), slog.Any("error", err)) continue } info = append(info, progress(f.Name(), p, sourceSize)) } - klog.Infof("Progress: %s", strings.Join(info, ", ")) + slog.Info("Progress", slog.String("status", strings.Join(info, ", "))) } }() @@ -169,7 +170,7 @@ func (mt *MigrationTarget) Migrate(ctx context.Context, numWorkers uint, sourceS }) for _, f := range mt.followers { - klog.Infof("Starting %s follower", f.Name()) + slog.Info("Starting follower", slog.String("name", f.Name())) go f.Follow(cctx, mt.reader) errG.Go(awaitFollower(cctx, f, sourceSize)) } @@ -182,7 +183,7 @@ func (mt *MigrationTarget) Migrate(ctx context.Context, numWorkers uint, sourceS return fmt.Errorf("migration completed, but local root hash %x != source root hash %x", calculatedRoot, sourceRoot) } - klog.Infof("Migration successful.") + slog.Info("Migration successful.") return nil } @@ -199,11 +200,11 @@ func awaitFollower(ctx context.Context, f Follower, i uint64) func() error { pos, err := f.EntriesProcessed(ctx) if err != nil { - klog.Infof("%s EntriesProcessed(): %v", f.Name(), err) + slog.Info("EntriesProcessed", slog.String("name", f.Name()), slog.Any("error", err)) continue } if pos >= i { - klog.Infof("%s follower complete", f.Name()) + slog.Info("follower complete", slog.String("name", f.Name())) return nil } } diff --git a/storage/aws/antispam/aws.go b/storage/aws/antispam/aws.go index 53ff5645d..8d74abeac 100644 --- a/storage/aws/antispam/aws.go +++ b/storage/aws/antispam/aws.go @@ -28,11 +28,12 @@ import ( "sync/atomic" "time" + "log/slog" + "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/client" "github.com/transparency-dev/tessera/internal/otel" "go.opentelemetry.io/otel/trace" - "k8s.io/klog/v2" _ 
"github.com/go-sql-driver/mysql" ) @@ -365,7 +366,7 @@ func (f *follower) Follow(ctx context.Context, lr tessera.LogReader) { return ctx.Err() } - klog.V(1).Infof("Inserting %d entries into antispam database (follow from %d of size %d)", len(curEntries), followFrom, logSize) + slog.Debug("Inserting entries into antispam database", slog.Int("count", len(curEntries)), slog.Uint64("followfrom", followFrom), slog.Uint64("logsize", logSize)) args := make([]string, 0, len(curEntries)) vals := make([]any, 0, 2*len(curEntries)) @@ -396,7 +397,7 @@ func (f *follower) Follow(ctx context.Context, lr tessera.LogReader) { }) if err != nil { if err != errOutOfSync { - klog.Errorf("Failed to commit antispam population tx: %v", err) + slog.Error("Failed to commit antispam population tx", slog.Any("error", err)) } if next != nil { stop() diff --git a/storage/aws/antispam/aws_test.go b/storage/aws/antispam/aws_test.go index 35fb7d8e0..c410c0249 100644 --- a/storage/aws/antispam/aws_test.go +++ b/storage/aws/antispam/aws_test.go @@ -23,10 +23,11 @@ import ( "testing" "time" + "log/slog" + "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/api" "github.com/transparency-dev/tessera/testonly" - "k8s.io/klog/v2" ) var ( @@ -36,7 +37,6 @@ var ( // TestMain inits flags and runs tests. 
func TestMain(m *testing.M) { - klog.InitFlags(nil) // m.Run() will parse flags os.Exit(m.Run()) } @@ -44,7 +44,7 @@ func TestMain(m *testing.M) { func TestAntispam(t *testing.T) { ctx := t.Context() if canSkipMySQLTest(t, ctx) { - klog.Warningf("MySQL not available, skipping %s", t.Name()) + slog.Warn("MySQL not available, skipping", slog.String("name", t.Name())) t.Skip("MySQL not available, skipping test") } mustDropTables(t, ctx) @@ -110,7 +110,7 @@ func TestAntispam(t *testing.T) { func TestAntispamPushbackRecovers(t *testing.T) { ctx := t.Context() if canSkipMySQLTest(t, ctx) { - klog.Warningf("MySQL not available, skipping %s", t.Name()) + slog.Warn("MySQL not available, skipping", slog.String("name", t.Name())) t.Skip("MySQL not available, skipping test") } mustDropTables(t, ctx) diff --git a/storage/aws/aws.go b/storage/aws/aws.go index 29204e13e..7ae5cc7b5 100644 --- a/storage/aws/aws.go +++ b/storage/aws/aws.go @@ -45,6 +45,8 @@ import ( "sync" "time" + "log/slog" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -63,7 +65,6 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" - "k8s.io/klog/v2" "github.com/go-sql-driver/mysql" ) @@ -298,7 +299,7 @@ func (a *Appender) integrateEntriesJob(ctx context.Context) { } return nil }); err != nil { - klog.Errorf("integrateEntries: %v", err) + slog.Error("integrateEntries", slog.Any("error", err)) } } } @@ -328,7 +329,7 @@ func (a *Appender) publishCheckpointJob(ctx context.Context, pubInterval, republ } return nil }); err != nil { - klog.Error(err) + slog.Error("Failed to execute checkpoint publisher job", slog.Any("error", err)) } } } @@ -372,7 +373,7 @@ func (a *Appender) garbageCollectorJob(ctx context.Context, i time.Duration) { } return nil }); err != nil { - klog.Warning(err) + slog.Warn("Failed to execute garbage Collector job", slog.Any("error", err)) } } @@ -418,7 +419,7 @@ func (a 
*Appender) updateCheckpoint(ctx context.Context, size uint64, root []byt return fmt.Errorf("writeCheckpoint: %v", err) } - klog.V(2).Infof("Created and stored latest checkpoint: %d, %x", size, root) + slog.Debug("Created and stored latest checkpoint", slog.Uint64("size", size), slog.String("root", fmt.Sprintf("%x", root))) return nil } @@ -501,20 +502,20 @@ func (a *Appender) updateEntryBundles(ctx context.Context, fromSeq uint64, entri numAdded++ if entriesInBundle == layout.EntryBundleWidth { // This bundle is full, so we need to write it out... - klog.V(1).Infof("In-memory bundle idx %d is full, attempting write to S3", bundleIndex) + slog.Debug("In-memory bundle is full, attempting write to S3", slog.Uint64("bundleindex", bundleIndex)) goSetEntryBundle(ctx, bundleIndex, 0, bundleWriter.Bytes()) // ... and prepare the next entry bundle for any remaining entries in the batch bundleIndex++ entriesInBundle = 0 // Don't use Reset/Truncate here - the backing []bytes is still being used by goSetEntryBundle above. bundleWriter = &bytes.Buffer{} - klog.V(1).Infof("Starting to fill in-memory bundle idx %d", bundleIndex) + slog.Debug("Starting to fill in-memory bundle", slog.Uint64("bundleindex", bundleIndex)) } } // If we have a partial bundle remaining once we've added all the entries from the batch, // this needs writing out too. if entriesInBundle > 0 { - klog.V(1).Infof("Attempting to write in-memory partial bundle idx %d.%d to S3", bundleIndex, entriesInBundle) + slog.Debug("Attempting to write in-memory partial bundle idx . 
to S3", slog.Uint64("bundleindex", bundleIndex), slog.Uint64("entriesinbundle", entriesInBundle)) goSetEntryBundle(ctx, bundleIndex, uint8(entriesInBundle), bundleWriter.Bytes()) } return seqErr.Wait() @@ -566,16 +567,16 @@ func (m *MigrationStorage) AwaitIntegration(ctx context.Context, sourceSize uint case <-t.C: from, _, err := m.sequencer.currentTree(ctx) if err != nil && !errors.Is(err, os.ErrNotExist) { - klog.Warningf("readTreeState: %v", err) + slog.Warn("readTreeState", slog.Any("error", err)) continue } - klog.Infof("Integrate from %d (Target %d)", from, sourceSize) + slog.Info("Integrating", slog.Uint64("from", from), slog.Uint64("target", sourceSize)) newSize, newRoot, err := m.buildTree(ctx, sourceSize) if err != nil { - klog.Warningf("integrate: %v", err) + slog.Warn("integrate", slog.Any("error", err)) } if newSize == sourceSize { - klog.Infof("Integrated to %d with roothash %x", newSize, newRoot) + slog.Info("Integration complete", slog.Uint64("size", newSize), slog.String("root", fmt.Sprintf("%x", newRoot))) return newRoot, nil } } @@ -645,7 +646,7 @@ func (m *MigrationStorage) buildTree(ctx context.Context, sourceSize uint64) (ui defer func() { if tx != nil { if err := tx.Rollback(); err != nil && err != sql.ErrTxDone { - klog.Errorf("failed to rollback Tx: %v", err) + slog.Error("Failed to rollback Tx", slog.Any("error", err)) } } }() @@ -658,7 +659,7 @@ func (m *MigrationStorage) buildTree(ctx context.Context, sourceSize uint64) (ui return 0, nil, fmt.Errorf("failed to read IntCoord: %v", err) } - klog.V(1).Infof("Integrating from %d", from) + slog.Debug("Integrating", slog.Uint64("from", from)) lh, err := m.fetchLeafHashes(ctx, from, sourceSize, sourceSize) if err != nil { @@ -666,19 +667,19 @@ func (m *MigrationStorage) buildTree(ctx context.Context, sourceSize uint64) (ui } if len(lh) == 0 { - klog.Infof("Integrate: nothing to do, nothing done") + slog.Info("Integrate: nothing to do, nothing done") return from, rootHash, nil } added := 
uint64(len(lh)) - klog.Infof("Integrate: adding %d entries to existing tree size %d", len(lh), from) + slog.Info("Integrate: adding entries to existing tree size", slog.Int("count", len(lh)), slog.Uint64("from", from)) newRoot, err = integrate(ctx, from, lh, m.logStore) if err != nil { - klog.Warningf("integrate failed: %v", err) + slog.Warn("integrate failed", slog.Any("error", err)) return 0, nil, fmt.Errorf("integrate failed: %v", err) } newSize = from + added - klog.Infof("Integrate: added %d entries", added) + slog.Info("Integrate: added entries", slog.Uint64("count", added)) if _, err := tx.ExecContext(ctx, "UPDATE IntCoord SET seq=?, rootHash=? WHERE id=?", newSize, newRoot, 0); err != nil { return 0, nil, fmt.Errorf("update intcoord: %v", err) @@ -752,7 +753,7 @@ func (lrs *logResourceStore) setTile(ctx context.Context, level, index, logSize return err } tPath := layout.TilePath(level, index, layout.PartialTileSize(level, index, logSize)) - klog.V(2).Infof("StoreTile: %s (%d entries)", tPath, len(tile.Nodes)) + slog.Debug("StoreTile", slog.String("tpath", tPath), slog.Int("count", len(tile.Nodes))) err = lrs.objStore.setObjectIfNoneMatch(ctx, tPath, data, logContType, logCacheControl) opsHistogram.Record(ctx, time.Since(start).Milliseconds(), metric.WithAttributes(opNameKey.String("writeTile"))) @@ -854,7 +855,7 @@ func integrate(ctx context.Context, fromSeq uint64, lh [][]byte, lrs *logResourc if err := errG.Wait(); err != nil { return nil, err } - klog.V(1).Infof("New tree: %d, %x", newSize, newRoot) + slog.Debug("New tree", slog.Uint64("size", newSize), slog.String("root", fmt.Sprintf("%x", newRoot))) return newRoot, nil } @@ -1060,7 +1061,7 @@ func (s *mySQLSequencer) assignEntries(ctx context.Context, entries []*tessera.E defer func() { if tx != nil { if err := tx.Rollback(); err != nil { - klog.Errorf("failed to rollback Tx: %v", err) + slog.Error("failed to rollback Tx", slog.Any("error", err)) } } }() @@ -1128,7 +1129,7 @@ func (s *mySQLSequencer) 
consumeEntries(ctx context.Context, limit uint64, f con defer func() { if tx != nil { if err := tx.Rollback(); err != nil { - klog.Errorf("failed to rollback Tx: %v", err) + slog.Error("failed to rollback Tx", slog.Any("error", err)) } } }() @@ -1142,7 +1143,7 @@ func (s *mySQLSequencer) consumeEntries(ctx context.Context, limit uint64, f con } else if err != nil { return false, fmt.Errorf("failed to read IntCoord: %v", err) } - klog.V(1).Infof("Consuming from %d", fromSeq) + slog.Debug("Consuming", slog.Uint64("fromseq", fromSeq)) // Now read the sequenced starting at the index we got above. rows, err := tx.QueryContext(ctx, "SELECT seq, v FROM Seq WHERE id = ? AND seq >= ? ORDER BY seq LIMIT ? FOR UPDATE", 0, fromSeq, limit) @@ -1151,7 +1152,7 @@ func (s *mySQLSequencer) consumeEntries(ctx context.Context, limit uint64, f con } defer func() { if err := rows.Close(); err != nil { - klog.Warningf("rows.Close: %v", err) + slog.Warn("rows.Close", slog.Any("error", err)) } }() @@ -1181,7 +1182,7 @@ func (s *mySQLSequencer) consumeEntries(ctx context.Context, limit uint64, f con orderCheck += uint64(len(b)) } if len(seqsConsumed) == 0 && !forceUpdate { - klog.V(1).Info("Found no rows to sequence") + slog.Debug("Found no rows to sequence") return false, nil } @@ -1269,7 +1270,7 @@ func (s *mySQLSequencer) publishCheckpoint(ctx context.Context, minStaleActive, } cpAge := time.Since(time.Unix(pubAt, 0)) if cpAge < minStaleActive { - klog.V(1).Infof("publishCheckpoint: last checkpoint published %s ago (< required %s), not publishing new checkpoint", cpAge, minStaleActive) + slog.Debug("publishCheckpoint: last checkpoint published too recently, not publishing new checkpoint", slog.Duration("age", cpAge), slog.Duration("minstaleactive", minStaleActive)) publishCount.Add(ctx, 1, metric.WithAttributes(errorTypeKey.String("skipped"))) return nil } @@ -1294,12 +1295,12 @@ func (s *mySQLSequencer) publishCheckpoint(ctx context.Context, minStaleActive, } if !shouldPublish { - 
klog.V(1).Infof("publishCheckpoint: skipping publish because tree hasn't grown and previous checkpoint is too recent") + slog.Debug("publishCheckpoint: skipping publish because tree hasn't grown and previous checkpoint is too recent") publishCount.Add(ctx, 1, metric.WithAttributes(errorTypeKey.String("skipped_no_growth"))) return nil } - klog.V(1).Infof("publishCheckpoint: updating checkpoint (replacing %s old checkpoint)", cpAge) + slog.Debug("publishCheckpoint: updating checkpoint (replacing old checkpoint)", slog.Duration("age", cpAge)) if err := f(ctx, fromSeq, rootHash); err != nil { return err @@ -1500,11 +1501,11 @@ func (s *s3Storage) setObjectIfNoneMatch(ctx context.Context, objName string, da return fmt.Errorf("failed to fetch existing content for %q: %v", objName, err) } if !bytes.Equal(existing, data) { - klog.Errorf("Resource %q non-idempotent write:\n%s", objName, cmp.Diff(existing, data)) + slog.Error("Resource non-idempotent write", slog.String("objname", objName), slog.String("diff", cmp.Diff(existing, data))) return fmt.Errorf("precondition failed: resource content for %q differs from data to-be-written", objName) } - klog.V(2).Infof("setObjectIfNoneMatch: identical resource already exists for %q, continuing", objName) + slog.Debug("setObjectIfNoneMatch: identical resource already exists. 
Continuing", slog.String("objname", objName)) return nil } @@ -1535,7 +1536,7 @@ func (s *s3Storage) deleteObjectsWithPrefix(ctx context.Context, objPrefix strin }, } for _, k := range l.Contents { - klog.V(2).Infof("Deleting object %s", *k.Key) + slog.Debug("Deleting object", slog.String("key", *k.Key)) di.Delete.Objects = append(di.Delete.Objects, types.ObjectIdentifier{Key: k.Key}) } if _, err := s.s3Client.DeleteObjects(ctx, di); err != nil { @@ -1552,6 +1553,6 @@ Qu80vNj7tiOe0lkdc8hwZK9YxavT0+FTP++vU6DUKvpEOr1+VGTk3IBXKSX9AHz5xXRgAQAA` g, _ := base64.StdEncoding.DecodeString(d) r, _ := gzip.NewReader(bytes.NewReader(g)) t, _ := io.ReadAll(r) - klog.Infof("Running in non-AWS mode - see storage/aws/README.md for more details.") - klog.Infof("Here be dragons!\n%s", t) + slog.Info("Running in non-AWS mode - see storage/aws/README.md for more details.") + slog.Info("Here be dragons!", slog.String("art", string(t))) } diff --git a/storage/aws/aws_test.go b/storage/aws/aws_test.go index b47d8855e..c1a52807e 100644 --- a/storage/aws/aws_test.go +++ b/storage/aws/aws_test.go @@ -36,6 +36,8 @@ import ( "testing" "time" + "log/slog" + "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go" "github.com/google/go-cmp/cmp" @@ -46,7 +48,6 @@ import ( "github.com/transparency-dev/tessera/fsck" storage "github.com/transparency-dev/tessera/storage/internal" "golang.org/x/mod/sumdb/note" - "k8s.io/klog/v2" ) var ( @@ -56,7 +57,6 @@ var ( // TestMain inits flags and runs tests. 
func TestMain(m *testing.M) { - klog.InitFlags(nil) // m.Run() will parse flags os.Exit(m.Run()) } @@ -115,7 +115,7 @@ func mustDropTables(t *testing.T, ctx context.Context) { func TestMySQLSequencerAssignEntries(t *testing.T) { ctx := context.Background() if canSkipMySQLTest(t, ctx) { - klog.Warningf("MySQL not available, skipping %s", t.Name()) + slog.Warn("MySQL not available, skipping", slog.String("name", t.Name())) t.Skip("MySQL not available, skipping test") } // Clean tables in case there's already something in there. @@ -147,7 +147,7 @@ func TestMySQLSequencerAssignEntries(t *testing.T) { func TestMySQLSequencerPushback(t *testing.T) { ctx := context.Background() if canSkipMySQLTest(t, ctx) { - klog.Warningf("MySQL not available, skipping %s", t.Name()) + slog.Warn("MySQL not available, skipping", slog.String("name", t.Name())) t.Skip("MySQL not available, skipping test") } // Clean tables in case there's already something in there. @@ -207,7 +207,7 @@ func TestMySQLSequencerPushback(t *testing.T) { func TestMySQLSequencerRoundTrip(t *testing.T) { ctx := context.Background() if canSkipMySQLTest(t, ctx) { - klog.Warningf("MySQL not available, skipping %s", t.Name()) + slog.Warn("MySQL not available, skipping", slog.String("name", t.Name())) t.Skip("MySQL not available, skipping test") } // Clean tables in case there's already something in there. 
@@ -375,7 +375,7 @@ func TestBundleRoundtrip(t *testing.T) { func TestPublishTree(t *testing.T) { ctx := context.Background() if canSkipMySQLTest(t, ctx) { - klog.Warningf("MySQL not available, skipping %s", t.Name()) + slog.Warn("MySQL not available, skipping", slog.String("name", t.Name())) t.Skip("MySQL not available, skipping test") } @@ -473,7 +473,7 @@ func TestPublishTree(t *testing.T) { func TestGarbageCollect(t *testing.T) { ctx := t.Context() if canSkipMySQLTest(t, ctx) { - klog.Warningf("MySQL not available, skipping %s", t.Name()) + slog.Warn("MySQL not available, skipping", slog.String("name", t.Name())) t.Skip("MySQL not available, skipping test") } // Clean tables in case there's already something in there. @@ -595,7 +595,7 @@ func TestGarbageCollectOption(t *testing.T) { ctx := t.Context() if canSkipMySQLTest(t, ctx) { - klog.Warningf("MySQL not available, skipping %s", t.Name()) + slog.Warn("MySQL not available, skipping", slog.String("name", t.Name())) t.Skip("MySQL not available, skipping test") } // Clean tables in case there's already something in there. 
diff --git a/storage/aws/otel.go b/storage/aws/otel.go index d399f7908..4f76fe7b7 100644 --- a/storage/aws/otel.go +++ b/storage/aws/otel.go @@ -15,10 +15,12 @@ package aws import ( + "log/slog" + "os" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - "k8s.io/klog/v2" ) const name = "github.com/transparency-dev/tessera/storage/aws" @@ -49,7 +51,8 @@ func init() { metric.WithUnit("ms"), metric.WithExplicitBucketBoundaries(histogramBuckets...)) if err != nil { - klog.Exitf("Failed to create opsHistogram metric: %v", err) + slog.Error("Failed to create opsHistogram metric", slog.Any("error", err)) + os.Exit(1) } publishCount, err = meter.Int64Counter( @@ -57,6 +60,7 @@ func init() { metric.WithDescription("Number of checkpoint publication attempts by result"), metric.WithUnit("{call}")) if err != nil { - klog.Exitf("Failed to create checkpoint publication counter metric: %v", err) + slog.Error("Failed to create checkpoint publication counter metric", slog.Any("error", err)) + os.Exit(1) } } diff --git a/storage/gcp/antispam/gcp_test.go b/storage/gcp/antispam/gcp_test.go index 29853c25d..2f5a1d7b3 100644 --- a/storage/gcp/antispam/gcp_test.go +++ b/storage/gcp/antispam/gcp_test.go @@ -16,15 +16,17 @@ package gcp import ( "crypto/sha256" + "fmt" "os" "testing" "time" + "log/slog" + "cloud.google.com/go/spanner/spannertest" "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/api" "github.com/transparency-dev/tessera/testonly" - "k8s.io/klog/v2" ) type testLookup struct { @@ -88,7 +90,7 @@ func TestAntispamStorage(t *testing.T) { if err != nil { t.Fatalf("Await(%d): %v", i, err) } - klog.Infof("%d == %x", i, entry.Identity()) + slog.Info("integrated entry", slog.Int("i", i), slog.String("identity", fmt.Sprintf("%x", entry.Identity()))) entryIndex[string(testIDHash(e))] = idx.Index } @@ -104,7 +106,7 @@ func TestAntispamStorage(t *testing.T) { t.Logf("IntegratedSize: %v", err) continue } - 
klog.Infof("Wait for follower (%d) to catch up with tree (%d)", pos, sz) + slog.Info("Wait for follower to catch up with tree", slog.Uint64("pos", pos), slog.Uint64("sz", sz)) if pos >= sz { break } @@ -174,7 +176,7 @@ func TestAntispamPushbackRecovers(t *testing.T) { if err != nil { t.Fatalf("Await(%d): %v", i, err) } - klog.Infof("%d == %x", i, entry.Identity()) + slog.Info("integrated entry", slog.Int("i", i), slog.String("identity", fmt.Sprintf("%x", entry.Identity()))) entryIndex[string(testIDHash(e))] = idx.Index } @@ -193,7 +195,7 @@ func TestAntispamPushbackRecovers(t *testing.T) { t.Logf("IntegratedSize: %v", err) continue } - klog.Infof("Wait for follower (%d) to catch up with tree (%d)", pos, sz) + slog.Info("Wait for follower to catch up with tree", slog.Uint64("pos", pos), slog.Uint64("sz", sz)) if pos >= sz { break } diff --git a/storage/gcp/gcp.go b/storage/gcp/gcp.go index 46867ebc5..61b1cf502 100644 --- a/storage/gcp/gcp.go +++ b/storage/gcp/gcp.go @@ -725,7 +725,7 @@ func (a *Appender) updateEntryBundles(ctx context.Context, fromSeq uint64, entri // If we have a partial bundle remaining once we've added all the entries from the batch, // this needs writing out too. 
if entriesInBundle > 0 { - slog.DebugContext(ctx, "Attempting to write in-memory partial bundle to GCS", slog.Uint64("bundleIndex", bundleIndex), slog.Any("entriesInBundle", entriesInBundle)) + slog.DebugContext(ctx, "Attempting to write in-memory partial bundle to GCS", slog.Uint64("bundleIndex", bundleIndex), slog.Uint64("entriesInBundle", entriesInBundle)) goSetEntryBundle(ctx, bundleIndex, uint8(entriesInBundle), bundleWriter.Bytes()) } return seqErr.Wait() diff --git a/storage/gcp/otel.go b/storage/gcp/otel.go index 8902df7e8..0db3cfb85 100644 --- a/storage/gcp/otel.go +++ b/storage/gcp/otel.go @@ -15,10 +15,12 @@ package gcp import ( + "log/slog" + "os" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - "k8s.io/klog/v2" ) const name = "github.com/transparency-dev/tessera/storage/gcp" @@ -53,7 +55,8 @@ func init() { metric.WithUnit("ms"), metric.WithExplicitBucketBoundaries(histogramBuckets...)) if err != nil { - klog.Exitf("Failed to create opsHistogram metric: %v", err) + slog.Error("Failed to create opsHistogram metric", slog.Any("error", err)) + os.Exit(1) } checkpointAgeHistogram, err = meter.Int64Histogram( @@ -62,7 +65,8 @@ func init() { metric.WithUnit("ms"), metric.WithExplicitBucketBoundaries(histogramBuckets...)) if err != nil { - klog.Exitf("Failed to create checkpointAgeHistogram metric: %v", err) + slog.Error("Failed to create checkpointAgeHistogram metric", slog.Any("error", err)) + os.Exit(1) } publishCount, err = meter.Int64Counter( @@ -70,6 +74,7 @@ func init() { metric.WithDescription("Number of checkpoint publication attempts by result"), metric.WithUnit("{call}")) if err != nil { - klog.Exitf("Failed to create checkpoint publication counter metric: %v", err) + slog.Error("Failed to create checkpoint publication counter metric", slog.Any("error", err)) + os.Exit(1) } } diff --git a/storage/internal/integrate.go b/storage/internal/integrate.go index 71ec443a7..6a9117f86 100644 --- 
a/storage/internal/integrate.go +++ b/storage/internal/integrate.go @@ -18,9 +18,12 @@ import ( "context" "errors" "fmt" + "os" "reflect" "time" + "log/slog" + "github.com/transparency-dev/merkle/compact" "github.com/transparency-dev/merkle/rfc6962" "github.com/transparency-dev/tessera/api" @@ -29,7 +32,6 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/maps" - "k8s.io/klog/v2" ) var ( @@ -46,7 +48,8 @@ func init() { metric.WithDescription("Number of times the tree builder calculated a new root hash"), metric.WithUnit("{call}")) if err != nil { - klog.Exitf("Failed to create integrateCount metric: %v", err) + slog.Error("Failed to create integrateCount metric", slog.Any("error", err)) + os.Exit(1) } integrateBatchSizeHistogram, err = meter.Int64Histogram( @@ -55,7 +58,8 @@ func init() { metric.WithUnit("{leaves}"), metric.WithExplicitBucketBoundaries(batchSizeHistogramBuckets...)) if err != nil { - klog.Exitf("Failed to create integrateBatchSizeHistogram metric: %v", err) + slog.Error("Failed to create integrateBatchSizeHistogram metric", slog.Any("error", err)) + os.Exit(1) } integrateLatencyHistogram, err = meter.Int64Histogram( @@ -64,7 +68,8 @@ func init() { metric.WithUnit("ms"), metric.WithExplicitBucketBoundaries(latencyHistogramBuckets...)) if err != nil { - klog.Exitf("Failed to create integrateLatencyHistogram metric: %v", err) + slog.Error("Failed to create integrateLatencyHistogram metric", slog.Any("error", err)) + os.Exit(1) } } @@ -154,7 +159,7 @@ func (t *treeBuilder) integrate(ctx context.Context, fromSize uint64, leafHashes return 0, nil, nil, fmt.Errorf("invalid log state, unable to recalculate root: %w", err) } if len(leafHashes) == 0 { - klog.V(1).Infof("Nothing to do.") + slog.Debug("Nothing to do") // C2SP.org/log-tiles says all Merkle operations are those from RFC6962, we need to override // the root of the empty tree to match (compact.Range will return an empty slice). 
if fromSize == 0 { @@ -165,7 +170,7 @@ func (t *treeBuilder) integrate(ctx context.Context, fromSize uint64, leafHashes } span.AddEvent("Loaded state") - klog.V(1).Infof("Loaded state with roothash %x", r) + slog.Debug("Loaded state", slog.String("hash", fmt.Sprintf("%x", r))) // Create a new compact range which represents the update to the tree newRange := t.rf.NewEmptyRange(fromSize) tc := newTileWriteCache(fromSize, t.readCache.Get) @@ -204,7 +209,7 @@ func (t *treeBuilder) integrate(ctx context.Context, fromSize uint64, leafHashes // All calculation is now complete, all that remains is to store the new // tiles and updated log state. - klog.V(1).Infof("New log state: size 0x%x hash: %x", baseRange.End(), newRoot) + slog.Debug("New log state", slog.String("size", fmt.Sprintf("%x", baseRange.End())), slog.String("hash", fmt.Sprintf("%x", newRoot))) integrateCount.Add(ctx, 1) integrateBatchSizeHistogram.Record(ctx, int64(len(leafHashes))) @@ -234,7 +239,7 @@ func (r *tileReadCache) Get(ctx context.Context, tileID TileID, treeSize uint64) k := layout.TilePath(uint64(tileID.Level), tileID.Index, layout.PartialTileSize(tileID.Level, tileID.Index, treeSize)) e, ok := r.entries[k] if !ok { - klog.V(1).Infof("Readcache miss: %q", k) + slog.Debug("Readcache miss", slog.String("k", k)) span.AddEvent(fmt.Sprintf("Cache miss %q", k)) t, err := r.getTiles(ctx, []TileID{tileID}, treeSize) if err != nil { diff --git a/storage/internal/integrate_test.go b/storage/internal/integrate_test.go index 53fad48ba..c14bac36f 100644 --- a/storage/internal/integrate_test.go +++ b/storage/internal/integrate_test.go @@ -21,13 +21,14 @@ import ( "sync" "testing" + "log/slog" + "github.com/google/go-cmp/cmp" "github.com/transparency-dev/merkle/compact" "github.com/transparency-dev/merkle/rfc6962" "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/api" "github.com/transparency-dev/tessera/api/layout" - "k8s.io/klog/v2" ) func TestNewRangeFetchesTiles(t *testing.T) { @@ 
-227,7 +228,7 @@ func (m *memTileStore[T]) getTiles(_ context.Context, ids []TileID, treeSize uin r := make([]*T, len(ids)) for i, id := range ids { k := layout.TilePath(id.Level, id.Index, layout.PartialTileSize(id.Level, id.Index, treeSize)) - klog.V(1).Infof("mem.getTile(%q, %d)", k, treeSize) + slog.Debug("mem.getTile", slog.String("k", k), slog.Uint64("treesize", treeSize)) d, ok := m.mem[k] if !ok { continue @@ -242,7 +243,7 @@ func (m *memTileStore[T]) setTile(_ context.Context, id TileID, treeSize uint64, defer m.Unlock() k := layout.TilePath(id.Level, id.Index, layout.PartialTileSize(id.Level, id.Index, treeSize)) - klog.V(1).Infof("mem.setTile(%q, %d)", k, treeSize) + slog.Debug("mem.setTile", slog.String("k", k), slog.Uint64("treesize", treeSize)) _, ok := m.mem[k] if ok { return fmt.Errorf("%q is already present", k) diff --git a/storage/posix/antispam/badger.go b/storage/posix/antispam/badger.go index f5ae8b20f..2ed0ef3ee 100644 --- a/storage/posix/antispam/badger.go +++ b/storage/posix/antispam/badger.go @@ -27,12 +27,13 @@ import ( "sync/atomic" "time" + "log/slog" + "github.com/dgraph-io/badger/v4" "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/client" "github.com/transparency-dev/tessera/internal/otel" "go.opentelemetry.io/otel/trace" - "k8s.io/klog/v2" ) const ( @@ -327,7 +328,7 @@ func (f *follower) Follow(ctx context.Context, lr tessera.LogReader) { return fmt.Errorf("entryReader.next: %v", err) } if wantIdx := followFrom + uint64(i); e.Index != wantIdx { - klog.Infof("at %d, expected %d - out of sync", e.Index, wantIdx) + slog.Info("Out of sync", slog.Uint64("index", e.Index), slog.Uint64("wantidx", wantIdx)) // We're out of sync return errOutOfSync } @@ -365,7 +366,7 @@ func (f *follower) Follow(ctx context.Context, lr tessera.LogReader) { }) if err != nil { if err != errOutOfSync { - klog.Errorf("Failed to commit antispam population tx: %v", err) + slog.Error("Failed to commit antispam population tx", 
slog.Any("error", err)) } stop() next = nil diff --git a/storage/posix/antispam/badger_test.go b/storage/posix/antispam/badger_test.go index bb899d9f0..3f2f9d74a 100644 --- a/storage/posix/antispam/badger_test.go +++ b/storage/posix/antispam/badger_test.go @@ -16,13 +16,15 @@ package badger import ( "crypto/sha256" + "fmt" "testing" "time" + "log/slog" + "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/api" "github.com/transparency-dev/tessera/testonly" - "k8s.io/klog/v2" ) type testLookup struct { @@ -85,7 +87,7 @@ func TestAntispamStorage(t *testing.T) { if err != nil { t.Fatalf("Await(%d): %v", i, err) } - klog.Infof("%d == %x", i, entry.Identity()) + slog.Info("integrated entry", slog.Int("i", i), slog.String("identity", fmt.Sprintf("%x", entry.Identity()))) entryIndex[string(testIDHash(e))] = idx.Index } @@ -101,7 +103,7 @@ func TestAntispamStorage(t *testing.T) { t.Logf("IntegratedSize: %v", err) continue } - klog.Infof("Wait for follower (%d) to catch up with tree (%d)", pos, sz) + slog.Info("Wait for follower to catch up with tree", slog.Uint64("pos", pos), slog.Uint64("sz", sz)) if pos >= sz { break } @@ -169,7 +171,7 @@ func TestAntispamPushbackRecovers(t *testing.T) { if err != nil { t.Fatalf("Await(%d): %v", i, err) } - klog.Infof("%d == %x", i, entry.Identity()) + slog.Info("integrated entry", slog.Int("i", i), slog.String("identity", fmt.Sprintf("%x", entry.Identity()))) entryIndex[string(testIDHash(e))] = idx.Index } @@ -188,7 +190,7 @@ func TestAntispamPushbackRecovers(t *testing.T) { t.Logf("IntegratedSize: %v", err) continue } - klog.Infof("Wait for follower (%d) to catch up with tree (%d)", pos, sz) + slog.Info("Wait for follower to catch up with tree", slog.Uint64("pos", pos), slog.Uint64("sz", sz)) if pos >= sz { break } diff --git a/storage/posix/file_ops.go b/storage/posix/file_ops.go index f7c31a9dd..71da0c7f9 100644 --- a/storage/posix/file_ops.go +++ b/storage/posix/file_ops.go @@ -24,7 +24,7 @@ import (
"strings" "syscall" - "k8s.io/klog/v2" + "log/slog" ) const ( @@ -120,7 +120,7 @@ func createEx(name string, d []byte) error { } defer func() { if err := os.Remove(tmpName); err != nil { - klog.Warningf("Failed to remove temporary file %q: %v", tmpName, err) + slog.Warn("Failed to remove temporary file", slog.String("tmpname", tmpName), slog.Any("error", err)) } }() diff --git a/storage/posix/files.go b/storage/posix/files.go index f19f8ef5e..e98db6659 100644 --- a/storage/posix/files.go +++ b/storage/posix/files.go @@ -29,6 +29,8 @@ import ( "syscall" "time" + "log/slog" + "github.com/transparency-dev/merkle/rfc6962" "github.com/transparency-dev/tessera" "github.com/transparency-dev/tessera/api" @@ -38,7 +40,6 @@ import ( "github.com/transparency-dev/tessera/internal/parse" storage "github.com/transparency-dev/tessera/storage/internal" "go.opentelemetry.io/otel/metric" - "k8s.io/klog/v2" ) const ( @@ -177,7 +178,7 @@ func (a *appender) publishCheckpointJob(ctx context.Context, pubInterval, republ ctx, cancel := context.WithTimeout(ctx, defaultPublicationTimeout) defer cancel() if err := a.publishCheckpoint(ctx, pubInterval, republishInterval); err != nil { - klog.Warningf("publishCheckpoint failed: %v", err) + slog.Warn("publishCheckpoint failed", slog.Any("error", err)) } }() } @@ -298,7 +299,7 @@ func (a *appender) sequenceBatch(ctx context.Context, entries []*tessera.Entry) size = 0 } a.curSize = size - klog.V(1).Infof("Sequencing from %d", a.curSize) + slog.Debug("Sequencing", slog.Uint64("from", a.curSize)) if len(entries) == 0 { return nil @@ -359,7 +360,7 @@ func (a *appender) sequenceBatch(ctx context.Context, entries []*tessera.Entry) // If this is broken out into an async process, we'll need to update the implementation of NextIndex, too. 
newSize, newRoot, err := doIntegrate(ctx, seq, leafHashes, a.logStorage) if err != nil { - klog.Errorf("Integrate failed: %v", err) + slog.Error("Integrate failed", slog.Any("error", err)) return err } if err := a.s.writeTreeState(ctx, newSize, newRoot); err != nil { @@ -386,7 +387,7 @@ func doIntegrate(ctx context.Context, fromSeq uint64, leafHashes [][]byte, ls *l newSize, newRoot, tiles, err := storage.Integrate(ctx, getTiles, fromSeq, leafHashes) if err != nil { - klog.Errorf("Integrate: %v", err) + slog.Error("Integrate", slog.Any("error", err)) return 0, nil, fmt.Errorf("error in Integrate: %v", err) } for k, v := range tiles { @@ -395,7 +396,7 @@ func doIntegrate(ctx context.Context, fromSeq uint64, leafHashes [][]byte, ls *l } } - klog.V(1).Infof("New tree: %d, %x", newSize, newRoot) + slog.Debug("New tree", slog.Uint64("size", newSize), slog.String("hash", fmt.Sprintf("%x", newRoot))) return newSize, newRoot, nil } @@ -442,7 +443,7 @@ func (lrs *logResourceStorage) readTile(ctx context.Context, level, index uint64 // stored with a .xx suffix where xx is the number of "tile leaves" in hex. func (lrs *logResourceStorage) storeTile(ctx context.Context, level, index, logSize uint64, tile *api.HashTile) error { tileSize := uint64(len(tile.Nodes)) - klog.V(2).Infof("StoreTile: level %d index %x ts: %x", level, index, tileSize) + slog.Debug("StoreTile", slog.Uint64("level", level), slog.String("index", fmt.Sprintf("%x", index)), slog.String("tilesize", fmt.Sprintf("%x", tileSize))) if tileSize == 0 || tileSize > layout.TileWidth { return fmt.Errorf("tileSize %d must be > 0 and <= %d", tileSize, layout.TileWidth) } @@ -470,7 +471,7 @@ func (lrs *logResourceStorage) writeTile(ctx context.Context, level, index uint6 } // Clean up old partial tiles by symlinking them to the new full tile. 
for _, p := range partials { - klog.V(2).Infof("relink partial %s to %s", p, tPath) + slog.Debug("relink partial", slog.String("p", p), slog.String("tpath", tPath)) // We have to do a little dance here to get POSIX atomicity: // 1. Create a new temporary symlink to the full tile // 2. Rename the temporary symlink over the top of the old partial tile @@ -531,7 +532,7 @@ func (a *appender) initialise(ctx context.Context) error { return fmt.Errorf("failed to load checkpoint for log: %v", err) } // Create the directory structure and write out an empty checkpoint - klog.Infof("Initializing directory for POSIX log at %q (this should only happen ONCE per log!)", a.s.cfg.Path) + slog.Info("Initializing directory for POSIX log (this should only happen ONCE per log!)", slog.String("path", a.s.cfg.Path)) if err := a.s.writeTreeState(ctx, 0, rfc6962.DefaultHasher.EmptyRoot()); err != nil { return fmt.Errorf("failed to write tree-state checkpoint: %v", err) } @@ -558,7 +559,7 @@ func (s *Storage) ensureVersion(version uint16) error { versionFile := filepath.Join(stateDir, "version") if _, err := s.stat(versionFile); errors.Is(err, os.ErrNotExist) { - klog.V(1).Infof("No version file exists, creating") + slog.Debug("No version file exists, creating") data := fmt.Appendf(nil, "%d", version) if err := s.createExclusive(versionFile, data); err != nil { return fmt.Errorf("failed to create version file: %v", err) @@ -637,7 +638,7 @@ func (a *appender) publishCheckpoint(ctx context.Context, minStalenessActive, mi } defer func() { if err := unlock(); err != nil { - klog.Warningf("unlock(%s): %v", publishLock, err) + slog.Warn("unlock", slog.String("publishlock", publishLock), slog.Any("error", err)) } }() @@ -646,20 +647,20 @@ func (a *appender) publishCheckpoint(ctx context.Context, minStalenessActive, mi cpExists := true info, err := a.s.stat(layout.CheckpointPath) if errors.Is(err, os.ErrNotExist) { - klog.V(1).Infof("No checkpoint exists, publishing") + slog.Debug("No checkpoint 
exists, publishing") cpExists = false } else if err != nil { return fmt.Errorf("stat(%s): %v", layout.CheckpointPath, err) } else { publishedAge = time.Since(info.ModTime()) if publishedAge < minStalenessActive { - klog.V(1).Infof("publishCheckpoint: skipping publish because previous checkpoint published %v ago, less than %v", publishedAge, minStalenessActive) + slog.Debug("publishCheckpoint: skipping publish because previous checkpoint too fresh", slog.Duration("age", publishedAge), slog.Duration("minstalenessactive", minStalenessActive)) publishCount.Add(ctx, 1, metric.WithAttributes(errorTypeKey.String("skipped"))) return nil } publishedSize, err = a.publishedSize(ctx) if err != nil { - klog.V(1).Infof("publishCheckpoint: skipping publish because unable to determine previously published size: %v", err) + slog.Debug("publishCheckpoint: skipping publish because unable to determine previously published size", slog.Any("error", err)) return err } } @@ -671,7 +672,7 @@ func (a *appender) publishCheckpoint(ctx context.Context, minStalenessActive, mi } if cpExists && size == publishedSize { if minStalenessRepub == 0 || publishedAge < minStalenessRepub { - klog.V(1).Infof("publishCheckpoint: skipping publish because tree hasn't grown and previous checkpoint is too recent") + slog.Debug("publishCheckpoint: skipping publish because tree hasn't grown and previous checkpoint is too recent") publishCount.Add(ctx, 1, metric.WithAttributes(errorTypeKey.String("skipped_no_growth"))) return nil } @@ -686,7 +687,7 @@ func (a *appender) publishCheckpoint(ctx context.Context, minStalenessActive, mi return fmt.Errorf("createOverwrite(%s): %v", layout.CheckpointPath, err) } - klog.V(2).Infof("Published latest checkpoint: %d, %x", size, root) + slog.Debug("Published latest checkpoint", slog.Uint64("size", size), slog.String("root", fmt.Sprintf("%x", root))) posixOpsHistogram.Record(ctx, time.Since(now).Milliseconds(), metric.WithAttributes(opNameKey.String("publishCheckpoint"))) 
publishCount.Add(ctx, 1) @@ -739,12 +740,12 @@ func (a *appender) garbageCollectorJob(ctx context.Context, i time.Duration) { // checkpoint! pubSize, err := a.publishedSize(ctx) if err != nil { - klog.Warningf("GarbageCollect: %v", err) + slog.Warn("GarbageCollect", slog.Any("error", err)) return } if err := a.s.garbageCollect(ctx, pubSize, maxBundlesPerRun, a.logStorage.entriesPath); err != nil { - klog.Warningf("GarbageCollect failed: %v", err) + slog.Warn("GarbageCollect failed", slog.Any("error", err)) return } }() @@ -799,7 +800,7 @@ func (s *Storage) garbageCollect(ctx context.Context, treeSize uint64, maxBundle } defer func() { if err := unlock(); err != nil { - klog.Warningf("unlock(%s): %v", gcStateLock, err) + slog.Warn("unlock", slog.String("gcstatelock", gcStateLock), slog.Any("error", err)) } }() @@ -888,7 +889,7 @@ func (s *Storage) stat(p string) (os.FileInfo, error) { // The provided path is interpreted relative to the log root. func (s *Storage) removeDirAll(p string) error { p = filepath.Join(s.cfg.Path, p) - klog.V(3).Infof("rm %s", p) + slog.Debug("rm", slog.String("p", p)) if err := os.RemoveAll(p); err != nil && !errors.Is(err, os.ErrNotExist) { return err } @@ -931,11 +932,11 @@ func (m *MigrationStorage) AwaitIntegration(ctx context.Context, sourceSize uint case <-t.C: } if err := m.buildTree(ctx, sourceSize); err != nil { - klog.Warningf("buildTree: %v", err) + slog.Warn("buildTree", slog.Any("error", err)) } s, r, err := m.s.readTreeState(ctx) if err != nil { - klog.Warningf("readTreeState: %v", err) + slog.Warn("readTreeState", slog.Any("error", err)) } if s == sourceSize { return r, nil @@ -972,7 +973,7 @@ func (m *MigrationStorage) initialise(ctx context.Context) error { return fmt.Errorf("failed to load checkpoint for log: %v", err) } // Create the directory structure and write out an empty checkpoint - klog.Infof("Initializing directory for POSIX log at %q (this should only happen ONCE per log!)", m.s.cfg.Path) + 
slog.Info("Initializing directory for POSIX log (this should only happen ONCE per log!)", slog.String("path", m.s.cfg.Path)) if err := m.s.writeTreeState(ctx, 0, rfc6962.DefaultHasher.EmptyRoot()); err != nil { return fmt.Errorf("failed to write tree-state checkpoint: %v", err) } @@ -1016,14 +1017,14 @@ func (m *MigrationStorage) buildTree(ctx context.Context, targetSize uint64) err size = 0 } m.curSize = size - klog.V(1).Infof("Building from %d", m.curSize) + slog.Debug("Building", slog.Uint64("from", m.curSize)) lh, err := m.fetchLeafHashes(ctx, size, targetSize, targetSize) if err != nil { if errors.Is(err, os.ErrNotExist) { // We just don't have the bundle yet. // Bail quietly and the caller can retry. - klog.V(1).Infof("fetchLeafHashes(%d, %d): %v", size, targetSize, err) + slog.Debug("fetchLeafHashes", slog.Uint64("size", size), slog.Uint64("targetsize", targetSize), slog.Any("error", err)) return nil } return fmt.Errorf("fetchLeafHashes(%d, %d): %v", size, targetSize, err) diff --git a/storage/posix/otel.go b/storage/posix/otel.go index 749bf2a23..fb9a1e088 100644 --- a/storage/posix/otel.go +++ b/storage/posix/otel.go @@ -15,10 +15,12 @@ package posix import ( + "log/slog" + "os" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - "k8s.io/klog/v2" ) const name = "github.com/transparency-dev/tessera/storage/posix" @@ -47,7 +49,8 @@ func init() { metric.WithUnit("ms"), metric.WithExplicitBucketBoundaries(histogramBuckets...)) if err != nil { - klog.Exitf("Failed to create posixOptsHistogram metric: %v", err) + slog.Error("Failed to create posixOptsHistogram metric", slog.Any("error", err)) + os.Exit(1) } publishCount, err = meter.Int64Counter( @@ -55,6 +58,7 @@ func init() { metric.WithDescription("Number of checkpoint publication attempts by result"), metric.WithUnit("{call}")) if err != nil { - klog.Exitf("Failed to create checkpoint publication counter metric: %v", err) + slog.Error("Failed to create 
checkpoint publication counter metric", slog.Any("error", err)) + os.Exit(1) } }