From b377c703eab2b4c7e1350eeacb752a7ebbdef262 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Tue, 23 Sep 2025 16:13:58 +0500 Subject: [PATCH 01/27] Codeq settings --- pkg/codec/decode.go | 17 +++++++++++++++++ pkg/codec/raptorq.go | 2 +- supernode/services/cascade/download.go | 10 ++++++++-- 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/pkg/codec/decode.go b/pkg/codec/decode.go index beeed5a8..fe77d752 100644 --- a/pkg/codec/decode.go +++ b/pkg/codec/decode.go @@ -39,11 +39,28 @@ func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeRespons defer processor.Free() symbolsDir := filepath.Join(rq.symbolsBaseDir, req.ActionID) + // Ensure a clean scratch directory (avoid contamination from previous attempts) + if err := os.RemoveAll(symbolsDir); err != nil { + fields[logtrace.FieldError] = err.Error() + return DecodeResponse{}, fmt.Errorf("cleanup decode dir %s: %w", symbolsDir, err) + } if err := os.MkdirAll(symbolsDir, 0o755); err != nil { fields[logtrace.FieldError] = err.Error() return DecodeResponse{}, fmt.Errorf("mkdir %s: %w", symbolsDir, err) } + // Validate layout before writing any symbols + if len(req.Layout.Blocks) == 0 { + fields[logtrace.FieldError] = "empty layout" + return DecodeResponse{}, fmt.Errorf("invalid layout: no blocks present") + } + for _, blk := range req.Layout.Blocks { + if len(blk.Symbols) == 0 { + fields[logtrace.FieldError] = fmt.Sprintf("block_%d has no symbols", blk.BlockID) + return DecodeResponse{}, fmt.Errorf("invalid layout: block %d has no symbols", blk.BlockID) + } + } + // Build symbol->block mapping from layout and ensure block directories exist symbolToBlock := make(map[string]int) for _, blk := range req.Layout.Blocks { diff --git a/pkg/codec/raptorq.go b/pkg/codec/raptorq.go index 8e0c1c6c..4564bc1b 100644 --- a/pkg/codec/raptorq.go +++ b/pkg/codec/raptorq.go @@ -17,7 +17,7 @@ const ( // Limit RaptorQ processor memory usage to ~2 GiB rqMaxMemoryMB uint64 = 2 * 1024 // MB // 
Concurrency tuned for 2 GiB limit and typical 8+ core CPUs - rqConcurrency uint64 = 6 + rqConcurrency uint64 = 1 // Target single-block output for up to 1 GiB files with padding headroom (~1.25 GiB) rqBlockSize int = 1280 * 1024 * 1024 // bytes (1,280 MiB) ) diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index b8220045..363834bc 100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -163,7 +163,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( if targetRequiredCount < 1 && totalSymbols > 0 { targetRequiredCount = 1 } - logtrace.Info(ctx, "Retrieving all symbols for decode", fields) + logtrace.Info(ctx, "Retrieving target-required symbols for decode", fields) // Enable retrieve metrics capture for this action cm.StartRetrieveCapture(actionID) @@ -173,7 +173,13 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( retrieveStart := time.Now() // Tag context with metrics task ID (actionID) ctxRetrieve := cm.WithTaskID(ctx, actionID) - symbols, err := task.P2PClient.BatchRetrieve(ctxRetrieve, allSymbols, totalSymbols, actionID) + // Retrieve only a fraction of symbols (targetRequiredCount) based on redundancy + // The DHT will short-circuit once it finds the required number across the provided keys + reqCount := targetRequiredCount + if reqCount > totalSymbols { + reqCount = totalSymbols + } + symbols, err := task.P2PClient.BatchRetrieve(ctxRetrieve, allSymbols, reqCount, actionID) if err != nil { fields[logtrace.FieldError] = err.Error() logtrace.Error(ctx, "batch retrieve failed", fields) From 6a225d3fbf1a4e299a9ec811d9c5635efe62c793 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Tue, 23 Sep 2025 20:22:39 +0500 Subject: [PATCH 02/27] Fix concurrent write panic in metrics --- pkg/p2pmetrics/metrics.go | 104 ++++++++++++++++++++++++++++++++------ 1 file changed, 88 insertions(+), 16 deletions(-) diff --git a/pkg/p2pmetrics/metrics.go 
b/pkg/p2pmetrics/metrics.go index 945225db..165f0eaa 100644 --- a/pkg/p2pmetrics/metrics.go +++ b/pkg/p2pmetrics/metrics.go @@ -197,6 +197,7 @@ func BuildStoreEventPayloadFromCollector(taskID string) map[string]any { // Retrieve session type retrieveSession struct { + mu sync.RWMutex CallsByIP map[string][]Call FoundLocal int FoundNet int @@ -208,33 +209,94 @@ type retrieveSession struct { var retrieveSessions = struct{ m map[string]*retrieveSession }{m: map[string]*retrieveSession{}} -// RegisterRetrieveBridge hooks retrieve callbacks into the retrieve collector. +// internal event channel for retrieve metrics (per task) +type retrieveEvent struct { + typ int // 0: per-node call, 1: found-local update + call Call + n int +} + +var retrieveEventChans = struct { + mu sync.Mutex + m map[string]chan retrieveEvent +}{m: map[string]chan retrieveEvent{}} + +// StartRetrieveCapture hooks retrieve callbacks into a buffered channel and a +// single goroutine that serializes updates to avoid concurrent map writes. 
func StartRetrieveCapture(taskID string) { - RegisterRetrieveHook(taskID, func(c Call) { - s := retrieveSessions.m[taskID] - if s == nil { - s = &retrieveSession{CallsByIP: map[string][]Call{}} - retrieveSessions.m[taskID] = s + // Create or get session upfront + s := retrieveSessions.m[taskID] + if s == nil { + s = &retrieveSession{CallsByIP: map[string][]Call{}} + retrieveSessions.m[taskID] = s + } + + // Per-task buffered channel + ch := make(chan retrieveEvent, 4096) + retrieveEventChans.mu.Lock() + retrieveEventChans.m[taskID] = ch + retrieveEventChans.mu.Unlock() + + // Worker goroutine to serialize writes + go func(taskID string, ch <-chan retrieveEvent) { + for ev := range ch { + sess := retrieveSessions.m[taskID] + if sess == nil { + sess = &retrieveSession{CallsByIP: map[string][]Call{}} + retrieveSessions.m[taskID] = sess + } + switch ev.typ { + case 0: // per-node call + key := ev.call.IP + if key == "" { + key = ev.call.Address + } + sess.mu.Lock() + if sess.CallsByIP == nil { + sess.CallsByIP = map[string][]Call{} + } + sess.CallsByIP[key] = append(sess.CallsByIP[key], ev.call) + sess.mu.Unlock() + case 1: // found-local update + sess.FoundLocal = ev.n + } } - key := c.IP - if key == "" { - key = c.Address + }(taskID, ch) + + // Register hooks that enqueue events (non-blocking) + RegisterRetrieveHook(taskID, func(c Call) { + retrieveEventChans.mu.Lock() + ch, ok := retrieveEventChans.m[taskID] + retrieveEventChans.mu.Unlock() + if ok { + select { + case ch <- retrieveEvent{typ: 0, call: c}: + default: // drop if buffer is full + } } - s.CallsByIP[key] = append(s.CallsByIP[key], c) }) RegisterFoundLocalHook(taskID, func(n int) { - s := retrieveSessions.m[taskID] - if s == nil { - s = &retrieveSession{CallsByIP: map[string][]Call{}} - retrieveSessions.m[taskID] = s + retrieveEventChans.mu.Lock() + ch, ok := retrieveEventChans.m[taskID] + retrieveEventChans.mu.Unlock() + if ok { + select { + case ch <- retrieveEvent{typ: 1, n: n}: + default: + } } - 
s.FoundLocal = n }) } func StopRetrieveCapture(taskID string) { UnregisterRetrieveHook(taskID) UnregisterFoundLocalHook(taskID) + retrieveEventChans.mu.Lock() + if ch, ok := retrieveEventChans.m[taskID]; ok { + delete(retrieveEventChans.m, taskID) + close(ch) + } + retrieveEventChans.mu.Unlock() } // SetRetrieveBatchSummary sets counts for a retrieval attempt. @@ -284,6 +346,16 @@ func BuildDownloadEventPayloadFromCollector(taskID string) map[string]any { }, } } + // Create a snapshot copy of CallsByIP to avoid concurrent map access + s.mu.RLock() + callsCopy := make(map[string][]Call, len(s.CallsByIP)) + for k, v := range s.CallsByIP { + vv := make([]Call, len(v)) + copy(vv, v) + callsCopy[k] = vv + } + s.mu.RUnlock() + return map[string]any{ "retrieve": map[string]any{ "keys": s.Keys, @@ -292,7 +364,7 @@ func BuildDownloadEventPayloadFromCollector(taskID string) map[string]any { "found_net": s.FoundNet, "retrieve_ms": s.RetrieveMS, "decode_ms": s.DecodeMS, - "calls_by_ip": s.CallsByIP, + "calls_by_ip": callsCopy, }, } } From 306d295615f67130f86689f9a6dc8738fed3cf02 Mon Sep 17 00:00:00 2001 From: j-rafique Date: Wed, 24 Sep 2025 15:06:58 +0500 Subject: [PATCH 03/27] Enable Pprof --- supernode/cmd/start.go | 37 +++++++++++++++++++++++++++++++++++++ supernode/config/config.go | 7 +++++++ 2 files changed, 44 insertions(+) diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index befaf85d..c8914270 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -3,9 +3,12 @@ package cmd import ( "context" "fmt" + "net/http" + _ "net/http/pprof" "os" "os/signal" "path/filepath" + "strings" "syscall" "github.com/LumeraProtocol/supernode/v2/p2p" @@ -139,6 +142,40 @@ The supernode will connect to the Lumera network and begin participating in the return fmt.Errorf("failed to create gateway server: %w", err) } + // Start profiling server if enabled or on testnet + isTestnet := strings.Contains(strings.ToLower(appConfig.LumeraClientConfig.ChainID), "testnet") + 
shouldEnableProfiling := appConfig.ProfilingConfig.Enabled || isTestnet + + if shouldEnableProfiling { + bindAddr := appConfig.ProfilingConfig.BindAddress + if bindAddr == "" { + if isTestnet { + bindAddr = "0.0.0.0" // Allow external access on testnet + } else { + bindAddr = "127.0.0.1" // Default to localhost + } + } + port := appConfig.ProfilingConfig.Port + if port == 0 { + port = 6060 // Default pprof port + } + + profilingAddr := fmt.Sprintf("%s:%d", bindAddr, port) + + logtrace.Info(ctx, "Starting profiling server", logtrace.Fields{ + "address": profilingAddr, + "chain_id": appConfig.LumeraClientConfig.ChainID, + "is_testnet": isTestnet, + "auto_enabled": isTestnet && !appConfig.ProfilingConfig.Enabled, + }) + + go func() { + if err := http.ListenAndServe(profilingAddr, nil); err != nil { + logtrace.Error(ctx, "Profiling server error", logtrace.Fields{"error": err.Error()}) + } + }() + } + // Start the services go func() { if err := RunServices(ctx, grpcServer, cService, *p2pService, gatewayServer); err != nil { diff --git a/supernode/config/config.go b/supernode/config/config.go index e3910ac2..a09190ce 100644 --- a/supernode/config/config.go +++ b/supernode/config/config.go @@ -44,12 +44,19 @@ type LogConfig struct { Level string `yaml:"level"` } +type ProfilingConfig struct { + Enabled bool `yaml:"enabled"` + Port uint16 `yaml:"port"` + BindAddress string `yaml:"bind_address,omitempty"` +} + type Config struct { SupernodeConfig `yaml:"supernode"` KeyringConfig `yaml:"keyring"` P2PConfig `yaml:"p2p"` LumeraClientConfig `yaml:"lumera"` RaptorQConfig `yaml:"raptorq"` + ProfilingConfig `yaml:"profiling"` // Store base directory (not from YAML) BaseDir string `yaml:"-"` From bca91a118ad642a84414407d55cfd65d6988e3f7 Mon Sep 17 00:00:00 2001 From: j-rafique Date: Wed, 24 Sep 2025 15:17:05 +0500 Subject: [PATCH 04/27] Remove config --- supernode/cmd/start.go | 27 ++++++--------------------- supernode/config/config.go | 7 ------- 2 files changed, 6 insertions(+), 
28 deletions(-) diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index c8914270..31c19b2a 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -142,31 +142,16 @@ The supernode will connect to the Lumera network and begin participating in the return fmt.Errorf("failed to create gateway server: %w", err) } - // Start profiling server if enabled or on testnet + // Start profiling server on testnet only isTestnet := strings.Contains(strings.ToLower(appConfig.LumeraClientConfig.ChainID), "testnet") - shouldEnableProfiling := appConfig.ProfilingConfig.Enabled || isTestnet - - if shouldEnableProfiling { - bindAddr := appConfig.ProfilingConfig.BindAddress - if bindAddr == "" { - if isTestnet { - bindAddr = "0.0.0.0" // Allow external access on testnet - } else { - bindAddr = "127.0.0.1" // Default to localhost - } - } - port := appConfig.ProfilingConfig.Port - if port == 0 { - port = 6060 // Default pprof port - } - profilingAddr := fmt.Sprintf("%s:%d", bindAddr, port) + if isTestnet { + profilingAddr := "0.0.0.0:6060" logtrace.Info(ctx, "Starting profiling server", logtrace.Fields{ - "address": profilingAddr, - "chain_id": appConfig.LumeraClientConfig.ChainID, - "is_testnet": isTestnet, - "auto_enabled": isTestnet && !appConfig.ProfilingConfig.Enabled, + "address": profilingAddr, + "chain_id": appConfig.LumeraClientConfig.ChainID, + "is_testnet": isTestnet, }) go func() { diff --git a/supernode/config/config.go b/supernode/config/config.go index a09190ce..e3910ac2 100644 --- a/supernode/config/config.go +++ b/supernode/config/config.go @@ -44,19 +44,12 @@ type LogConfig struct { Level string `yaml:"level"` } -type ProfilingConfig struct { - Enabled bool `yaml:"enabled"` - Port uint16 `yaml:"port"` - BindAddress string `yaml:"bind_address,omitempty"` -} - type Config struct { SupernodeConfig `yaml:"supernode"` KeyringConfig `yaml:"keyring"` P2PConfig `yaml:"p2p"` LumeraClientConfig `yaml:"lumera"` RaptorQConfig `yaml:"raptorq"` - ProfilingConfig 
`yaml:"profiling"` // Store base directory (not from YAML) BaseDir string `yaml:"-"` From efbe176dc7ad28547096f49093827eaa801c043a Mon Sep 17 00:00:00 2001 From: Matee ullah Malik <46045452+mateeullahmalik@users.noreply.github.com> Date: Wed, 24 Sep 2025 16:46:36 +0500 Subject: [PATCH 05/27] Codec callback (#188) * prepare decode * Test Fixes --- Makefile | 6 +- go.mod | 1 - p2p/kademlia/network.go | 16 +- pkg/codec/codec_mock.go | 12 +- pkg/codec/decode.go | 257 ++++++++++++++---- pkg/net/grpc/client/client.go | 6 +- pkg/net/grpc/client/client_test.go | 4 +- pkg/net/grpc/server/server.go | 8 +- pkg/net/grpc/server/server_test.go | 8 +- .../cascade/cascade_action_server_test.go | 2 +- .../cascade/adaptors/mocks/lumera_mock.go | 48 ++-- .../cascade/adaptors/mocks/p2p_mock.go | 10 +- .../cascade/adaptors/mocks/rq_mock.go | 31 ++- supernode/services/cascade/adaptors/rq.go | 7 +- .../cascade/mocks/cascade_interfaces_mock.go | 15 +- supernode/services/cascade/register_test.go | 10 +- supernode/services/cascade/service_test.go | 2 +- tests/system/go.mod | 1 + tests/system/go.sum | 4 + 19 files changed, 288 insertions(+), 160 deletions(-) diff --git a/Makefile b/Makefile index fe5a9852..01272fbf 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ .PHONY: build build-release build-sncli build-sn-manager -.PHONY: install-lumera setup-supernodes system-test-setup +.PHONY: install-lumera setup-supernodes system-test-setup install-deps .PHONY: gen-cascade gen-supernode .PHONY: test-e2e test-unit test-integration test-system @@ -140,9 +140,9 @@ test-e2e: # Run cascade e2e tests only test-cascade: @echo "Running cascade e2e tests..." - @cd tests/system && go test -tags=system_test -v -run TestCascadeE2E . + @cd tests/system && go mod tidy && go test -tags=system_test -v -run TestCascadeE2E . # Run sn-manager e2e tests only test-sn-manager: @echo "Running sn-manager e2e tests..." - @cd tests/system && go test -tags=system_test -v -run '^TestSNManager' . 
\ No newline at end of file + @cd tests/system && go test -tags=system_test -v -run '^TestSNManager' . diff --git a/go.mod b/go.mod index b7f53ec1..46091df2 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/LumeraProtocol/supernode/v2 go 1.24.1 replace ( - github.com/LumeraProtocol/supernode/v2/supernode => ./supernode github.com/bytedance/sonic => github.com/bytedance/sonic v1.14.0 github.com/bytedance/sonic/loader => github.com/bytedance/sonic/loader v0.3.0 ) diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index a2322ff7..935d1583 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -2,7 +2,6 @@ package kademlia import ( "context" - "encoding/hex" "fmt" "io" "net" @@ -951,7 +950,7 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, i++ } - values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, true) + values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, false) if err != nil { err = errors.Errorf("batch find values: %w", err) s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ @@ -976,17 +975,8 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, for i, key := range keys { val := KeyValWithClosest{ - Value: values[i], - } - if len(val.Value) == 0 { - decodedKey, err := hex.DecodeString(keys[i]) - if err != nil { - err = errors.Errorf("batch find vals: decode key: %w - key %s", err, keys[i]) - return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) - } - - nodes, _ := s.dht.ht.closestContacts(Alpha, decodedKey, []*Node{message.Sender}) - val.Closest = nodes.Nodes + Value: values[i], + Closest: make([]*Node, 0), // for compatibility, not used - each node now has full view of the whole network } request.Data[key] = val diff --git a/pkg/codec/codec_mock.go b/pkg/codec/codec_mock.go index 9c3cf864..09484cee 100644 --- a/pkg/codec/codec_mock.go +++ b/pkg/codec/codec_mock.go @@ -1,10 +1,5 @@ 
// Code generated by MockGen. DO NOT EDIT. // Source: codec.go -// -// Generated by this command: -// -// mockgen -destination=codec_mock.go -package=codec -source=codec.go -// // Package codec is a generated GoMock package. package codec @@ -13,14 +8,13 @@ import ( context "context" reflect "reflect" - gomock "go.uber.org/mock/gomock" + gomock "github.com/golang/mock/gomock" ) // MockCodec is a mock of Codec interface. type MockCodec struct { ctrl *gomock.Controller recorder *MockCodecMockRecorder - isgomock struct{} } // MockCodecMockRecorder is the mock recorder for MockCodec. @@ -50,7 +44,7 @@ func (m *MockCodec) Decode(ctx context.Context, req DecodeRequest) (DecodeRespon } // Decode indicates an expected call of Decode. -func (mr *MockCodecMockRecorder) Decode(ctx, req any) *gomock.Call { +func (mr *MockCodecMockRecorder) Decode(ctx, req interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decode", reflect.TypeOf((*MockCodec)(nil).Decode), ctx, req) } @@ -65,7 +59,7 @@ func (m *MockCodec) Encode(ctx context.Context, req EncodeRequest) (EncodeRespon } // Encode indicates an expected call of Encode. 
-func (mr *MockCodecMockRecorder) Encode(ctx, req any) *gomock.Call { +func (mr *MockCodecMockRecorder) Encode(ctx, req interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Encode", reflect.TypeOf((*MockCodec)(nil).Encode), ctx, req) } diff --git a/pkg/codec/decode.go b/pkg/codec/decode.go index fe77d752..bd3b0231 100644 --- a/pkg/codec/decode.go +++ b/pkg/codec/decode.go @@ -5,9 +5,13 @@ import ( "encoding/json" "fmt" "os" + "path" "path/filepath" + "strings" + "sync" raptorq "github.com/LumeraProtocol/rq-go" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" ) @@ -18,81 +22,160 @@ type DecodeRequest struct { } type DecodeResponse struct { - Path string + FilePath string DecodeTmpDir string } -func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) { +// Workspace holds paths & reverse index for prepared decoding. +type Workspace struct { + ActionID string + SymbolsDir string // ...// + BlockDirs []string // index = blockID (or 0 if single block) + symbolToBlock map[string]int + mu sync.RWMutex // protects symbolToBlock reads if you expand it later +} + +// PrepareDecode creates the on-disk workspace for decoding and returns: +// - blockPaths[0] => where to write symbols for block 0 (your single-block case) +// - Write(block, id, data) callback that writes symbol bytes directly to disk +// - Cleanup() to remove the workspace on abort (no-op if you want to keep it) +func (rq *raptorQ) PrepareDecode( + ctx context.Context, + actionID string, + layout Layout, +) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *Workspace, err error) { fields := logtrace.Fields{ - logtrace.FieldMethod: "Decode", + logtrace.FieldMethod: "PrepareDecode", logtrace.FieldModule: "rq", - logtrace.FieldActionID: req.ActionID, + logtrace.FieldActionID: actionID, } - logtrace.Info(ctx, 
"RaptorQ decode request received", fields) - // Use deterministic processor settings (matching encoder) - processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) - if err != nil { + // Create root symbols dir for this action + symbolsDir := filepath.Join(rq.symbolsBaseDir, actionID) + if err := os.MkdirAll(symbolsDir, 0o755); err != nil { fields[logtrace.FieldError] = err.Error() - return DecodeResponse{}, fmt.Errorf("create RaptorQ processor: %w", err) + logtrace.Error(ctx, "mkdir symbols base dir failed", fields) + return nil, nil, nil, nil, fmt.Errorf("mkdir %s: %w", symbolsDir, err) } - defer processor.Free() - symbolsDir := filepath.Join(rq.symbolsBaseDir, req.ActionID) - // Ensure a clean scratch directory (avoid contamination from previous attempts) - if err := os.RemoveAll(symbolsDir); err != nil { - fields[logtrace.FieldError] = err.Error() - return DecodeResponse{}, fmt.Errorf("cleanup decode dir %s: %w", symbolsDir, err) + // Ensure block directories exist; build reverse index symbol -> block + maxBlockID := 0 + for _, b := range layout.Blocks { + if int(b.BlockID) > maxBlockID { + maxBlockID = int(b.BlockID) + } } - if err := os.MkdirAll(symbolsDir, 0o755); err != nil { - fields[logtrace.FieldError] = err.Error() - return DecodeResponse{}, fmt.Errorf("mkdir %s: %w", symbolsDir, err) + blockDirs := make([]string, maxBlockID+1) + s2b := make(map[string]int, 1024) + + for _, b := range layout.Blocks { + dir := filepath.Join(symbolsDir, fmt.Sprintf("block_%d", b.BlockID)) + if err := os.MkdirAll(dir, 0o755); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "mkdir block dir failed", fields) + return nil, nil, nil, nil, fmt.Errorf("mkdir %s: %w", dir, err) + } + blockDirs[b.BlockID] = dir + for _, sym := range b.Symbols { + s2b[sym] = b.BlockID + } } - // Validate layout before writing any symbols - if len(req.Layout.Blocks) == 0 { - fields[logtrace.FieldError] = "empty 
layout" - return DecodeResponse{}, fmt.Errorf("invalid layout: no blocks present") + ws = &Workspace{ + ActionID: actionID, + SymbolsDir: symbolsDir, + BlockDirs: blockDirs, + symbolToBlock: s2b, } - for _, blk := range req.Layout.Blocks { - if len(blk.Symbols) == 0 { - fields[logtrace.FieldError] = fmt.Sprintf("block_%d has no symbols", blk.BlockID) - return DecodeResponse{}, fmt.Errorf("invalid layout: block %d has no symbols", blk.BlockID) + + // Helper: atomic write (tmp file + rename) to avoid partials on crash + writeFileAtomic := func(path string, data []byte) error { + tmp := path + ".tmp" + if err := os.WriteFile(tmp, data, 0o644); err != nil { + return err } + return os.Rename(tmp, path) } - // Build symbol->block mapping from layout and ensure block directories exist - symbolToBlock := make(map[string]int) - for _, blk := range req.Layout.Blocks { - blockDir := filepath.Join(symbolsDir, fmt.Sprintf("block_%d", blk.BlockID)) - if err := os.MkdirAll(blockDir, 0o755); err != nil { - fields[logtrace.FieldError] = err.Error() - return DecodeResponse{}, fmt.Errorf("mkdir %s: %w", blockDir, err) + // Write callback; if block < 0, resolve via layout reverse index; default to 0. 
+ Write = func(block int, symbolID string, data []byte) (string, error) { + // Quick cancellation check + select { + case <-ctx.Done(): + return "", ctx.Err() + default: } - for _, sym := range blk.Symbols { - symbolToBlock[sym] = blk.BlockID + + // Resolve block if caller passes default + if block < 0 { + ws.mu.RLock() + bid, ok := ws.symbolToBlock[symbolID] + ws.mu.RUnlock() + if !ok { + // Single-block simplification: default to block 0 if layout maps are absent + if len(ws.BlockDirs) == 0 || ws.BlockDirs[0] == "" { + return "", errors.Errorf("no block directories prepared") + } + bid = 0 + } + block = bid } - } - // Write symbols to their respective block directories - for id, data := range req.Symbols { - blkID, ok := symbolToBlock[id] - if !ok { - fields[logtrace.FieldError] = "symbol not present in layout" - return DecodeResponse{}, fmt.Errorf("symbol %s not present in layout", id) + if block < 0 || block >= len(ws.BlockDirs) || ws.BlockDirs[block] == "" { + return "", errors.Errorf("invalid block index %d", block) } - blockDir := filepath.Join(symbolsDir, fmt.Sprintf("block_%d", blkID)) - symbolPath := filepath.Join(blockDir, id) - if err := os.WriteFile(symbolPath, data, 0o644); err != nil { - fields[logtrace.FieldError] = err.Error() - return DecodeResponse{}, fmt.Errorf("write symbol %s: %w", id, err) + + // sanitize symbolID to a basename (prevents traversal) + clean := path.Clean("/" + symbolID) + base := strings.TrimPrefix(clean, "/") + if base == "" || strings.Contains(base, "/") { + return "", errors.Errorf("invalid symbol id %q", symbolID) + + } + + dest := filepath.Join(ws.BlockDirs[block], base) + if err := writeFileAtomic(dest, data); err != nil { + return "", fmt.Errorf("write symbol %s (block %d): %w", base, block, err) } + return dest, nil + } + + Cleanup = func() error { + // Remove the whole workspace directory (symbols + layout + output if any) + return os.RemoveAll(symbolsDir) + } + + logtrace.Info(ctx, "prepare decode workspace created", 
logtrace.Fields{ + "symbols_dir": symbolsDir, + "blocks": len(blockDirs), + }) + return blockDirs, Write, Cleanup, ws, nil +} + +// DecodeFromPrepared performs RaptorQ decode using an already-prepared workspace. +// It writes layout.json under the workspace, runs decode, and returns output paths. +func (rq *raptorQ) DecodeFromPrepared( + ctx context.Context, + ws *Workspace, + layout Layout, +) (DecodeResponse, error) { + fields := logtrace.Fields{ + logtrace.FieldMethod: "DecodeFromPrepared", + logtrace.FieldModule: "rq", + logtrace.FieldActionID: ws.ActionID, + } + logtrace.Info(ctx, "RaptorQ decode (prepared) requested", fields) + + processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return DecodeResponse{}, fmt.Errorf("create RaptorQ processor: %w", err) } - logtrace.Info(ctx, "symbols written to block directories", fields) + defer processor.Free() - // ---------- write layout.json ---------- - layoutPath := filepath.Join(symbolsDir, "layout.json") - layoutBytes, err := json.Marshal(req.Layout) + // Write layout.json (idempotent) + layoutPath := filepath.Join(ws.SymbolsDir, "layout.json") + layoutBytes, err := json.Marshal(layout) if err != nil { fields[logtrace.FieldError] = err.Error() return DecodeResponse{}, fmt.Errorf("marshal layout: %w", err) @@ -101,16 +184,74 @@ func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeRespons fields[logtrace.FieldError] = err.Error() return DecodeResponse{}, fmt.Errorf("write layout file: %w", err) } - logtrace.Info(ctx, "layout.json written", fields) + logtrace.Info(ctx, "layout.json written (prepared)", fields) - // Decode - outputPath := filepath.Join(symbolsDir, "output") - if err := processor.DecodeSymbols(symbolsDir, outputPath, layoutPath); err != nil { + // Decode to output (idempotent-safe: overwrite on success) + outputPath := filepath.Join(ws.SymbolsDir, "output") + if err 
:= processor.DecodeSymbols(ws.SymbolsDir, outputPath, layoutPath); err != nil { fields[logtrace.FieldError] = err.Error() - _ = os.Remove(outputPath) + _ = os.Remove(outputPath) // best-effort cleanup of partial output return DecodeResponse{}, fmt.Errorf("raptorq decode: %w", err) } - logtrace.Info(ctx, "RaptorQ decoding completed successfully", fields) - return DecodeResponse{Path: outputPath, DecodeTmpDir: symbolsDir}, nil + logtrace.Info(ctx, "RaptorQ decoding completed successfully (prepared)", logtrace.Fields{ + "output_path": outputPath, + }) + return DecodeResponse{FilePath: outputPath, DecodeTmpDir: ws.SymbolsDir}, nil +} + +func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) { + fields := logtrace.Fields{ + logtrace.FieldMethod: "Decode", + logtrace.FieldModule: "rq", + logtrace.FieldActionID: req.ActionID, + } + logtrace.Info(ctx, "RaptorQ decode request received", fields) + + // 1) Validate layout (the check) + if len(req.Layout.Blocks) == 0 { + fields[logtrace.FieldError] = "empty layout" + return DecodeResponse{}, fmt.Errorf("invalid layout: no blocks present") + } + for _, blk := range req.Layout.Blocks { + if len(blk.Symbols) == 0 { + fields[logtrace.FieldError] = fmt.Sprintf("block_%d has no symbols", blk.BlockID) + return DecodeResponse{}, fmt.Errorf("invalid layout: block %d has no symbols", blk.BlockID) + } + } + + // 2) Prepare workspace (functionality) + _, Write, Cleanup, ws, err := rq.PrepareDecode(ctx, req.ActionID, req.Layout) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return DecodeResponse{}, fmt.Errorf("prepare decode workspace: %w", err) + } + + // Ensure workspace cleanup on failure. On success, caller cleans up via returned path. 
+ success := false + defer func() { + if !success && Cleanup != nil { + _ = Cleanup() + } + }() + + // 3) Persist provided in-memory symbols via Write (functionality) + if len(req.Symbols) > 0 { + for id, data := range req.Symbols { + if _, werr := Write(-1, id, data); werr != nil { + fields[logtrace.FieldError] = werr.Error() + return DecodeResponse{}, werr + } + } + logtrace.Info(ctx, "symbols persisted via Write()", fields) + } + + // 4) Decode using the prepared workspace (functionality) + resp, derr := rq.DecodeFromPrepared(ctx, ws, req.Layout) + if derr != nil { + fields[logtrace.FieldError] = derr.Error() + return DecodeResponse{}, derr + } + success = true + return resp, nil } diff --git a/pkg/net/grpc/client/client.go b/pkg/net/grpc/client/client.go index dc4f45de..907c5b58 100644 --- a/pkg/net/grpc/client/client.go +++ b/pkg/net/grpc/client/client.go @@ -112,9 +112,9 @@ var defaultBackoffConfig = backoff.Config{ func DefaultClientOptions() *ClientOptions { return &ClientOptions{ MaxRecvMsgSize: 100 * MB, - MaxSendMsgSize: 100 * MB, // 100MB - InitialWindowSize: (int32)(1 * MB), // 1MB - controls initial frame size for streams - InitialConnWindowSize: (int32)(1 * MB), // 1MB - controls initial frame size for connection + MaxSendMsgSize: 100 * MB, // 100MB + InitialWindowSize: (int32)(32 * MB), // 32MB - controls initial frame size for streams + InitialConnWindowSize: (int32)(128 * MB), // 128MB - controls initial frame size for connection ConnWaitTime: defaultConnWaitTime, KeepAliveTime: 30 * time.Minute, diff --git a/pkg/net/grpc/client/client_test.go b/pkg/net/grpc/client/client_test.go index 12b196fa..0b4a10e1 100644 --- a/pkg/net/grpc/client/client_test.go +++ b/pkg/net/grpc/client/client_test.go @@ -86,8 +86,8 @@ func TestDefaultClientOptions(t *testing.T) { assert.NotNil(t, opts, "ClientOptions should be initialized") assert.Equal(t, 100*MB, opts.MaxRecvMsgSize, "MaxRecvMsgSize should be 100 MB") assert.Equal(t, 100*MB, opts.MaxSendMsgSize, 
"MaxSendMsgSize should be 100 MB") - assert.Equal(t, int32(1*MB), opts.InitialWindowSize, "InitialWindowSize should be 1 MB") - assert.Equal(t, int32(1*MB), opts.InitialConnWindowSize, "InitialConnWindowSize should be 1 MB") + assert.Equal(t, int32(32*MB), opts.InitialWindowSize, "InitialWindowSize should be 32 MB") + assert.Equal(t, int32(128*MB), opts.InitialConnWindowSize, "InitialConnWindowSize should be 128 MB") assert.Equal(t, defaultConnWaitTime, opts.ConnWaitTime, "ConnWaitTime should be 10 seconds") assert.True(t, opts.EnableRetries, "EnableRetries should be true") assert.Equal(t, maxRetries, opts.MaxRetries, "MaxRetries should be 5") diff --git a/pkg/net/grpc/server/server.go b/pkg/net/grpc/server/server.go index 10a2452c..64dfe0f2 100644 --- a/pkg/net/grpc/server/server.go +++ b/pkg/net/grpc/server/server.go @@ -94,7 +94,7 @@ func DefaultServerOptions() *ServerOptions { return &ServerOptions{ MaxRecvMsgSize: 100 * MB, MaxSendMsgSize: 100 * MB, - InitialWindowSize: (int32)(1 * MB), + InitialWindowSize: (int32)(32 * MB), InitialConnWindowSize: (int32)(1 * MB), MaxConcurrentStreams: 1000, GracefulShutdownTime: defaultGracefulShutdownTimeout, @@ -102,13 +102,13 @@ func DefaultServerOptions() *ServerOptions { MaxConnectionIdle: 2 * time.Hour, MaxConnectionAge: 2 * time.Hour, MaxConnectionAgeGrace: 1 * time.Hour, - Time: 1 * time.Hour, + Time: 30 * time.Minute, Timeout: 30 * time.Minute, MinTime: 5 * time.Minute, PermitWithoutStream: true, - WriteBufferSize: 32 * KB, - ReadBufferSize: 32 * KB, + WriteBufferSize: 512 * KB, + ReadBufferSize: 512 * KB, } } diff --git a/pkg/net/grpc/server/server_test.go b/pkg/net/grpc/server/server_test.go index 4c6067e0..f2308436 100644 --- a/pkg/net/grpc/server/server_test.go +++ b/pkg/net/grpc/server/server_test.go @@ -55,18 +55,18 @@ func TestDefaultServerOptions(t *testing.T) { assert.NotNil(t, opts, "Server options should be initialized") assert.Equal(t, 100*MB, opts.MaxRecvMsgSize, "MaxRecvMsgSize should be 100 MB") 
assert.Equal(t, 100*MB, opts.MaxSendMsgSize, "MaxSendMsgSize should be 100 MB") - assert.Equal(t, int32(1*MB), opts.InitialWindowSize, "InitialWindowSize should be 1 MB") + assert.Equal(t, int32(32*MB), opts.InitialWindowSize, "InitialWindowSize should be 32 MB") assert.Equal(t, int32(1*MB), opts.InitialConnWindowSize, "InitialConnWindowSize should be 1 MB") assert.Equal(t, uint32(1000), opts.MaxConcurrentStreams, "MaxConcurrentStreams should be 1000") assert.Equal(t, defaultGracefulShutdownTimeout, opts.GracefulShutdownTime, fmt.Sprintf("GracefulShutdownTimeout should be %v", defaultGracefulShutdownTimeout)) assert.Equal(t, uint32(0), opts.NumServerWorkers, "NumServerWorkers should be 0") - assert.Equal(t, 32*KB, opts.WriteBufferSize, "WriteBufferSize should be 32 KB") - assert.Equal(t, 32*KB, opts.ReadBufferSize, "ReadBufferSize should be 32 KB") + assert.Equal(t, 512*KB, opts.WriteBufferSize, "WriteBufferSize should be 512 KB") + assert.Equal(t, 512*KB, opts.ReadBufferSize, "ReadBufferSize should be 512 KB") assert.Equal(t, 2*time.Hour, opts.MaxConnectionIdle, "MaxConnectionIdle should be 2 hours") assert.Equal(t, 2*time.Hour, opts.MaxConnectionAge, "MaxConnectionAge should be 2 hours") assert.Equal(t, 1*time.Hour, opts.MaxConnectionAgeGrace, "MaxConnectionAgeGrace should be 1 hour") - assert.Equal(t, 1*time.Hour, opts.Time, "Time should be 1 hour") + assert.Equal(t, 30*time.Minute, opts.Time, "Time should be 30 minutes") assert.Equal(t, 30*time.Minute, opts.Timeout, "Timeout should be 30 minutes") assert.Equal(t, 5*time.Minute, opts.MinTime, "MinTime should be 5 minutes") assert.True(t, opts.PermitWithoutStream, "PermitWithoutStream should be true") diff --git a/supernode/node/action/server/cascade/cascade_action_server_test.go b/supernode/node/action/server/cascade/cascade_action_server_test.go index eca121d8..ff2738b3 100644 --- a/supernode/node/action/server/cascade/cascade_action_server_test.go +++ 
b/supernode/node/action/server/cascade/cascade_action_server_test.go @@ -10,7 +10,7 @@ import ( cascademocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/mocks" "github.com/stretchr/testify/assert" - "go.uber.org/mock/gomock" + "github.com/golang/mock/gomock" ) func TestRegister_Success(t *testing.T) { diff --git a/supernode/services/cascade/adaptors/mocks/lumera_mock.go b/supernode/services/cascade/adaptors/mocks/lumera_mock.go index 15a6c901..29cdd48f 100644 --- a/supernode/services/cascade/adaptors/mocks/lumera_mock.go +++ b/supernode/services/cascade/adaptors/mocks/lumera_mock.go @@ -1,10 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. // Source: lumera.go -// -// Generated by this command: -// -// mockgen -destination=mocks/lumera_mock.go -package=cascadeadaptormocks -source=lumera.go -// // Package cascadeadaptormocks is a generated GoMock package. package cascadeadaptormocks @@ -16,14 +11,13 @@ import ( types "github.com/LumeraProtocol/lumera/x/action/v1/types" types0 "github.com/LumeraProtocol/lumera/x/supernode/v1/types" tx "github.com/cosmos/cosmos-sdk/types/tx" - gomock "go.uber.org/mock/gomock" + gomock "github.com/golang/mock/gomock" ) // MockLumeraClient is a mock of LumeraClient interface. type MockLumeraClient struct { ctrl *gomock.Controller recorder *MockLumeraClientMockRecorder - isgomock struct{} } // MockLumeraClientMockRecorder is the mock recorder for MockLumeraClient. @@ -53,26 +47,11 @@ func (m *MockLumeraClient) FinalizeAction(ctx context.Context, actionID string, } // FinalizeAction indicates an expected call of FinalizeAction. 
-func (mr *MockLumeraClientMockRecorder) FinalizeAction(ctx, actionID, rqids any) *gomock.Call { +func (mr *MockLumeraClientMockRecorder) FinalizeAction(ctx, actionID, rqids interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).FinalizeAction), ctx, actionID, rqids) } -// SimulateFinalizeAction mocks base method. -func (m *MockLumeraClient) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*tx.SimulateResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SimulateFinalizeAction", ctx, actionID, rqids) - ret0, _ := ret[0].(*tx.SimulateResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SimulateFinalizeAction indicates an expected call of SimulateFinalizeAction. -func (mr *MockLumeraClientMockRecorder) SimulateFinalizeAction(ctx, actionID, rqids any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateFinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).SimulateFinalizeAction), ctx, actionID, rqids) -} - // GetAction mocks base method. func (m *MockLumeraClient) GetAction(ctx context.Context, actionID string) (*types.QueryGetActionResponse, error) { m.ctrl.T.Helper() @@ -83,7 +62,7 @@ func (m *MockLumeraClient) GetAction(ctx context.Context, actionID string) (*typ } // GetAction indicates an expected call of GetAction. -func (mr *MockLumeraClientMockRecorder) GetAction(ctx, actionID any) *gomock.Call { +func (mr *MockLumeraClientMockRecorder) GetAction(ctx, actionID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAction", reflect.TypeOf((*MockLumeraClient)(nil).GetAction), ctx, actionID) } @@ -98,7 +77,7 @@ func (m *MockLumeraClient) GetActionFee(ctx context.Context, dataSize string) (* } // GetActionFee indicates an expected call of GetActionFee. 
-func (mr *MockLumeraClientMockRecorder) GetActionFee(ctx, dataSize any) *gomock.Call { +func (mr *MockLumeraClientMockRecorder) GetActionFee(ctx, dataSize interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActionFee", reflect.TypeOf((*MockLumeraClient)(nil).GetActionFee), ctx, dataSize) } @@ -113,11 +92,26 @@ func (m *MockLumeraClient) GetTopSupernodes(ctx context.Context, height uint64) } // GetTopSupernodes indicates an expected call of GetTopSupernodes. -func (mr *MockLumeraClientMockRecorder) GetTopSupernodes(ctx, height any) *gomock.Call { +func (mr *MockLumeraClientMockRecorder) GetTopSupernodes(ctx, height interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopSupernodes", reflect.TypeOf((*MockLumeraClient)(nil).GetTopSupernodes), ctx, height) } +// SimulateFinalizeAction mocks base method. +func (m *MockLumeraClient) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*tx.SimulateResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SimulateFinalizeAction", ctx, actionID, rqids) + ret0, _ := ret[0].(*tx.SimulateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SimulateFinalizeAction indicates an expected call of SimulateFinalizeAction. +func (mr *MockLumeraClientMockRecorder) SimulateFinalizeAction(ctx, actionID, rqids interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateFinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).SimulateFinalizeAction), ctx, actionID, rqids) +} + // Verify mocks base method. func (m *MockLumeraClient) Verify(ctx context.Context, creator string, file, sigBytes []byte) error { m.ctrl.T.Helper() @@ -127,7 +121,7 @@ func (m *MockLumeraClient) Verify(ctx context.Context, creator string, file, sig } // Verify indicates an expected call of Verify. 
-func (mr *MockLumeraClientMockRecorder) Verify(ctx, creator, file, sigBytes any) *gomock.Call { +func (mr *MockLumeraClientMockRecorder) Verify(ctx, creator, file, sigBytes interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockLumeraClient)(nil).Verify), ctx, creator, file, sigBytes) } diff --git a/supernode/services/cascade/adaptors/mocks/p2p_mock.go b/supernode/services/cascade/adaptors/mocks/p2p_mock.go index 4f62a440..ec99d92a 100644 --- a/supernode/services/cascade/adaptors/mocks/p2p_mock.go +++ b/supernode/services/cascade/adaptors/mocks/p2p_mock.go @@ -1,10 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. // Source: p2p.go -// -// Generated by this command: -// -// mockgen -destination=mocks/p2p_mock.go -package=cascadeadaptormocks -source=p2p.go -// // Package cascadeadaptormocks is a generated GoMock package. package cascadeadaptormocks @@ -15,14 +10,13 @@ import ( logtrace "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" adaptors "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - gomock "go.uber.org/mock/gomock" + gomock "github.com/golang/mock/gomock" ) // MockP2PService is a mock of P2PService interface. type MockP2PService struct { ctrl *gomock.Controller recorder *MockP2PServiceMockRecorder - isgomock struct{} } // MockP2PServiceMockRecorder is the mock recorder for MockP2PService. @@ -51,7 +45,7 @@ func (m *MockP2PService) StoreArtefacts(ctx context.Context, req adaptors.StoreA } // StoreArtefacts indicates an expected call of StoreArtefacts. 
-func (mr *MockP2PServiceMockRecorder) StoreArtefacts(ctx, req, f any) *gomock.Call { +func (mr *MockP2PServiceMockRecorder) StoreArtefacts(ctx, req, f interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreArtefacts", reflect.TypeOf((*MockP2PService)(nil).StoreArtefacts), ctx, req, f) } diff --git a/supernode/services/cascade/adaptors/mocks/rq_mock.go b/supernode/services/cascade/adaptors/mocks/rq_mock.go index 4c53c1dd..f45f2eb5 100644 --- a/supernode/services/cascade/adaptors/mocks/rq_mock.go +++ b/supernode/services/cascade/adaptors/mocks/rq_mock.go @@ -1,10 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. // Source: rq.go -// -// Generated by this command: -// -// mockgen -destination=mocks/rq_mock.go -package=cascadeadaptormocks -source=rq.go -// // Package cascadeadaptormocks is a generated GoMock package. package cascadeadaptormocks @@ -13,15 +8,15 @@ import ( context "context" reflect "reflect" + codec "github.com/LumeraProtocol/supernode/v2/pkg/codec" adaptors "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - gomock "go.uber.org/mock/gomock" + gomock "github.com/golang/mock/gomock" ) // MockCodecService is a mock of CodecService interface. type MockCodecService struct { ctrl *gomock.Controller recorder *MockCodecServiceMockRecorder - isgomock struct{} } // MockCodecServiceMockRecorder is the mock recorder for MockCodecService. @@ -51,7 +46,7 @@ func (m *MockCodecService) Decode(ctx context.Context, req adaptors.DecodeReques } // Decode indicates an expected call of Decode. 
-func (mr *MockCodecServiceMockRecorder) Decode(ctx, req any) *gomock.Call { +func (mr *MockCodecServiceMockRecorder) Decode(ctx, req interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decode", reflect.TypeOf((*MockCodecService)(nil).Decode), ctx, req) } @@ -66,7 +61,25 @@ func (m *MockCodecService) EncodeInput(ctx context.Context, taskID, path string, } // EncodeInput indicates an expected call of EncodeInput. -func (mr *MockCodecServiceMockRecorder) EncodeInput(ctx, taskID, path, dataSize any) *gomock.Call { +func (mr *MockCodecServiceMockRecorder) EncodeInput(ctx, taskID, path, dataSize interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EncodeInput", reflect.TypeOf((*MockCodecService)(nil).EncodeInput), ctx, taskID, path, dataSize) } + +// PrepareDecode mocks base method. +func (m *MockCodecService) PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) ([]string, func(int, string, []byte) (string, error), func() error, *codec.Workspace, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrepareDecode", ctx, actionID, layout) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(func(int, string, []byte) (string, error)) + ret2, _ := ret[2].(func() error) + ret3, _ := ret[3].(*codec.Workspace) + ret4, _ := ret[4].(error) + return ret0, ret1, ret2, ret3, ret4 +} + +// PrepareDecode indicates an expected call of PrepareDecode. 
+func (mr *MockCodecServiceMockRecorder) PrepareDecode(ctx, actionID, layout interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareDecode", reflect.TypeOf((*MockCodecService)(nil).PrepareDecode), ctx, actionID, layout) +} diff --git a/supernode/services/cascade/adaptors/rq.go b/supernode/services/cascade/adaptors/rq.go index 5f4443cf..92e89819 100644 --- a/supernode/services/cascade/adaptors/rq.go +++ b/supernode/services/cascade/adaptors/rq.go @@ -11,6 +11,7 @@ import ( //go:generate mockgen -destination=mocks/rq_mock.go -package=cascadeadaptormocks -source=rq.go type CodecService interface { EncodeInput(ctx context.Context, taskID string, path string, dataSize int) (EncodeResult, error) + PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) } @@ -70,7 +71,11 @@ func (c *codecImpl) Decode(ctx context.Context, req DecodeRequest) (DecodeRespon } return DecodeResponse{ - FilePath: resp.Path, + FilePath: resp.FilePath, DecodeTmpDir: resp.DecodeTmpDir, }, nil } + +func (c *codecImpl) PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error) { + return +} diff --git a/supernode/services/cascade/mocks/cascade_interfaces_mock.go b/supernode/services/cascade/mocks/cascade_interfaces_mock.go index 497497c3..44d3189c 100644 --- a/supernode/services/cascade/mocks/cascade_interfaces_mock.go +++ b/supernode/services/cascade/mocks/cascade_interfaces_mock.go @@ -1,10 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. 
// Source: interfaces.go -// -// Generated by this command: -// -// mockgen -destination=mocks/cascade_interfaces_mock.go -package=cascademocks -source=interfaces.go -// // Package cascademocks is a generated GoMock package. package cascademocks @@ -14,14 +9,13 @@ import ( reflect "reflect" cascade "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - gomock "go.uber.org/mock/gomock" + gomock "github.com/golang/mock/gomock" ) // MockCascadeServiceFactory is a mock of CascadeServiceFactory interface. type MockCascadeServiceFactory struct { ctrl *gomock.Controller recorder *MockCascadeServiceFactoryMockRecorder - isgomock struct{} } // MockCascadeServiceFactoryMockRecorder is the mock recorder for MockCascadeServiceFactory. @@ -59,7 +53,6 @@ func (mr *MockCascadeServiceFactoryMockRecorder) NewCascadeRegistrationTask() *g type MockCascadeTask struct { ctrl *gomock.Controller recorder *MockCascadeTaskMockRecorder - isgomock struct{} } // MockCascadeTaskMockRecorder is the mock recorder for MockCascadeTask. @@ -88,7 +81,7 @@ func (m *MockCascadeTask) CleanupDownload(ctx context.Context, actionID string) } // CleanupDownload indicates an expected call of CleanupDownload. -func (mr *MockCascadeTaskMockRecorder) CleanupDownload(ctx, actionID any) *gomock.Call { +func (mr *MockCascadeTaskMockRecorder) CleanupDownload(ctx, actionID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupDownload", reflect.TypeOf((*MockCascadeTask)(nil).CleanupDownload), ctx, actionID) } @@ -102,7 +95,7 @@ func (m *MockCascadeTask) Download(ctx context.Context, req *cascade.DownloadReq } // Download indicates an expected call of Download. 
-func (mr *MockCascadeTaskMockRecorder) Download(ctx, req, send any) *gomock.Call { +func (mr *MockCascadeTaskMockRecorder) Download(ctx, req, send interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockCascadeTask)(nil).Download), ctx, req, send) } @@ -116,7 +109,7 @@ func (m *MockCascadeTask) Register(ctx context.Context, req *cascade.RegisterReq } // Register indicates an expected call of Register. -func (mr *MockCascadeTaskMockRecorder) Register(ctx, req, send any) *gomock.Call { +func (mr *MockCascadeTaskMockRecorder) Register(ctx, req, send interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockCascadeTask)(nil).Register), ctx, req, send) } diff --git a/supernode/services/cascade/register_test.go b/supernode/services/cascade/register_test.go index c73b96b7..6f56791a 100644 --- a/supernode/services/cascade/register_test.go +++ b/supernode/services/cascade/register_test.go @@ -21,8 +21,8 @@ import ( "github.com/cosmos/gogoproto/proto" "lukechampine.com/blake3" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - "go.uber.org/mock/gomock" ) func TestCascadeRegistrationTask_Register(t *testing.T) { @@ -104,10 +104,10 @@ func TestCascadeRegistrationTask_Register(t *testing.T) { Metadata: codecpkg.Layout{Blocks: []codecpkg.Block{{BlockID: 1, Hash: "abc"}}}, }, nil) - // 8. Store artefacts (no metrics returned; recorded centrally) - p2p.EXPECT(). - StoreArtefacts(gomock.Any(), gomock.Any(), gomock.Any()). - Return(nil) + // 8. Store artefacts (no metrics returned; recorded centrally) + p2p.EXPECT(). + StoreArtefacts(gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return(nil) }, expectedError: "", expectedEvents: 12, diff --git a/supernode/services/cascade/service_test.go b/supernode/services/cascade/service_test.go index eaa7bf7f..bc2998ad 100644 --- a/supernode/services/cascade/service_test.go +++ b/supernode/services/cascade/service_test.go @@ -8,8 +8,8 @@ import ( "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" cascadeadaptormocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors/mocks" "github.com/LumeraProtocol/supernode/v2/supernode/services/common" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - "go.uber.org/mock/gomock" ) func TestNewCascadeService(t *testing.T) { diff --git a/tests/system/go.mod b/tests/system/go.mod index 15b8212d..e6eb3bba 100644 --- a/tests/system/go.mod +++ b/tests/system/go.mod @@ -95,6 +95,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.4 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/mock v1.6.0 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.3 // indirect github.com/google/flatbuffers v1.12.1 // indirect diff --git a/tests/system/go.sum b/tests/system/go.sum index 9ff0158a..6e9c0112 100644 --- a/tests/system/go.sum +++ b/tests/system/go.sum @@ -803,6 +803,7 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= github.com/zeebo/errs v1.4.0/go.mod 
h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= @@ -888,6 +889,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -933,6 +935,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1025,6 +1028,7 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 74fb97edfb96133285a2552defb024bd2f2d6a31 Mon Sep 17 00:00:00 2001 From: j-rafique Date: Wed, 24 Sep 2025 17:31:22 +0500 Subject: [PATCH 06/27] Change test port --- supernode/cmd/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 31c19b2a..9c186d1a 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -146,7 +146,7 @@ The supernode will connect to the Lumera network and begin participating in the isTestnet := strings.Contains(strings.ToLower(appConfig.LumeraClientConfig.ChainID), "testnet") if isTestnet { - profilingAddr := "0.0.0.0:6060" + profilingAddr := "0.0.0.0:6062" logtrace.Info(ctx, "Starting profiling server", logtrace.Fields{ "address": profilingAddr, From cf7f877fe0ceeb68fbcdab70ada6c60f8101ebeb Mon Sep 17 00:00:00 2001 From: Matee ullah Malik <46045452+mateeullahmalik@users.noreply.github.com> Date: Wed, 24 Sep 2025 17:53:31 +0500 Subject: [PATCH 07/27] Async events (#190) --- sdk/adapters/supernodeservice/adapter.go | 32 ++++++++- sdk/net/factory.go | 7 +- sdk/task/download.go | 66 +++---------------- .../server/cascade/cascade_action_server.go | 32 +++++++-- 4 files changed, 71 insertions(+), 66 deletions(-) diff --git a/sdk/adapters/supernodeservice/adapter.go b/sdk/adapters/supernodeservice/adapter.go index f9e9e6da..0d326a17 100644 --- a/sdk/adapters/supernodeservice/adapter.go +++ b/sdk/adapters/supernodeservice/adapter.go @@ -446,6 +446,7 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( bytesWritten int64 chunkIndex 
int startedEmitted bool + downloadStart time.Time ) // 3. Receive streamed responses @@ -509,7 +510,11 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( } } } - in.EventLogger(ctx, toSdkEvent(x.Event.EventType), x.Event.Message, edata) + // Avoid blocking Recv loop on event handling; dispatch asynchronously + evtType := toSdkEvent(x.Event.EventType) + go func(ed event.EventData, et event.EventType, msg string) { + in.EventLogger(ctx, et, msg, ed) + }(edata, evtType, x.Event.Message) } // 3b. Actual data chunk @@ -520,7 +525,10 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( } if !startedEmitted { if in.EventLogger != nil { - in.EventLogger(ctx, event.SDKDownloadStarted, "Download started", event.EventData{event.KeyActionID: in.ActionID}) + // mark start to compute throughput at completion + downloadStart = time.Now() + // Emit started asynchronously to avoid blocking + go in.EventLogger(ctx, event.SDKDownloadStarted, "Download started", event.EventData{event.KeyActionID: in.ActionID}) } startedEmitted = true } @@ -538,7 +546,25 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( a.logger.Info(ctx, "download complete", "bytes_written", bytesWritten, "path", in.OutputPath, "action_id", in.ActionID) if in.EventLogger != nil { - in.EventLogger(ctx, event.SDKDownloadCompleted, "Download completed", event.EventData{event.KeyActionID: in.ActionID, event.KeyOutputPath: in.OutputPath}) + // Compute metrics if we marked a start + var elapsed float64 + var throughput float64 + if !downloadStart.IsZero() { + elapsed = time.Since(downloadStart).Seconds() + mb := float64(bytesWritten) / (1024.0 * 1024.0) + if elapsed > 0 { + throughput = mb / elapsed + } + } + // Emit completion asynchronously with metrics + go in.EventLogger(ctx, event.SDKDownloadCompleted, "Download completed", event.EventData{ + event.KeyActionID: in.ActionID, + event.KeyOutputPath: in.OutputPath, + event.KeyBytesTotal: bytesWritten, + event.KeyChunks: chunkIndex, + event.KeyElapsedSeconds: 
elapsed, + event.KeyThroughputMBS: throughput, + }) } return &CascadeSupernodeDownloadResponse{ Success: true, diff --git a/sdk/net/factory.go b/sdk/net/factory.go index b9fad9fd..f3486780 100644 --- a/sdk/net/factory.go +++ b/sdk/net/factory.go @@ -39,9 +39,10 @@ func NewClientFactory(ctx context.Context, logger log.Logger, keyring keyring.Ke // Tuned for 1GB max files with 4MB chunks // Reduce in-flight memory by aligning windows and msg sizes to chunk size. opts := client.DefaultClientOptions() - opts.MaxRecvMsgSize = 8 * 1024 * 1024 // 8MB: supports 4MB chunks + overhead - opts.MaxSendMsgSize = 8 * 1024 * 1024 // 8MB: supports 4MB chunks + overhead - opts.InitialWindowSize = 4 * 1024 * 1024 // 4MB per-stream window ≈ chunk size + opts.MaxRecvMsgSize = 12 * 1024 * 1024 // 12MB: supports 4MB chunks + overhead + opts.MaxSendMsgSize = 12 * 1024 * 1024 // 12MB: supports 4MB chunks + overhead + // Increase per-stream window to provide headroom for first data chunk + events + opts.InitialWindowSize = 12 * 1024 * 1024 // 12MB per-stream window opts.InitialConnWindowSize = 64 * 1024 * 1024 // 64MB per-connection window return &ClientFactory{ diff --git a/sdk/task/download.go b/sdk/task/download.go index 3e85007a..2c727ae9 100644 --- a/sdk/task/download.go +++ b/sdk/task/download.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "os" - "sort" "time" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" @@ -77,51 +76,6 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern } } - // Optionally rank supernodes by available memory to improve success for large files - // We keep a short timeout per status fetch to avoid delaying downloads. 
- type rankedSN struct { - sn lumera.Supernode - availGB float64 - hasStatus bool - } - ranked := make([]rankedSN, 0, len(supernodes)) - for _, sn := range supernodes { - ranked = append(ranked, rankedSN{sn: sn}) - } - - // Probe supernode status with short timeouts and close clients promptly - for i := range ranked { - sn := ranked[i].sn - // 2s status timeout to keep this pass fast - stx, cancel := context.WithTimeout(ctx, 2*time.Second) - client, err := clientFactory.CreateClient(stx, sn) - if err != nil { - cancel() - continue - } - status, err := client.GetSupernodeStatus(stx) - _ = client.Close(stx) - cancel() - if err != nil { - continue - } - ranked[i].hasStatus = true - ranked[i].availGB = status.Resources.Memory.AvailableGB - } - - // Sort: nodes with status first, higher available memory first - sort.Slice(ranked, func(i, j int) bool { - if ranked[i].hasStatus != ranked[j].hasStatus { - return ranked[i].hasStatus && !ranked[j].hasStatus - } - return ranked[i].availGB > ranked[j].availGB - }) - - // Rebuild the supernodes list in the sorted order - for i := range ranked { - supernodes[i] = ranked[i].sn - } - // Try supernodes sequentially, one by one (now sorted) var lastErr error for idx, sn := range supernodes { @@ -146,8 +100,8 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern continue } - // Success; return to caller - return nil + // Success; return to caller + return nil } if lastErr != nil { @@ -176,15 +130,15 @@ func (t *CascadeDownloadTask) attemptDownload( t.LogEvent(ctx, evt, msg, data) } - resp, err := client.Download(ctx, req) - if err != nil { - return fmt.Errorf("download from %s: %w", sn.CosmosAddress, err) - } - if !resp.Success { - return fmt.Errorf("download rejected by %s: %s", sn.CosmosAddress, resp.Message) - } + resp, err := client.Download(ctx, req) + if err != nil { + return fmt.Errorf("download from %s: %w", sn.CosmosAddress, err) + } + if !resp.Success { + return fmt.Errorf("download rejected by 
%s: %s", sn.CosmosAddress, resp.Message) + } - return nil + return nil } // downloadResult holds the result of a successful download attempt diff --git a/supernode/node/action/server/cascade/cascade_action_server.go b/supernode/node/action/server/cascade/cascade_action_server.go index a99fbf0a..449f4c42 100644 --- a/supernode/node/action/server/cascade/cascade_action_server.go +++ b/supernode/node/action/server/cascade/cascade_action_server.go @@ -313,7 +313,14 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS "chunk_size": chunkSize, }) - // Announce: file is ready to be served to the client + // Pre-read first chunk to avoid any delay between SERVE_READY and first data + buf := make([]byte, chunkSize) + n, readErr := f.Read(buf) + if readErr != nil && readErr != io.EOF { + return fmt.Errorf("chunked read failed: %w", readErr) + } + + // Announce: file is ready to be served to the client (right before first data) if err := stream.Send(&pb.DownloadResponse{ ResponseType: &pb.DownloadResponse_Event{ Event: &pb.DownloadEvent{ @@ -326,10 +333,27 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS return err } - // Stream the file in fixed-size chunks - buf := make([]byte, chunkSize) + // Send pre-read first chunk if available + if n > 0 { + if err := stream.Send(&pb.DownloadResponse{ + ResponseType: &pb.DownloadResponse_Chunk{ + Chunk: &pb.DataChunk{Data: buf[:n]}, + }, + }); err != nil { + logtrace.Error(ctx, "failed to stream first chunk", logtrace.Fields{logtrace.FieldError: err.Error()}) + return err + } + } + + // If EOF after first read, we're done + if readErr == io.EOF { + logtrace.Info(ctx, "completed streaming all chunks", fields) + return nil + } + + // Continue streaming remaining chunks for { - n, readErr := f.Read(buf) + n, readErr = f.Read(buf) if n > 0 { if err := stream.Send(&pb.DownloadResponse{ ResponseType: &pb.DownloadResponse_Chunk{ From 1f524bf39752989de190659c25116ef129a52bd5 Mon 
Sep 17 00:00:00 2001 From: j-rafique Date: Wed, 24 Sep 2025 18:37:05 +0500 Subject: [PATCH 08/27] Add profile script --- profile_cascade.sh | 98 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100755 profile_cascade.sh diff --git a/profile_cascade.sh b/profile_cascade.sh new file mode 100755 index 00000000..7fe0af5e --- /dev/null +++ b/profile_cascade.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +# Cascade Download Heap Profiling Script +# Samples heap every 30 seconds during cascade downloads + +# Configuration - modify these as needed +PROFILE_URL="http://localhost:6062/debug/pprof/heap" +INTERVAL=30 +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +PROFILE_DIR="profiles_${TIMESTAMP}" + +# Allow override via command line +if [ "$1" != "" ]; then + PROFILE_URL="$1" +fi + +echo "=== Cascade Heap Profiling ===" +echo "Profile URL: $PROFILE_URL" +echo "Interval: ${INTERVAL}s" +echo "Output Dir: $PROFILE_DIR" +echo + +# Create profile directory +mkdir -p "$PROFILE_DIR" +cd "$PROFILE_DIR" + +# Test connection first +echo "Testing connection to profiling server..." +if ! curl -s --fail "$PROFILE_URL" > /dev/null; then + echo "ERROR: Cannot connect to profiling server at $PROFILE_URL" + echo "Make sure your supernode is running on testnet!" + exit 1 +fi + +echo "✓ Connected to profiling server" +echo + +# Take baseline +echo "Taking baseline heap snapshot..." +curl -s -o "heap_00s.prof" "$PROFILE_URL" +echo "✓ Baseline saved: heap_00s.prof" +echo + +echo "*** NOW START YOUR CASCADE DOWNLOAD ***" +echo "Press ENTER when download has started..." +read + +echo "Starting heap profiling every ${INTERVAL}s..." +echo "Press Ctrl+C to stop" +echo + +# Counter for snapshots +counter=1 + +# Function to handle cleanup on exit +cleanup() { + echo + echo "Profiling stopped. Taking final snapshot..." 
+ final_elapsed=$((counter * INTERVAL)) + curl -s -o "heap_${final_elapsed}s_final.prof" "$PROFILE_URL" + + echo + echo "=== Profiling Complete ===" + echo "Location: $(pwd)" + echo "Files created:" + ls -la *.prof + echo + echo "Analysis commands:" + echo "# Compare baseline to final:" + echo "go tool pprof -http=:8080 -base heap_00s.prof heap_${final_elapsed}s_final.prof" + exit 0 +} + +# Set up signal handler +trap cleanup INT TERM + +# Main profiling loop +while true; do + sleep $INTERVAL + + elapsed=$((counter * INTERVAL)) + minutes=$((elapsed / 60)) + seconds=$((elapsed % 60)) + + timestamp=$(date +%H:%M:%S) + filename="heap_${elapsed}s.prof" + + echo "[$timestamp] Taking snapshot $counter (${minutes}m ${seconds}s elapsed)..." + + if curl -s -o "$filename" "$PROFILE_URL"; then + size=$(ls -lh "$filename" | awk '{print $5}') + echo "✓ Saved: $filename ($size)" + else + echo "✗ Failed to get snapshot $counter" + fi + + ((counter++)) +done \ No newline at end of file From b123dcefab57927edb7e398fd870bbe75c6948bb Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Wed, 24 Sep 2025 19:04:29 +0500 Subject: [PATCH 09/27] Change test port --- supernode/cmd/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 9c186d1a..92ccc700 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -146,7 +146,7 @@ The supernode will connect to the Lumera network and begin participating in the isTestnet := strings.Contains(strings.ToLower(appConfig.LumeraClientConfig.ChainID), "testnet") if isTestnet { - profilingAddr := "0.0.0.0:6062" + profilingAddr := "0.0.0.0:8082" logtrace.Info(ctx, "Starting profiling server", logtrace.Fields{ "address": profilingAddr, From 3c66950c37aa46711219d0e86ee73efffc027eb1 Mon Sep 17 00:00:00 2001 From: j-rafique Date: Thu, 25 Sep 2025 16:27:39 +0500 Subject: [PATCH 10/27] Disable Metrics --- p2p/kademlia/dht.go | 78 +++++++++++-------- p2p/p2p.go | 61 
++++++++------- supernode/cmd/start.go | 42 +++++----- supernode/services/cascade/adaptors/p2p.go | 17 ++-- supernode/services/cascade/config.go | 6 +- supernode/services/cascade/download.go | 35 +++++---- supernode/services/cascade/register.go | 14 ++-- supernode/services/cascade/service.go | 14 ++-- tests/integration/p2p/p2p_integration_test.go | 5 +- 9 files changed, 154 insertions(+), 118 deletions(-) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 69c45023..b83fb34e 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -42,9 +42,10 @@ const ( delKeysCountThreshold = 10 lowSpaceThreshold = 50 // GB batchRetrieveSize = 1000 - storeSameSymbolsBatchConcurrency = 3 - fetchSymbolsBatchConcurrency = 6 - minimumDataStoreSuccessRate = 75.0 + + storeSameSymbolsBatchConcurrency = 3 + fetchSymbolsBatchConcurrency = 6 + minimumDataStoreSuccessRate = 75.0 maxIterations = 4 macConcurrentNetworkStoreCalls = 16 @@ -124,7 +125,7 @@ func (s *DHT) ConnPoolSnapshot() map[string]int64 { // Options contains configuration options for the queries node type Options struct { - ID []byte + ID []byte // The queries IPv4 or IPv6 address IP string @@ -139,8 +140,11 @@ type Options struct { // Lumera client for interacting with the blockchain LumeraClient lumera.Client - // Keyring for credentials - Keyring keyring.Keyring + // Keyring for credentials + Keyring keyring.Keyring + + // MetricsDisabled gates DHT-level metrics emission (p2pmetrics hooks and snapshots) + MetricsDisabled bool } // NewDHT returns a new DHT node @@ -739,7 +743,9 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, return nil, fmt.Errorf("fetch and add local keys: %v", err) } // Report how many were found locally, for event metrics - p2pmetrics.ReportFoundLocal(p2pmetrics.TaskIDFromContext(ctx), int(foundLocalCount)) + if !s.options.MetricsDisabled { + p2pmetrics.ReportFoundLocal(p2pmetrics.TaskIDFromContext(ctx), int(foundLocalCount)) + } if foundLocalCount >= 
required { return result, nil } @@ -788,7 +794,9 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, // Record batch retrieve stats for internal DHT snapshot window s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(start)) // Also feed retrieve counts into the per-task collector for stream events - p2pmetrics.SetRetrieveBatchSummary(p2pmetrics.TaskIDFromContext(ctx), len(keys), int(required), int(foundLocalCount), netFound, time.Since(start).Milliseconds()) + if !s.options.MetricsDisabled { + p2pmetrics.SetRetrieveBatchSummary(p2pmetrics.TaskIDFromContext(ctx), len(keys), int(required), int(foundLocalCount), netFound, time.Since(start).Milliseconds()) + } return result, nil } @@ -946,14 +954,16 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, } mu.Unlock() // record failed RPC per-node - p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ - IP: node.IP, - Address: node.String(), - Keys: 0, - Success: false, - Error: err.Error(), - DurationMS: time.Since(callStart).Milliseconds(), - }) + if !s.options.MetricsDisabled { + p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ + IP: node.IP, + Address: node.String(), + Keys: 0, + Success: false, + Error: err.Error(), + DurationMS: time.Since(callStart).Milliseconds(), + }) + } return } @@ -976,14 +986,16 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, } // record successful RPC per-node (returned may be 0). Success is true when no error. 
- p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ - IP: node.IP, - Address: node.String(), - Keys: returned, - Success: true, - Error: "", - DurationMS: time.Since(callStart).Milliseconds(), - }) + if !s.options.MetricsDisabled { + p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ + IP: node.IP, + Address: node.String(), + Keys: returned, + Success: true, + Error: "", + DurationMS: time.Since(callStart).Milliseconds(), + }) + } }(node, nodeID) } @@ -1713,14 +1725,16 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i } // Emit per-node store RPC call via metrics bridge (no P2P API coupling) - p2pmetrics.RecordStore(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ - IP: nodeIP, - Address: nodeAddr, - Keys: response.KeysCount, - Success: errMsg == "" && response.Error == nil, - Error: errMsg, - DurationMS: response.DurationMS, - }) + if !s.options.MetricsDisabled { + p2pmetrics.RecordStore(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ + IP: nodeIP, + Address: nodeAddr, + Keys: response.KeysCount, + Success: errMsg == "" && response.Error == nil, + Error: errMsg, + DurationMS: response.DurationMS, + }) + } } diff --git a/p2p/p2p.go b/p2p/p2p.go index e3d6b40a..006c469a 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -40,14 +40,15 @@ type P2P interface { // p2p structure to implements interface type p2p struct { - store kademlia.Store // the store for kademlia network - metaStore kademlia.MetaStore - dht *kademlia.DHT // the kademlia network - config *Config // the service configuration - running bool // if the kademlia network is ready - lumeraClient lumera.Client - keyring keyring.Keyring // Add the keyring field - rqstore rqstore.Store + store kademlia.Store // the store for kademlia network + metaStore kademlia.MetaStore + dht *kademlia.DHT // the kademlia network + config *Config // the service configuration + running bool // if the kademlia network is ready + 
lumeraClient lumera.Client + keyring keyring.Keyring // Add the keyring field + rqstore rqstore.Store + metricsDisabled bool } // Run the kademlia network @@ -226,14 +227,15 @@ func (s *p2p) NClosestNodesWithIncludingNodeList(ctx context.Context, n int, key // configure the distributed hash table for p2p service func (s *p2p) configure(ctx context.Context) error { // new the queries storage - kadOpts := &kademlia.Options{ - LumeraClient: s.lumeraClient, - Keyring: s.keyring, // Pass the keyring - BootstrapNodes: []*kademlia.Node{}, - IP: s.config.ListenAddress, - Port: s.config.Port, - ID: []byte(s.config.ID), - } + kadOpts := &kademlia.Options{ + LumeraClient: s.lumeraClient, + Keyring: s.keyring, // Pass the keyring + BootstrapNodes: []*kademlia.Node{}, + IP: s.config.ListenAddress, + Port: s.config.Port, + ID: []byte(s.config.ID), + MetricsDisabled: s.metricsDisabled, + } if len(kadOpts.ID) == 0 { errors.Errorf("node id is empty") @@ -251,25 +253,26 @@ func (s *p2p) configure(ctx context.Context) error { } // New returns a new p2p instance. 
-func New(ctx context.Context, config *Config, lumeraClient lumera.Client, kr keyring.Keyring, rqstore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (P2P, error) { - store, err := sqlite.NewStore(ctx, config.DataDir, cloud, mst) - if err != nil { - return nil, errors.Errorf("new kademlia store: %w", err) - } +func New(ctx context.Context, config *Config, lumeraClient lumera.Client, kr keyring.Keyring, rqstore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore, metricsDisabled bool) (P2P, error) { + store, err := sqlite.NewStore(ctx, config.DataDir, cloud, mst) + if err != nil { + return nil, errors.Errorf("new kademlia store: %w", err) + } meta, err := meta.NewStore(ctx, config.DataDir) if err != nil { return nil, errors.Errorf("new kademlia meta store: %w", err) } - return &p2p{ - store: store, - metaStore: meta, - config: config, - lumeraClient: lumeraClient, - keyring: kr, // Store the keyring - rqstore: rqstore, - }, nil + return &p2p{ + store: store, + metaStore: meta, + config: config, + lumeraClient: lumeraClient, + keyring: kr, // Store the keyring + rqstore: rqstore, + metricsDisabled: metricsDisabled, + }, nil } // LocalStore store data into the kademlia network diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 92ccc700..eaf1339e 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -84,11 +84,14 @@ The supernode will connect to the Lumera network and begin participating in the logtrace.Fatal(ctx, "Failed to initialize RaptorQ store", logtrace.Fields{"error": err.Error()}) } - // Initialize P2P service - p2pService, err := initP2PService(ctx, appConfig, lumeraClient, kr, rqStore, nil, nil) - if err != nil { - logtrace.Fatal(ctx, "Failed to initialize P2P service", logtrace.Fields{"error": err.Error()}) - } + // Manually set the disable flag at the highest level + disableMetrics := true + + // Initialize P2P service with explicit disable flag + p2pService, err := initP2PService(ctx, 
appConfig, lumeraClient, kr, rqStore, nil, nil, disableMetrics) + if err != nil { + logtrace.Fatal(ctx, "Failed to initialize P2P service", logtrace.Fields{"error": err.Error()}) + } // Initialize the supernode supernodeInstance, err := NewSupernode(ctx, appConfig, kr, p2pService, rqStore, lumeraClient) @@ -97,18 +100,19 @@ The supernode will connect to the Lumera network and begin participating in the } // Configure cascade service - cService := cascadeService.NewCascadeService( - &cascadeService.Config{ - Config: common.Config{ - SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, - }, - RqFilesDir: appConfig.GetRaptorQFilesDir(), - }, - lumeraClient, - *p2pService, - codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), - rqStore, - ) + cService := cascadeService.NewCascadeService( + &cascadeService.Config{ + Config: common.Config{ + SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, + }, + RqFilesDir: appConfig.GetRaptorQFilesDir(), + MetricsDisabled: disableMetrics, + }, + lumeraClient, + *p2pService, + codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), + rqStore, + ) // Create cascade action server cascadeActionServer := cascade.NewCascadeActionServer(cService) @@ -190,7 +194,7 @@ func init() { } // initP2PService initializes the P2P service -func initP2PService(ctx context.Context, config *config.Config, lumeraClient lumera.Client, kr cKeyring.Keyring, rqStore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (*p2p.P2P, error) { +func initP2PService(ctx context.Context, config *config.Config, lumeraClient lumera.Client, kr cKeyring.Keyring, rqStore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore, metricsDisabled bool) (*p2p.P2P, error) { // Get the supernode address from the keyring keyInfo, err := kr.Key(config.SupernodeConfig.KeyName) if err != nil { @@ -206,7 +210,7 @@ func initP2PService(ctx context.Context, config *config.Config, lumeraClient lum logtrace.Info(ctx, "Initializing P2P service", 
logtrace.Fields{"address": p2pConfig.ListenAddress, "port": p2pConfig.Port, "data_dir": p2pConfig.DataDir, "supernode_id": address.String()}) - p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst) + p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst, metricsDisabled) if err != nil { return nil, fmt.Errorf("failed to initialize p2p service: %w", err) } diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go index 116d6810..a29e2b99 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/services/cascade/adaptors/p2p.go @@ -39,13 +39,14 @@ type P2PService interface { // p2pImpl is the default implementation of the P2PService interface. type p2pImpl struct { - p2p p2p.Client - rqStore rqstore.Store + p2p p2p.Client + rqStore rqstore.Store + metricsDisabled bool } // NewP2PService returns a concrete implementation of P2PService. -func NewP2PService(client p2p.Client, store rqstore.Store) P2PService { - return &p2pImpl{p2p: client, rqStore: store} +func NewP2PService(client p2p.Client, store rqstore.Store, metricsDisabled bool) P2PService { + return &p2pImpl{p2p: client, rqStore: store, metricsDisabled: metricsDisabled} } type StoreArtefactsRequest struct { @@ -58,9 +59,11 @@ type StoreArtefactsRequest struct { func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { logtrace.Info(ctx, "About to store artefacts (metadata + symbols)", logtrace.Fields{"taskID": req.TaskID, "id_files": len(req.IDFiles)}) - // Enable per-node store RPC capture for this task - cm.StartStoreCapture(req.TaskID) - defer cm.StopStoreCapture(req.TaskID) + // Optionally enable per-node store RPC capture for this task + if !p.metricsDisabled { + cm.StartStoreCapture(req.TaskID) + defer cm.StopStoreCapture(req.TaskID) + } start := time.Now() firstPassSymbols, totalSymbols, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, 
req.ActionID, req.SymbolsDir, req.IDFiles) diff --git a/supernode/services/cascade/config.go b/supernode/services/cascade/config.go index 7a0f1ef2..01401d41 100644 --- a/supernode/services/cascade/config.go +++ b/supernode/services/cascade/config.go @@ -8,6 +8,8 @@ import ( type Config struct { common.Config `mapstructure:",squash" json:"-"` - RaptorQServiceAddress string `mapstructure:"-" json:"-"` - RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` + RaptorQServiceAddress string `mapstructure:"-" json:"-"` + RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` + // MetricsDisabled toggles upload/download metrics for cascade service + MetricsDisabled bool `mapstructure:"-" json:"-"` } diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index 363834bc..9da3dc1e 100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -165,9 +165,11 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( } logtrace.Info(ctx, "Retrieving target-required symbols for decode", fields) - // Enable retrieve metrics capture for this action - cm.StartRetrieveCapture(actionID) - defer cm.StopRetrieveCapture(actionID) + + if !task.config.MetricsDisabled { + cm.StartRetrieveCapture(actionID) + defer cm.StopRetrieveCapture(actionID) + } // Measure symbols batch retrieve duration retrieveStart := time.Now() @@ -201,17 +203,22 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( } decodeMS := time.Since(decodeStart).Milliseconds() - // Set minimal retrieve summary and emit event strictly from internal collector - cm.SetRetrieveSummary(actionID, retrieveMS, decodeMS) - payload := cm.BuildDownloadEventPayloadFromCollector(actionID) - if retrieve, ok := payload["retrieve"].(map[string]any); ok { - retrieve["target_required_percent"] = targetRequiredPercent - retrieve["target_required_count"] = targetRequiredCount - retrieve["total_symbols"] = 
totalSymbols - } - if b, err := json.MarshalIndent(payload, "", " "); err == nil { - task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", send) - } + // Set minimal retrieve summary and emit event strictly from internal collector + if !task.config.MetricsDisabled { + cm.SetRetrieveSummary(actionID, retrieveMS, decodeMS) + payload := cm.BuildDownloadEventPayloadFromCollector(actionID) + if retrieve, ok := payload["retrieve"].(map[string]any); ok { + retrieve["target_required_percent"] = targetRequiredPercent + retrieve["target_required_count"] = targetRequiredCount + retrieve["total_symbols"] = totalSymbols + } + if b, err := json.MarshalIndent(payload, "", " "); err == nil { + task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", send) + } + } else { + // Send minimal hardcoded event when metrics disabled + task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, "Download completed (metrics disabled)", "", "", send) + } fileHash, err := crypto.HashFileIncrementally(decodeInfo.FilePath, 0) if err != nil { diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go index dd6e1e77..4739e0d9 100644 --- a/supernode/services/cascade/register.go +++ b/supernode/services/cascade/register.go @@ -1,11 +1,11 @@ package cascade import ( - "context" - "os" + "context" + "os" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/supernode/services/common" ) // RegisterRequest contains parameters for upload request @@ -162,8 +162,10 @@ func (task *CascadeRegistrationTask) Register( if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { return err } - // Emit compact analytics payload from centralized metrics collector - 
task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) + // Emit compact analytics payload from centralized metrics collector (optional) + if !task.config.MetricsDisabled { + task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) + } resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) if err != nil { diff --git a/supernode/services/cascade/service.go b/supernode/services/cascade/service.go index a1d9898b..b5b2870a 100644 --- a/supernode/services/cascade/service.go +++ b/supernode/services/cascade/service.go @@ -56,11 +56,11 @@ func (service *CascadeService) GetRunningTasks() []string { // NewCascadeService returns a new CascadeService instance func NewCascadeService(config *Config, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService { - return &CascadeService{ - config: config, - SuperNodeService: base.NewSuperNodeService(p2pClient), - LumeraClient: adaptors.NewLumeraClient(lumera), - P2P: adaptors.NewP2PService(p2pClient, rqstore), - RQ: adaptors.NewCodecService(codec), - } + return &CascadeService{ + config: config, + SuperNodeService: base.NewSuperNodeService(p2pClient), + LumeraClient: adaptors.NewLumeraClient(lumera), + P2P: adaptors.NewP2PService(p2pClient, rqstore, config.MetricsDisabled), + RQ: adaptors.NewCodecService(codec), + } } diff --git a/tests/integration/p2p/p2p_integration_test.go b/tests/integration/p2p/p2p_integration_test.go index bce71f58..d5df6dc2 100644 --- a/tests/integration/p2p/p2p_integration_test.go +++ b/tests/integration/p2p/p2p_integration_test.go @@ -108,7 +108,7 @@ func TestP2PBasicIntegration(t *testing.T) { // Add debug logging log.Printf("Storing batch with keys: %v", expectedKeys) - err := services[0].StoreBatch(ctx, batchData, 0, taskID) + err := services[0].StoreBatch(ctx, batchData, 0, taskID) require.NoError(t, err) // Add immediate verification @@ -203,7 +203,8 @@ func SetupTestP2PNodes(t *testing.T, ctx context.Context) 
([]p2p.Client, []*rqst require.NoError(t, err, "failed to create rqstore for node %d: %v", i, err) rqStores = append(rqStores, rqStore) - service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil) + // Disable metrics in integration tests by default + service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil, true) require.NoError(t, err, "failed to create p2p service for node %d: %v", i, err) // Start P2P service From c53d7c32d71ff5c1b0f4607833104c77512353a2 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Mon, 29 Sep 2025 19:28:29 +0500 Subject: [PATCH 11/27] Silence all logs --- p2p/kademlia/bootstrap.go | 2 +- p2p/kademlia/dht.go | 174 +++++++++--------- p2p/kademlia/fetch_and_store.go | 32 ++-- p2p/kademlia/network.go | 32 ++-- p2p/kademlia/node_activity.go | 4 +- p2p/kademlia/redundant_data.go | 14 +- p2p/kademlia/replication.go | 34 ++-- p2p/kademlia/rq_symbols.go | 8 +- p2p/kademlia/store/meta/meta.go | 10 +- p2p/kademlia/store/sqlite/meta_worker.go | 26 +-- p2p/kademlia/store/sqlite/sqlite.go | 16 +- p2p/p2p.go | 70 +++---- pkg/codec/decode.go | 104 +++++------ pkg/codec/raptorq.go | 6 +- pkg/common/task/task.go | 6 +- pkg/common/task/worker.go | 2 +- pkg/dd/image_rareness.go | 4 +- pkg/dd/status.go | 4 +- pkg/lumera/connection.go | 2 +- pkg/lumera/modules/auth/impl.go | 4 +- pkg/lumera/modules/tx/impl.go | 10 +- pkg/net/grpc/server/server.go | 4 +- pkg/net/interceptor.go | 4 +- pkg/storage/queries/health_check.go | 6 +- pkg/storage/queries/self_healing.go | 10 +- pkg/storage/queries/storage_challenge.go | 6 +- pkg/storage/queries/task_history.go | 2 +- sdk/task/task.go | 2 +- supernode/cmd/service.go | 2 +- supernode/cmd/start.go | 54 +++--- supernode/cmd/supernode.go | 8 +- .../server/cascade/cascade_action_server.go | 24 +-- supernode/node/supernode/gateway/server.go | 4 +- supernode/node/supernode/server/server.go | 6 +- supernode/services/cascade/adaptors/p2p.go | 34 ++-- supernode/services/cascade/download.go | 
63 ++++--- supernode/services/cascade/helper.go | 32 ++-- supernode/services/cascade/register.go | 50 ++--- .../services/common/base/supernode_service.go | 2 +- .../services/common/base/supernode_task.go | 2 +- supernode/services/common/storage/handler.go | 12 +- .../services/common/supernode/service.go | 2 +- supernode/services/verifier/verifier.go | 2 +- 43 files changed, 447 insertions(+), 448 deletions(-) diff --git a/p2p/kademlia/bootstrap.go b/p2p/kademlia/bootstrap.go index 5b29f44d..25dc3b54 100644 --- a/p2p/kademlia/bootstrap.go +++ b/p2p/kademlia/bootstrap.go @@ -102,7 +102,7 @@ func (s *DHT) setBootstrapNodesFromConfigVar(ctx context.Context, bootstrapNodes }) } s.options.BootstrapNodes = nodes - logtrace.Info(ctx, "Bootstrap nodes set from config var", logtrace.Fields{ + logtrace.Debug(ctx, "Bootstrap nodes set from config var", logtrace.Fields{ logtrace.FieldModule: "p2p", "bootstrap_nodes": nodes, }) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index b83fb34e..df2be5c7 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -43,9 +43,9 @@ const ( lowSpaceThreshold = 50 // GB batchRetrieveSize = 1000 - storeSameSymbolsBatchConcurrency = 3 - fetchSymbolsBatchConcurrency = 6 - minimumDataStoreSuccessRate = 75.0 + storeSameSymbolsBatchConcurrency = 3 + fetchSymbolsBatchConcurrency = 6 + minimumDataStoreSuccessRate = 75.0 maxIterations = 4 macConcurrentNetworkStoreCalls = 16 @@ -104,7 +104,7 @@ func (s *DHT) bootstrapIgnoreList(ctx context.Context) error { } if added > 0 { - logtrace.Info(ctx, "Ignore list bootstrapped from replication info", logtrace.Fields{ + logtrace.Debug(ctx, "Ignore list bootstrapped from replication info", logtrace.Fields{ logtrace.FieldModule: "p2p", "ignored_count": added, }) @@ -125,7 +125,7 @@ func (s *DHT) ConnPoolSnapshot() map[string]int64 { // Options contains configuration options for the queries node type Options struct { - ID []byte + ID []byte // The queries IPv4 or IPv6 address IP string @@ -140,11 
+140,11 @@ type Options struct { // Lumera client for interacting with the blockchain LumeraClient lumera.Client - // Keyring for credentials - Keyring keyring.Keyring + // Keyring for credentials + Keyring keyring.Keyring - // MetricsDisabled gates DHT-level metrics emission (p2pmetrics hooks and snapshots) - MetricsDisabled bool + // MetricsDisabled gates DHT-level metrics emission (p2pmetrics hooks and snapshots) + MetricsDisabled bool } // NewDHT returns a new DHT node @@ -362,7 +362,7 @@ func (s *DHT) Store(ctx context.Context, data []byte, typ int) (string, error) { // measured success rate for node RPCs is below the configured minimum, an error // is returned. Metrics are not returned through the API. func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID string) error { - logtrace.Info(ctx, "Store DB batch begin", logtrace.Fields{ + logtrace.Debug(ctx, "Store DB batch begin", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, "records": len(values), @@ -370,7 +370,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s if err := s.store.StoreBatch(ctx, values, typ, true); err != nil { return fmt.Errorf("store batch: %v", err) } - logtrace.Info(ctx, "Store DB batch done, store network batch begin", logtrace.Fields{ + logtrace.Debug(ctx, "Store DB batch done, store network batch begin", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, }) @@ -380,7 +380,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s return fmt.Errorf("iterate batch store: %v", err) } - logtrace.Info(ctx, "Store network batch workers done", logtrace.Fields{ + logtrace.Debug(ctx, "Store network batch workers done", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, }) @@ -426,13 +426,13 @@ func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]by return nil, errors.Errorf("retrieve from peer: %w", err) } 
if len(peerValue) > 0 { - logtrace.Info(ctx, "Not found locally, retrieved from other nodes", logtrace.Fields{ + logtrace.Debug(ctx, "Not found locally, retrieved from other nodes", logtrace.Fields{ logtrace.FieldModule: "dht", "key": dbKey, "data_len": len(peerValue), }) } else { - logtrace.Info(ctx, "Not found locally, not found in other nodes", logtrace.Fields{ + logtrace.Debug(ctx, "Not found locally, not found in other nodes", logtrace.Fields{ logtrace.FieldModule: "dht", "key": dbKey, }) @@ -531,7 +531,7 @@ func (s *DHT) GetValueFromNode(ctx context.Context, target []byte, n *Node) ([]b response, err := s.network.Call(cctx, request, false) if err != nil { - logtrace.Info(ctx, "Network call request failed", logtrace.Fields{ + logtrace.Debug(ctx, "Network call request failed", logtrace.Fields{ logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "request": request.String(), @@ -573,7 +573,7 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by // update the running goroutines number++ - logtrace.Info(ctx, "Start work for node", logtrace.Fields{ + logtrace.Debug(ctx, "Start work for node", logtrace.Fields{ logtrace.FieldModule: "p2p", "iterate_type": iterativeType, "node": node.String(), @@ -600,7 +600,7 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by // send the request and receive the response response, err := s.network.Call(ctx, request, false) if err != nil { - logtrace.Info(ctx, "Network call request failed", logtrace.Fields{ + logtrace.Debug(ctx, "Network call request failed", logtrace.Fields{ logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "request": request.String(), @@ -637,7 +637,7 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result batchHexKeys := hexKeys[start:end] - logtrace.Info(ctx, "Processing batch of local keys", logtrace.Fields{ + logtrace.Debug(ctx, "Processing batch of local keys", logtrace.Fields{ logtrace.FieldModule: 
"dht", "batch_size": len(batchHexKeys), "total_keys": len(hexKeys), @@ -743,9 +743,9 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, return nil, fmt.Errorf("fetch and add local keys: %v", err) } // Report how many were found locally, for event metrics - if !s.options.MetricsDisabled { - p2pmetrics.ReportFoundLocal(p2pmetrics.TaskIDFromContext(ctx), int(foundLocalCount)) - } + if !s.options.MetricsDisabled { + p2pmetrics.ReportFoundLocal(p2pmetrics.TaskIDFromContext(ctx), int(foundLocalCount)) + } if foundLocalCount >= required { return result, nil } @@ -794,9 +794,9 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, // Record batch retrieve stats for internal DHT snapshot window s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(start)) // Also feed retrieve counts into the per-task collector for stream events - if !s.options.MetricsDisabled { - p2pmetrics.SetRetrieveBatchSummary(p2pmetrics.TaskIDFromContext(ctx), len(keys), int(required), int(foundLocalCount), netFound, time.Since(start).Milliseconds()) - } + if !s.options.MetricsDisabled { + p2pmetrics.SetRetrieveBatchSummary(p2pmetrics.TaskIDFromContext(ctx), len(keys), int(required), int(foundLocalCount), netFound, time.Since(start).Milliseconds()) + } return result, nil } @@ -907,7 +907,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, defer cancel() for nodeID, node := range nodes { if s.ignorelist.Banned(node) { - logtrace.Info(ctx, "Ignore banned node in iterate batch get values", logtrace.Fields{ + logtrace.Debug(ctx, "Ignore banned node in iterate batch get values", logtrace.Fields{ logtrace.FieldModule: "dht", "node": node.String(), }) @@ -954,16 +954,16 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, } mu.Unlock() // record failed RPC per-node - if !s.options.MetricsDisabled { - 
p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ - IP: node.IP, - Address: node.String(), - Keys: 0, - Success: false, - Error: err.Error(), - DurationMS: time.Since(callStart).Milliseconds(), - }) - } + if !s.options.MetricsDisabled { + p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ + IP: node.IP, + Address: node.String(), + Keys: 0, + Success: false, + Error: err.Error(), + DurationMS: time.Since(callStart).Milliseconds(), + }) + } return } @@ -986,22 +986,22 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, } // record successful RPC per-node (returned may be 0). Success is true when no error. - if !s.options.MetricsDisabled { - p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ - IP: node.IP, - Address: node.String(), - Keys: returned, - Success: true, - Error: "", - DurationMS: time.Since(callStart).Milliseconds(), - }) - } + if !s.options.MetricsDisabled { + p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ + IP: node.IP, + Address: node.String(), + Keys: returned, + Success: true, + Error: "", + DurationMS: time.Since(callStart).Milliseconds(), + }) + } }(node, nodeID) } wg.Wait() - logtrace.Info(ctx, "Iterate batch get values done", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate batch get values done", logtrace.Fields{ logtrace.FieldModule: "dht", "found_count": atomic.LoadInt32(&foundCount), }) @@ -1083,7 +1083,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // find the closest contacts for the target node from queries route tables nl, _ := s.ht.closestContacts(Alpha, target, igList) if len(igList) > 0 { - logtrace.Info(ctx, "Closest contacts", logtrace.Fields{ + logtrace.Debug(ctx, "Closest contacts", logtrace.Fields{ logtrace.FieldModule: "p2p", "nodes": nl.String(), "ignored": s.ignorelist.String(), @@ -1093,7 +1093,7 @@ func (s *DHT) iterate(ctx context.Context, 
iterativeType int, target []byte, dat if nl.Len() == 0 { return nil, nil } - logtrace.Info(ctx, "Iterate start", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate start", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "type": iterativeType, @@ -1107,7 +1107,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat if iterativeType == IterateFindNode { hashedTargetID, _ := utils.Blake3Hash(target) bucket := s.ht.bucketIndex(s.ht.self.HashedID, hashedTargetID) - logtrace.Info(ctx, "Bucket for target", logtrace.Fields{ + logtrace.Debug(ctx, "Bucket for target", logtrace.Fields{ logtrace.FieldModule: "p2p", "target": sKey, }) @@ -1131,7 +1131,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // Set a maximum number of iterations to prevent indefinite looping maxIterations := 5 // Adjust the maximum iterations as needed - logtrace.Info(ctx, "Begin iteration", logtrace.Fields{ + logtrace.Debug(ctx, "Begin iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1142,7 +1142,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat case <-ctx.Done(): return nil, fmt.Errorf("iterate cancelled: %w", ctx.Err()) case <-timeout: - logtrace.Info(ctx, "Iteration timed out", logtrace.Fields{ + logtrace.Debug(ctx, "Iteration timed out", logtrace.Fields{ logtrace.FieldModule: "p2p", }) return nil, nil @@ -1174,7 +1174,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // Stop search if no more nodes to contact if !searchRest && len(nl.Nodes) == 0 { - logtrace.Info(ctx, "Search stopped", logtrace.Fields{ + logtrace.Debug(ctx, "Search stopped", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1186,7 +1186,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat nl.Comparator = target nl.Sort() - logtrace.Info(ctx, "Iterate sorted nodes", 
logtrace.Fields{ + logtrace.Debug(ctx, "Iterate sorted nodes", logtrace.Fields{ logtrace.FieldModule: "p2p", "id": base58.Encode(s.ht.self.ID), "iterate": iterativeType, @@ -1223,7 +1223,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat } } - logtrace.Info(ctx, "Finish iteration without results", logtrace.Fields{ + logtrace.Debug(ctx, "Finish iteration without results", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1244,7 +1244,7 @@ func (s *DHT) handleResponses(ctx context.Context, responses <-chan *Message, nl v, ok := response.Data.(*FindValueResponse) if ok { if v.Status.Result == ResultOk && len(v.Value) > 0 { - logtrace.Info(ctx, "Iterate found value from network", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate found value from network", logtrace.Fields{ logtrace.FieldModule: "p2p", }) return nl, v.Value @@ -1274,7 +1274,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] // nl will have the closest nodes to the target value, it will ignore the nodes in igList nl, _ := s.ht.closestContacts(Alpha, target, igList) if len(igList) > 0 { - logtrace.Info(ctx, "Closest contacts", logtrace.Fields{ + logtrace.Debug(ctx, "Closest contacts", logtrace.Fields{ logtrace.FieldModule: "p2p", "nodes": nl.String(), "ignored": s.ignorelist.String(), @@ -1289,7 +1289,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] searchRest := false // keep track of contacted nodes so that we don't hit them again contacted := make(map[string]bool) - logtrace.Info(ctx, "Begin iteration", logtrace.Fields{ + logtrace.Debug(ctx, "Begin iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1298,7 +1298,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] var closestNode *Node var iterationCount int for iterationCount = 0; iterationCount < maxIterations; iterationCount++ { - 
logtrace.Info(ctx, "Begin find value", logtrace.Fields{ + logtrace.Debug(ctx, "Begin find value", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "nl": nl.Len(), @@ -1318,7 +1318,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] // if the closest node is the same as the last iteration and we don't want to search rest of nodes, we are done if !searchRest && (closestNode != nil && bytes.Equal(nl.Nodes[0].ID, closestNode.ID)) { - logtrace.Info(ctx, "Closest node is the same as the last iteration", logtrace.Fields{ + logtrace.Debug(ctx, "Closest node is the same as the last iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1337,7 +1337,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] nl.Sort() - logtrace.Info(ctx, "Iteration progress", logtrace.Fields{ + logtrace.Debug(ctx, "Iteration progress", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1346,7 +1346,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] }) } - logtrace.Info(ctx, "Finished iterations without results", logtrace.Fields{ + logtrace.Debug(ctx, "Finished iterations without results", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1526,7 +1526,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, for i := Alpha; i < nl.Len() && finalStoreCount < int32(Alpha); i++ { n := nl.Nodes[i] if s.ignorelist.Banned(n) { - logtrace.Info(ctx, "Ignore banned node during sequential store", logtrace.Fields{ + logtrace.Debug(ctx, "Ignore banned node during sequential store", logtrace.Fields{ logtrace.FieldModule: "p2p", "node": n.String(), "task_id": taskID, @@ -1559,7 +1559,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, skey, _ := utils.Blake3Hash(data) if finalStoreCount >= int32(Alpha) { - logtrace.Info(ctx, "Store data 
to alpha nodes success", logtrace.Fields{ + logtrace.Debug(ctx, "Store data to alpha nodes success", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": taskID, "len_total_nodes": nl.Len(), @@ -1569,7 +1569,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, return nil } - logtrace.Info(ctx, "Store data to alpha nodes failed", logtrace.Fields{ + logtrace.Debug(ctx, "Store data to alpha nodes failed", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": taskID, "store_count": finalStoreCount, @@ -1582,7 +1582,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, func (s *DHT) removeNode(ctx context.Context, node *Node) { // ensure this is not itself address if bytes.Equal(node.ID, s.ht.self.ID) { - logtrace.Info(ctx, "Trying to remove itself", logtrace.Fields{ + logtrace.Debug(ctx, "Trying to remove itself", logtrace.Fields{ logtrace.FieldModule: "p2p", }) return @@ -1598,7 +1598,7 @@ func (s *DHT) removeNode(ctx context.Context, node *Node) { "bucket": index, }) } else { - logtrace.Info(ctx, "Removed node from bucket success", logtrace.Fields{ + logtrace.Debug(ctx, "Removed node from bucket success", logtrace.Fields{ logtrace.FieldModule: "p2p", "node": node.String(), "bucket": index, @@ -1656,7 +1656,7 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i knownNodes := make(map[string]*Node) hashes := make([][]byte, len(values)) - logtrace.Info(ctx, "Iterate batch store begin", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate batch store begin", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "keys": len(values), @@ -1725,16 +1725,16 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i } // Emit per-node store RPC call via metrics bridge (no P2P API coupling) - if !s.options.MetricsDisabled { - p2pmetrics.RecordStore(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ - IP: nodeIP, - Address: nodeAddr, - Keys: 
response.KeysCount, - Success: errMsg == "" && response.Error == nil, - Error: errMsg, - DurationMS: response.DurationMS, - }) - } + if !s.options.MetricsDisabled { + p2pmetrics.RecordStore(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ + IP: nodeIP, + Address: nodeAddr, + Keys: response.KeysCount, + Success: errMsg == "" && response.Error == nil, + Error: errMsg, + DurationMS: response.DurationMS, + }) + } } @@ -1743,14 +1743,14 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i successRate := float64(successful) / float64(requests) * 100 if successRate >= minimumDataStoreSuccessRate { - logtrace.Info(ctx, "Successful store operations", logtrace.Fields{ + logtrace.Debug(ctx, "Successful store operations", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "success_rate": fmt.Sprintf("%.2f%%", successRate), }) return nil } else { - logtrace.Info(ctx, "Failed to achieve desired success rate", logtrace.Fields{ + logtrace.Debug(ctx, "Failed to achieve desired success rate", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "success_rate": fmt.Sprintf("%.2f%%", successRate), @@ -1777,12 +1777,12 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ var wg sync.WaitGroup for key, node := range nodes { - logtrace.Info(ctx, "Node", logtrace.Fields{ + logtrace.Debug(ctx, "Node", logtrace.Fields{ logtrace.FieldModule: "dht", "port": node.String(), }) if s.ignorelist.Banned(node) { - logtrace.Info(ctx, "Ignoring banned node in batch store network call", logtrace.Fields{ + logtrace.Debug(ctx, "Ignoring banned node in batch store network call", logtrace.Fields{ logtrace.FieldModule: "dht", "node": node.String(), }) @@ -1810,7 +1810,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ totalBytes += len(values[idx]) } - logtrace.Info(ctx, "Batch store to node", logtrace.Fields{ + logtrace.Debug(ctx, "Batch store to node", logtrace.Fields{ 
logtrace.FieldModule: "dht", "keys": len(toStore), "size_before_compress": utils.BytesIntToMB(totalBytes), @@ -1818,7 +1818,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ // Skip empty payloads: avoid sending empty store RPCs and do not record no-op metrics. if len(toStore) == 0 { - logtrace.Info(ctx, "Skipping store RPC with empty payload", logtrace.Fields{ + logtrace.Debug(ctx, "Skipping store RPC with empty payload", logtrace.Fields{ logtrace.FieldModule: "dht", "node": receiver.String(), }) @@ -1835,7 +1835,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ s.metrics.IncHotPathBanIncr() } - logtrace.Info(ctx, "Network call batch store request failed", logtrace.Fields{ + logtrace.Debug(ctx, "Network call batch store request failed", logtrace.Fields{ logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "request": request.String(), @@ -1856,7 +1856,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ } func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[string]*Node, contacted map[string]bool, txid string) (chan *MessageWithError, bool) { - logtrace.Info(ctx, "Batch find node begin", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find node begin", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": txid, "nodes_count": len(nodes), @@ -1879,7 +1879,7 @@ func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[str continue } if s.ignorelist.Banned(node) { - logtrace.Info(ctx, "Ignoring banned node in batch find call", logtrace.Fields{ + logtrace.Debug(ctx, "Ignoring banned node in batch find call", logtrace.Fields{ logtrace.FieldModule: "dht", "node": node.String(), "txid": txid, @@ -1927,7 +1927,7 @@ func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[str } wg.Wait() close(responses) - logtrace.Info(ctx, "Batch find node done", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find 
node done", logtrace.Fields{ logtrace.FieldModule: "dht", "nodes_count": len(nodes), "len_resp": len(responses), diff --git a/p2p/kademlia/fetch_and_store.go b/p2p/kademlia/fetch_and_store.go index 9803bf3d..d7bc0f28 100644 --- a/p2p/kademlia/fetch_and_store.go +++ b/p2p/kademlia/fetch_and_store.go @@ -26,12 +26,12 @@ const ( // FetchAndStore fetches all keys from the queries TODO replicate list, fetches value from respective nodes and stores them in the queries store func (s *DHT) FetchAndStore(ctx context.Context) error { - logtrace.Info(ctx, "Getting fetch and store keys", logtrace.Fields{}) + logtrace.Debug(ctx, "Getting fetch and store keys", logtrace.Fields{}) keys, err := s.store.GetAllToDoRepKeys(failedKeysClosestContactsLookupCount+maxBatchAttempts+1, totalMaxAttempts) if err != nil { return fmt.Errorf("get all keys error: %w", err) } - logtrace.Info(ctx, "got keys from queries store", logtrace.Fields{"count": len(keys)}) + logtrace.Debug(ctx, "got keys from queries store", logtrace.Fields{"count": len(keys)}) if len(keys) == 0 { return nil @@ -79,7 +79,7 @@ func (s *DHT) FetchAndStore(ctx context.Context) error { return } - logtrace.Info(cctx, "iterate fetch for replication success", logtrace.Fields{"key": info.Key, "ip": info.IP}) + logtrace.Debug(cctx, "iterate fetch for replication success", logtrace.Fields{"key": info.Key, "ip": info.IP}) } if err := s.store.Store(cctx, sKey, value, 0, false); err != nil { @@ -94,7 +94,7 @@ func (s *DHT) FetchAndStore(ctx context.Context) error { atomic.AddInt32(&successCounter, 1) // Increment the counter atomically - logtrace.Info(cctx, "fetch & store key success", logtrace.Fields{"key": info.Key, "ip": info.IP}) + logtrace.Debug(cctx, "fetch & store key success", logtrace.Fields{"key": info.Key, "ip": info.IP}) }(key) time.Sleep(100 * time.Millisecond) @@ -102,7 +102,7 @@ func (s *DHT) FetchAndStore(ctx context.Context) error { //wg.Wait() - logtrace.Info(ctx, "Successfully fetched & stored keys", 
logtrace.Fields{"todo-keys": len(keys), "successfully-added-keys": atomic.LoadInt32(&successCounter)}) // Log the final count + logtrace.Debug(ctx, "Successfully fetched & stored keys", logtrace.Fields{"todo-keys": len(keys), "successfully-added-keys": atomic.LoadInt32(&successCounter)}) // Log the final count return nil } @@ -114,7 +114,7 @@ func (s *DHT) BatchFetchAndStoreFailedKeys(ctx context.Context) error { if err != nil { return fmt.Errorf("get all keys error: %w", err) } - logtrace.Info(ctx, "read failed keys from store", logtrace.Fields{"count": len(keys)}) + logtrace.Debug(ctx, "read failed keys from store", logtrace.Fields{"count": len(keys)}) if len(keys) == 0 { return nil @@ -143,7 +143,7 @@ func (s *DHT) BatchFetchAndStoreFailedKeys(ctx context.Context) error { repKeys = append(repKeys, repKey) } } - logtrace.Info(ctx, "got 2nd tier replication keys from queries store", logtrace.Fields{"count": len(repKeys)}) + logtrace.Debug(ctx, "got 2nd tier replication keys from queries store", logtrace.Fields{"count": len(repKeys)}) if err := s.GroupAndBatchFetch(ctx, repKeys, 0, false); err != nil { logtrace.Error(ctx, "group and batch fetch failed-keys error", logtrace.Fields{logtrace.FieldError: err}) @@ -160,7 +160,7 @@ func (s *DHT) BatchFetchAndStore(ctx context.Context) error { if err != nil { return fmt.Errorf("get all keys error: %w", err) } - logtrace.Info(ctx, "got batch todo rep-keys from queries store", logtrace.Fields{"count": len(keys)}) + logtrace.Debug(ctx, "got batch todo rep-keys from queries store", logtrace.Fields{"count": len(keys)}) if len(keys) == 0 { return nil @@ -213,12 +213,12 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, totalKeysFound := 0 for len(stringKeys) > 0 && iterations < maxSingleBatchIterations { iterations++ - logtrace.Info(ctx, "fetching batch values from node", logtrace.Fields{"node-ip": node.IP, "count": len(stringKeys), "keys[0]": stringKeys[0], "keys[len()]": 
stringKeys[len(stringKeys)-1]}) + logtrace.Debug(ctx, "fetching batch values from node", logtrace.Fields{"node-ip": node.IP, "count": len(stringKeys), "keys[0]": stringKeys[0], "keys[len()]": stringKeys[len(stringKeys)-1]}) isDone, retMap, failedKeys, err := s.GetBatchValuesFromNode(ctx, stringKeys, node) if err != nil { // Log the error but don't stop the process, continue to the next node - logtrace.Info(ctx, "failed to get batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) + logtrace.Debug(ctx, "failed to get batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) continue } @@ -238,7 +238,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, err = s.store.StoreBatch(ctx, response, datatype, isOriginal) if err != nil { // Log the error but don't stop the process, continue to the next node - logtrace.Info(ctx, "failed to store batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) + logtrace.Debug(ctx, "failed to store batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) continue } @@ -246,7 +246,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, err = s.store.BatchDeleteRepKeys(stringDelKeys) if err != nil { // Log the error but don't stop the process, continue to the next node - logtrace.Info(ctx, "failed to delete rep keys", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) + logtrace.Debug(ctx, "failed to delete rep keys", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) continue } } else { @@ -255,7 +255,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, if isDone && len(failedKeys) > 0 { if err := s.store.IncrementAttempts(failedKeys); err != nil { - logtrace.Info(ctx, "failed to increment attempts", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) + logtrace.Debug(ctx, "failed to increment attempts", 
logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) // not adding 'continue' here because we want to delete the keys from the todo list } } else if isDone { @@ -265,7 +265,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, } } - logtrace.Info(ctx, "fetch batch values from node successfully", logtrace.Fields{"node-ip": node.IP, "count": totalKeysFound, "iterations": iterations}) + logtrace.Debug(ctx, "fetch batch values from node successfully", logtrace.Fields{"node-ip": node.IP, "count": totalKeysFound, "iterations": iterations}) } } @@ -274,7 +274,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, // GetBatchValuesFromNode get values from node in bateches func (s *DHT) GetBatchValuesFromNode(ctx context.Context, keys []string, n *Node) (bool, map[string][]byte, []string, error) { - logtrace.Info(ctx, "sending batch fetch request", logtrace.Fields{"node-ip": n.IP, "keys": len(keys)}) + logtrace.Debug(ctx, "sending batch fetch request", logtrace.Fields{"node-ip": n.IP, "keys": len(keys)}) messageType := BatchFindValues @@ -347,7 +347,7 @@ func (s *DHT) GetBatchValuesFromNode(ctx context.Context, keys []string, n *Node if err != nil { return isDone, nil, nil, fmt.Errorf("failed to verify and filter data: %w", err) } - logtrace.Info(ctx, "batch fetch response rcvd and keys verified", logtrace.Fields{"node-ip": n.IP, "received-keys": len(decompressedMap), "verified-keys": len(retMap), "failed-keys": len(failedKeys)}) + logtrace.Debug(ctx, "batch fetch response rcvd and keys verified", logtrace.Fields{"node-ip": n.IP, "received-keys": len(decompressedMap), "verified-keys": len(retMap), "failed-keys": len(failedKeys)}) return v.Done, retMap, failedKeys, nil } diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index 935d1583..c887eab1 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -336,7 +336,7 @@ func (s *Network) handleReplicateRequest(ctx context.Context, 
req *ReplicateData return fmt.Errorf("unable to store batch replication keys: %w", err) } - logtrace.Info(ctx, "Store batch replication keys stored", logtrace.Fields{ + logtrace.Debug(ctx, "Store batch replication keys stored", logtrace.Fields{ logtrace.FieldModule: "p2p", "to-store-keys": len(keysToStore), "rcvd-keys": len(req.Keys), @@ -649,7 +649,7 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd if _, e := cw.secureConn.Write(data); e != nil { cw.mtx.Unlock() if isStaleConnError(e) && !retried { - logtrace.Info(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ + logtrace.Debug(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ logtrace.FieldModule: "p2p", "remote": remoteAddr, "message_type": msgType, @@ -690,7 +690,7 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd cw.mtx.Unlock() if e != nil { if isStaleConnError(e) && !retried { - logtrace.Info(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{ + logtrace.Debug(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{ logtrace.FieldModule: "p2p", "remote": remoteAddr, "message_type": msgType, @@ -743,7 +743,7 @@ Retry: } if _, err := conn.Write(data); err != nil { if isStaleConnError(err) && !retried { - logtrace.Info(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ + logtrace.Debug(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ logtrace.FieldModule: "p2p", "remote": remoteAddr, "message_type": msgType, @@ -777,7 +777,7 @@ Retry: _ = conn.SetDeadline(time.Time{}) if err != nil { if isStaleConnError(err) && !retried { - logtrace.Info(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{ + logtrace.Debug(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{ logtrace.FieldModule: "p2p", "remote": remoteAddr, "message_type": msgType, @@ -841,7 +841,7 @@ func (s *Network) handleBatchFindValues(ctx 
context.Context, message *Message, r // Try to acquire the semaphore, wait up to 1 minute logtrace.Debug(ctx, "Attempting to acquire semaphore immediately", logtrace.Fields{logtrace.FieldModule: "p2p"}) if !s.sem.TryAcquire(1) { - logtrace.Info(ctx, "Immediate acquisition failed. Waiting up to 1 minute", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "Immediate acquisition failed. Waiting up to 1 minute", logtrace.Fields{logtrace.FieldModule: "p2p"}) ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Minute) defer cancel() @@ -850,7 +850,7 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r // failed to acquire semaphore within 1 minute return s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, errorBusy) } - logtrace.Info(ctx, "Semaphore acquired after waiting", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "Semaphore acquired after waiting", logtrace.Fields{logtrace.FieldModule: "p2p"}) } // Add a defer function to recover from panic @@ -936,7 +936,7 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) } - logtrace.Info(ctx, "Batch get values request received", logtrace.Fields{ + logtrace.Debug(ctx, "Batch get values request received", logtrace.Fields{ logtrace.FieldModule: "p2p", "from": message.Sender.String(), }) @@ -966,7 +966,7 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) } - logtrace.Info(ctx, "Batch get values request processed", logtrace.Fields{ + logtrace.Debug(ctx, "Batch get values request processed", logtrace.Fields{ logtrace.FieldModule: "p2p", "requested-keys": len(keys), "found": count, @@ -1006,7 +1006,7 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, func (s 
*Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFindValuesRequest, ip string, reqID string) (isDone bool, compressedData []byte, err error) { // log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("keys", len(req.Keys)).WithField("from-ip", ip).Info("batch find values request received") - logtrace.Info(ctx, "Batch find values request received", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find values request received", logtrace.Fields{ logtrace.FieldModule: "p2p", "from": ip, "keys": len(req.Keys), @@ -1029,7 +1029,7 @@ func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFi return false, nil, fmt.Errorf("failed to retrieve batch values: %w", err) } // log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("values-len", len(values)).WithField("found", count).WithField("from-ip", ip).Info("batch find values request processed") - logtrace.Info(ctx, "Batch find values request processed", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find values request processed", logtrace.Fields{ logtrace.FieldModule: "p2p", "p2p-req-id": reqID, "values-len": len(values), @@ -1044,7 +1044,7 @@ func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFi // log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("compressed-data-len", utils.BytesToMB(uint64(len(compressedData)))).WithField("found", count). 
// WithField("from-ip", ip).Info("batch find values response sent") - logtrace.Info(ctx, "Batch find values response sent", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find values response sent", logtrace.Fields{ logtrace.FieldModule: "p2p", "p2p-req-id": reqID, "compressed-data-len": utils.BytesToMB(uint64(len(compressedData))), @@ -1208,7 +1208,7 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r } // log.P2P().WithContext(ctx).Info("handle batch store data request received") - logtrace.Info(ctx, "Handle batch store data request received", logtrace.Fields{ + logtrace.Debug(ctx, "Handle batch store data request received", logtrace.Fields{ logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), @@ -1238,7 +1238,7 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r }, } // log.P2P().WithContext(ctx).Info("handle batch store data request processed") - logtrace.Info(ctx, "Handle batch store data request processed", logtrace.Fields{ + logtrace.Debug(ctx, "Handle batch store data request processed", logtrace.Fields{ logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), @@ -1283,7 +1283,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re closestMap := make(map[string][]*Node) // log.WithContext(ctx).WithField("sender", message.Sender.String()).Info("Batch Find Nodes Request Received") - logtrace.Info(ctx, "Batch Find Nodes Request Received", logtrace.Fields{ + logtrace.Debug(ctx, "Batch Find Nodes Request Received", logtrace.Fields{ logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "hashed-targets": len(request.HashedTarget), @@ -1294,7 +1294,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re } response.ClosestNodes = closestMap // log.WithContext(ctx).WithField("sender", message.Sender.String()).Info("Batch Find Nodes Request Processed") - 
logtrace.Info(ctx, "Batch Find Nodes Request Processed", logtrace.Fields{ + logtrace.Debug(ctx, "Batch Find Nodes Request Processed", logtrace.Fields{ logtrace.FieldModule: "p2p", "sender": message.Sender.String(), }) diff --git a/p2p/kademlia/node_activity.go b/p2p/kademlia/node_activity.go index cc7089d6..88e09f7a 100644 --- a/p2p/kademlia/node_activity.go +++ b/p2p/kademlia/node_activity.go @@ -25,7 +25,7 @@ func (s *DHT) checkNodeActivity(ctx context.Context) { return case <-ticker.C: if !utils.CheckInternetConnectivity() { - logtrace.Info(ctx, "no internet connectivity, not checking node activity", logtrace.Fields{}) + logtrace.Debug(ctx, "no internet connectivity, not checking node activity", logtrace.Fields{}) continue } @@ -115,7 +115,7 @@ func (s *DHT) handlePingSuccess(ctx context.Context, wasActive bool, n *Node) { s.ignorelist.Delete(n) if !wasActive { - logtrace.Info(ctx, "node found to be active again", logtrace.Fields{ + logtrace.Debug(ctx, "node found to be active again", logtrace.Fields{ logtrace.FieldModule: "p2p", "ip": n.IP, "node_id": string(n.ID), diff --git a/p2p/kademlia/redundant_data.go b/p2p/kademlia/redundant_data.go index bfe6947d..151269d1 100644 --- a/p2p/kademlia/redundant_data.go +++ b/p2p/kademlia/redundant_data.go @@ -13,7 +13,7 @@ import ( ) func (s *DHT) startDisabledKeysCleanupWorker(ctx context.Context) error { - logtrace.Info(ctx, "disabled keys cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "disabled keys cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { @@ -50,7 +50,7 @@ func (s *DHT) cleanupDisabledKeys(ctx context.Context) error { } func (s *DHT) startCleanupRedundantDataWorker(ctx context.Context) { - logtrace.Info(ctx, "redundant data cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "redundant data cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { @@ -66,7 
+66,7 @@ func (s *DHT) startCleanupRedundantDataWorker(ctx context.Context) { func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) { from := time.Now().AddDate(-5, 0, 0) // 5 years ago - logtrace.Info(ctx, "getting all possible replication keys past five years", logtrace.Fields{logtrace.FieldModule: "p2p", "from": from}) + logtrace.Debug(ctx, "getting all possible replication keys past five years", logtrace.Fields{logtrace.FieldModule: "p2p", "from": from}) to := time.Now().UTC() replicationKeys := s.store.GetKeysForReplication(ctx, from, to) @@ -88,7 +88,7 @@ func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) { removeKeys := make([]domain.DelKey, 0) for key, closestContacts := range closestContactsMap { if len(closestContacts) < Alpha { - logtrace.Info(ctx, "not enough contacts to replicate", logtrace.Fields{logtrace.FieldModule: "p2p", "key": key, "closest contacts": closestContacts}) + logtrace.Debug(ctx, "not enough contacts to replicate", logtrace.Fields{logtrace.FieldModule: "p2p", "key": key, "closest contacts": closestContacts}) continue } @@ -118,9 +118,9 @@ func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) { return } - logtrace.Info(ctx, "insert del keys success", logtrace.Fields{logtrace.FieldModule: "p2p", "count-del-keys": len(insertKeys)}) + logtrace.Debug(ctx, "insert del keys success", logtrace.Fields{logtrace.FieldModule: "p2p", "count-del-keys": len(insertKeys)}) } else { - logtrace.Info(ctx, "No redundant key found to be stored in the storage", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "No redundant key found to be stored in the storage", logtrace.Fields{logtrace.FieldModule: "p2p"}) } if len(removeKeys) > 0 { @@ -133,7 +133,7 @@ func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) { } func (s *DHT) startDeleteDataWorker(ctx context.Context) { - logtrace.Info(ctx, "start delete data worker", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "start delete data 
worker", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { diff --git a/p2p/kademlia/replication.go b/p2p/kademlia/replication.go index 5163fd0b..4a36c422 100644 --- a/p2p/kademlia/replication.go +++ b/p2p/kademlia/replication.go @@ -34,7 +34,7 @@ var ( // StartReplicationWorker starts replication func (s *DHT) StartReplicationWorker(ctx context.Context) error { - logtrace.Info(ctx, "replication worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "replication worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) go s.checkNodeActivity(ctx) go s.StartBatchFetchAndStoreWorker(ctx) @@ -54,7 +54,7 @@ func (s *DHT) StartReplicationWorker(ctx context.Context) error { // StartBatchFetchAndStoreWorker starts replication func (s *DHT) StartBatchFetchAndStoreWorker(ctx context.Context) error { - logtrace.Info(ctx, "batch fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "batch fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { @@ -69,7 +69,7 @@ func (s *DHT) StartBatchFetchAndStoreWorker(ctx context.Context) error { // StartFailedFetchAndStoreWorker starts replication func (s *DHT) StartFailedFetchAndStoreWorker(ctx context.Context) error { - logtrace.Info(ctx, "fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { @@ -131,7 +131,7 @@ func (s *DHT) Replicate(ctx context.Context) { historicStart = time.Now().UTC().Add(-24 * time.Hour * 180) } - logtrace.Info(ctx, "replicating data", logtrace.Fields{logtrace.FieldModule: "p2p", "historic-start": historicStart}) + logtrace.Debug(ctx, "replicating data", logtrace.Fields{logtrace.FieldModule: "p2p", "historic-start": historicStart}) for i := 0; i < B; i++ { if time.Since(s.ht.refreshTime(i)) > defaultRefreshTime { @@ -150,7 +150,7 @@ func (s 
*DHT) Replicate(ctx context.Context) { } if len(repInfo) == 0 { - logtrace.Info(ctx, "no replication info found", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "no replication info found", logtrace.Fields{logtrace.FieldModule: "p2p"}) return } @@ -159,7 +159,7 @@ func (s *DHT) Replicate(ctx context.Context) { from = *repInfo[0].LastReplicatedAt } - logtrace.Info(ctx, "getting all possible replication keys", logtrace.Fields{logtrace.FieldModule: "p2p", "from": from}) + logtrace.Debug(ctx, "getting all possible replication keys", logtrace.Fields{logtrace.FieldModule: "p2p", "from": from}) to := time.Now().UTC() replicationKeys := s.store.GetKeysForReplication(ctx, from, to) @@ -199,7 +199,7 @@ func (s *DHT) Replicate(ctx context.Context) { continue } countToSendKeys := len(replicationKeys) - idx - logtrace.Info(ctx, "count of replication keys to be checked", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": countToSendKeys}) + logtrace.Debug(ctx, "count of replication keys to be checked", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": countToSendKeys}) // Preallocate a slice with a capacity equal to the number of keys. 
closestContactKeys := make([]string, 0, countToSendKeys) @@ -212,13 +212,13 @@ func (s *DHT) Replicate(ctx context.Context) { } } - logtrace.Info(ctx, "closest contact keys count", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": len(closestContactKeys)}) + logtrace.Debug(ctx, "closest contact keys count", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": len(closestContactKeys)}) if len(closestContactKeys) == 0 { if err := s.updateLastReplicated(ctx, info.ID, to); err != nil { logtrace.Error(ctx, "replicate update lastReplicated failed", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) } else { - logtrace.Info(ctx, "no closest keys found - replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "closest-contact-keys": 0}) + logtrace.Debug(ctx, "no closest keys found - replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "closest-contact-keys": 0}) } continue @@ -258,17 +258,17 @@ func (s *DHT) Replicate(ctx context.Context) { if err := s.updateLastReplicated(ctx, info.ID, to); err != nil { logtrace.Error(ctx, "replicate update lastReplicated failed", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) } else { - logtrace.Info(ctx, "replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "expected-rep-keys": len(closestContactKeys)}) + logtrace.Debug(ctx, "replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "expected-rep-keys": len(closestContactKeys)}) } } - logtrace.Info(ctx, "Replication done", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "Replication done", 
logtrace.Fields{logtrace.FieldModule: "p2p"}) } func (s *DHT) adjustNodeKeys(ctx context.Context, from time.Time, info domain.NodeReplicationInfo) error { replicationKeys := s.store.GetKeysForReplication(ctx, from, time.Now().UTC()) - logtrace.Info(ctx, "begin adjusting node keys process for offline node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "total-rep-keys": len(replicationKeys), "from": from.String()}) + logtrace.Debug(ctx, "begin adjusting node keys process for offline node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "total-rep-keys": len(replicationKeys), "from": from.String()}) // prepare ignored nodes list but remove the node we are adjusting // because we want to find if this node was supposed to hold this key @@ -315,7 +315,7 @@ func (s *DHT) adjustNodeKeys(ctx context.Context, from time.Time, info domain.No failureCount := 0 for nodeInfoKey, keys := range nodeKeysMap { - logtrace.Info(ctx, "sending adjusted replication keys to node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "adjust-to-node": nodeInfoKey, "to-adjust-keys-len": len(keys)}) + logtrace.Debug(ctx, "sending adjusted replication keys to node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "adjust-to-node": nodeInfoKey, "to-adjust-keys-len": len(keys)}) // Retrieve the node object from the key node, err := getNodeFromKey(nodeInfoKey) if err != nil { @@ -370,14 +370,14 @@ func (s *DHT) adjustNodeKeys(ctx context.Context, from time.Time, info domain.No return fmt.Errorf("replicate update isAdjusted failed: %v", err) } - logtrace.Info(ctx, "offline node was successfully adjusted", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID)}) + logtrace.Debug(ctx, "offline node was 
successfully adjusted", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID)}) return nil } func isNodeGoneAndShouldBeAdjusted(lastSeen *time.Time, isAlreadyAdjusted bool) bool { if lastSeen == nil { - logtrace.Info(context.Background(), "lastSeen is nil - aborting node adjustment", logtrace.Fields{}) + logtrace.Debug(context.Background(), "lastSeen is nil - aborting node adjustment", logtrace.Fields{}) return false } @@ -396,10 +396,10 @@ func (s *DHT) checkAndAdjustNode(ctx context.Context, info domain.NodeReplicatio if err := s.store.UpdateIsAdjusted(ctx, string(info.ID), true); err != nil { logtrace.Error(ctx, "failed to update replication info, set isAdjusted to true", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "rep-ip": info.IP, "rep-id": string(info.ID)}) } else { - logtrace.Info(ctx, "set isAdjusted to true", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) + logtrace.Debug(ctx, "set isAdjusted to true", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) } } } - logtrace.Info(ctx, "replication node not active, skipping over it.", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) + logtrace.Debug(ctx, "replication node not active, skipping over it.", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) } diff --git a/p2p/kademlia/rq_symbols.go b/p2p/kademlia/rq_symbols.go index fbf6563d..c8ad2000 100644 --- a/p2p/kademlia/rq_symbols.go +++ b/p2p/kademlia/rq_symbols.go @@ -16,7 +16,7 @@ const ( ) func (s *DHT) startStoreSymbolsWorker(ctx context.Context) { - logtrace.Info(ctx, "start delete data worker", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "start delete data worker", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { @@ -38,12 +38,12 @@ func (s *DHT) storeSymbols(ctx 
context.Context) error { } for _, dir := range dirs { - logtrace.Info(ctx, "rq_symbols worker: start scanning dir & storing raptorQ symbols", logtrace.Fields{"dir": dir, "txid": dir.TXID}) + logtrace.Debug(ctx, "rq_symbols worker: start scanning dir & storing raptorQ symbols", logtrace.Fields{"dir": dir, "txid": dir.TXID}) if err := s.scanDirAndStoreSymbols(ctx, dir.Dir, dir.TXID); err != nil { logtrace.Error(ctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) } - logtrace.Info(ctx, "rq_symbols worker: scanned dir & stored raptorQ symbols", logtrace.Fields{"dir": dir, "txid": dir.TXID}) + logtrace.Debug(ctx, "rq_symbols worker: scanned dir & stored raptorQ symbols", logtrace.Fields{"dir": dir, "txid": dir.TXID}) } return nil @@ -66,7 +66,7 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro } sort.Strings(keys) - logtrace.Info(ctx, "p2p-worker: storing ALL RaptorQ symbols", logtrace.Fields{"txid": txid, "dir": dir, "total": len(keys)}) + logtrace.Debug(ctx, "p2p-worker: storing ALL RaptorQ symbols", logtrace.Fields{"txid": txid, "dir": dir, "total": len(keys)}) // Batch-flush at loadSymbolsBatchSize for start := 0; start < len(keys); { diff --git a/p2p/kademlia/store/meta/meta.go b/p2p/kademlia/store/meta/meta.go index fa75dc81..c57d05a4 100644 --- a/p2p/kademlia/store/meta/meta.go +++ b/p2p/kademlia/store/meta/meta.go @@ -67,7 +67,7 @@ func NewStore(ctx context.Context, dataDir string) (*Store, error) { quit: make(chan bool), } - logtrace.Info(ctx, fmt.Sprintf("p2p data dir: %v", dataDir), logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, fmt.Sprintf("p2p data dir: %v", dataDir), logtrace.Fields{logtrace.FieldModule: "p2p"}) if _, err := os.Stat(dataDir); os.IsNotExist(err) { if err := os.MkdirAll(dataDir, 0750); err != nil { return nil, fmt.Errorf("mkdir %q: %w", dataDir, err) @@ -185,10 +185,10 @@ func (s *Store) startCheckpointWorker(ctx context.Context) { 
select { case <-ctx.Done(): - logtrace.Info(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{}) + logtrace.Debug(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{}) return case <-s.worker.quit: - logtrace.Info(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{}) + logtrace.Debug(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{}) return default: } @@ -204,10 +204,10 @@ func (s *Store) start(ctx context.Context) { logtrace.Error(ctx, "Failed to perform job", logtrace.Fields{logtrace.FieldError: err}) } case <-s.worker.quit: - logtrace.Info(ctx, "exit sqlite meta db worker - quit signal received", logtrace.Fields{}) + logtrace.Debug(ctx, "exit sqlite meta db worker - quit signal received", logtrace.Fields{}) return case <-ctx.Done(): - logtrace.Info(ctx, "exit sqlite meta db worker- ctx done signal received", logtrace.Fields{}) + logtrace.Debug(ctx, "exit sqlite meta db worker- ctx done signal received", logtrace.Fields{}) return } } diff --git a/p2p/kademlia/store/sqlite/meta_worker.go b/p2p/kademlia/store/sqlite/meta_worker.go index eb7a968f..6d1207df 100644 --- a/p2p/kademlia/store/sqlite/meta_worker.go +++ b/p2p/kademlia/store/sqlite/meta_worker.go @@ -124,7 +124,7 @@ func NewMigrationMetaStore(ctx context.Context, dataDir string, cloud cloud.Stor go handler.startLastAccessedUpdateWorker(ctx) go handler.startInsertWorker(ctx) go handler.startMigrationExecutionWorker(ctx) - logtrace.Info(ctx, "MigrationMetaStore workers started", logtrace.Fields{}) + logtrace.Debug(ctx, "MigrationMetaStore workers started", logtrace.Fields{}) return handler, nil } @@ -348,7 +348,7 @@ func (d *MigrationMetaStore) startLastAccessedUpdateWorker(ctx context.Context) case <-d.updateTicker.C: d.commitLastAccessedUpdates(ctx) case <-ctx.Done(): - logtrace.Info(ctx, "Shutting down last accessed update worker", logtrace.Fields{}) + logtrace.Debug(ctx, "Shutting down last accessed update 
worker", logtrace.Fields{}) return } } @@ -414,7 +414,7 @@ func (d *MigrationMetaStore) commitLastAccessedUpdates(ctx context.Context) { d.updates.Delete(k) } - logtrace.Info(ctx, "Committed last accessed updates", logtrace.Fields{"count": len(keysToUpdate)}) + logtrace.Debug(ctx, "Committed last accessed updates", logtrace.Fields{"count": len(keysToUpdate)}) } func PostKeysInsert(updates []UpdateMessage) { @@ -437,7 +437,7 @@ func (d *MigrationMetaStore) startInsertWorker(ctx context.Context) { case <-d.insertTicker.C: d.commitInserts(ctx) case <-ctx.Done(): - logtrace.Info(ctx, "Shutting down insert meta keys worker", logtrace.Fields{}) + logtrace.Debug(ctx, "Shutting down insert meta keys worker", logtrace.Fields{}) d.commitInserts(ctx) return } @@ -501,7 +501,7 @@ func (d *MigrationMetaStore) commitInserts(ctx context.Context) { d.inserts.Delete(k) } - logtrace.Info(ctx, "Committed inserts", logtrace.Fields{"count": len(keysToUpdate)}) + logtrace.Debug(ctx, "Committed inserts", logtrace.Fields{"count": len(keysToUpdate)}) } // startMigrationExecutionWorker starts the worker that executes a migration @@ -511,7 +511,7 @@ func (d *MigrationMetaStore) startMigrationExecutionWorker(ctx context.Context) case <-d.migrationExecutionTicker.C: d.checkAndExecuteMigration(ctx) case <-ctx.Done(): - logtrace.Info(ctx, "Shutting down data migration worker", logtrace.Fields{}) + logtrace.Debug(ctx, "Shutting down data migration worker", logtrace.Fields{}) return } } @@ -544,7 +544,7 @@ func (d *MigrationMetaStore) checkAndExecuteMigration(ctx context.Context) { //return //} - logtrace.Info(ctx, "Starting data migration", logtrace.Fields{"islow": isLow}) + logtrace.Debug(ctx, "Starting data migration", logtrace.Fields{"islow": isLow}) // Step 1: Fetch pending migrations var migrations Migrations @@ -553,11 +553,11 @@ func (d *MigrationMetaStore) checkAndExecuteMigration(ctx context.Context) { logtrace.Error(ctx, "Failed to fetch pending migrations", 
logtrace.Fields{logtrace.FieldError: err}) return } - logtrace.Info(ctx, "Fetched pending migrations", logtrace.Fields{"count": len(migrations)}) + logtrace.Debug(ctx, "Fetched pending migrations", logtrace.Fields{"count": len(migrations)}) // Iterate over each migration for _, migration := range migrations { - logtrace.Info(ctx, "Processing migration", logtrace.Fields{"migration_id": migration.ID}) + logtrace.Debug(ctx, "Processing migration", logtrace.Fields{"migration_id": migration.ID}) if err := d.ProcessMigrationInBatches(ctx, migration); err != nil { logtrace.Error(ctx, "Failed to process migration", logtrace.Fields{logtrace.FieldError: err, "migration_id": migration.ID}) @@ -579,7 +579,7 @@ func (d *MigrationMetaStore) ProcessMigrationInBatches(ctx context.Context, migr } if totalKeys < minKeysToMigrate { - logtrace.Info(ctx, "Skipping migration due to insufficient keys", logtrace.Fields{"migration_id": migration.ID, "keys-count": totalKeys}) + logtrace.Debug(ctx, "Skipping migration due to insufficient keys", logtrace.Fields{"migration_id": migration.ID, "keys-count": totalKeys}) return nil } @@ -630,7 +630,7 @@ func (d *MigrationMetaStore) ProcessMigrationInBatches(ctx context.Context, migr } } - logtrace.Info(ctx, "Migration processed successfully", logtrace.Fields{"migration_id": migration.ID, "tota-keys-count": totalKeys, "migrated_in_current_iteration": nonMigratedKeys}) + logtrace.Debug(ctx, "Migration processed successfully", logtrace.Fields{"migration_id": migration.ID, "tota-keys-count": totalKeys, "migrated_in_current_iteration": nonMigratedKeys}) return nil } @@ -683,7 +683,7 @@ func (d *MigrationMetaStore) uploadInBatches(ctx context.Context, keys []string, continue } - logtrace.Info(ctx, "Successfully uploaded and deleted records for batch", logtrace.Fields{"batch": i + 1, "total_batches": batches}) + logtrace.Debug(ctx, "Successfully uploaded and deleted records for batch", logtrace.Fields{"batch": i + 1, "total_batches": batches}) } return 
lastError @@ -823,7 +823,7 @@ func (d *MigrationMetaStore) InsertMetaMigrationData(ctx context.Context, migrat func (d *MigrationMetaStore) batchSetMigrated(keys []string) error { if len(keys) == 0 { // log.P2P().Info("no keys provided for batch update (is_migrated)") - logtrace.Info(context.Background(), "No keys provided for batch update (is_migrated)", logtrace.Fields{}) + logtrace.Debug(context.Background(), "No keys provided for batch update (is_migrated)", logtrace.Fields{}) return nil } diff --git a/p2p/kademlia/store/sqlite/sqlite.go b/p2p/kademlia/store/sqlite/sqlite.go index 71224a57..d38661d1 100644 --- a/p2p/kademlia/store/sqlite/sqlite.go +++ b/p2p/kademlia/store/sqlite/sqlite.go @@ -293,10 +293,10 @@ func (s *Store) startCheckpointWorker(ctx context.Context) { select { case <-ctx.Done(): - logtrace.Info(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{}) + logtrace.Debug(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{}) return case <-s.worker.quit: - logtrace.Info(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{}) + logtrace.Debug(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{}) return default: } @@ -312,10 +312,10 @@ func (s *Store) start(ctx context.Context) { logtrace.Error(ctx, "Failed to perform job", logtrace.Fields{logtrace.FieldError: err.Error()}) } case <-s.worker.quit: - logtrace.Info(ctx, "exit sqlite db worker - quit signal received", logtrace.Fields{}) + logtrace.Debug(ctx, "exit sqlite db worker - quit signal received", logtrace.Fields{}) return case <-ctx.Done(): - logtrace.Info(ctx, "exit sqlite db worker- ctx done signal received", logtrace.Fields{}) + logtrace.Debug(ctx, "exit sqlite db worker- ctx done signal received", logtrace.Fields{}) return } } @@ -737,11 +737,11 @@ func (s *Store) GetOwnCreatedAt(ctx context.Context) (time.Time, error) { func (s *Store) GetLocalKeys(from time.Time, to time.Time) ([]string, error) { 
var keys []string ctx := context.Background() - logtrace.Info(ctx, "getting all keys for SC", logtrace.Fields{}) + logtrace.Debug(ctx, "getting all keys for SC", logtrace.Fields{}) if err := s.db.SelectContext(ctx, &keys, `SELECT key FROM data WHERE createdAt > ? and createdAt < ?`, from, to); err != nil { return keys, fmt.Errorf("error reading all keys from database: %w", err) } - logtrace.Info(ctx, "got all keys for SC", logtrace.Fields{}) + logtrace.Debug(ctx, "got all keys for SC", logtrace.Fields{}) return keys, nil } @@ -762,7 +762,7 @@ func stringArgsToInterface(args []string) []interface{} { func batchDeleteRecords(db *sqlx.DB, keys []string) error { if len(keys) == 0 { - logtrace.Info(context.Background(), "no keys provided for batch delete", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(context.Background(), "no keys provided for batch delete", logtrace.Fields{logtrace.FieldModule: "p2p"}) return nil } total := int64(0) @@ -784,7 +784,7 @@ func batchDeleteRecords(db *sqlx.DB, keys []string) error { func batchSetMigratedRecords(db *sqlx.DB, keys []string) error { if len(keys) == 0 { - logtrace.Info(context.Background(), "no keys provided for batch update (migrated)", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(context.Background(), "no keys provided for batch update (migrated)", logtrace.Fields{logtrace.FieldModule: "p2p"}) return nil } total := int64(0) diff --git a/p2p/p2p.go b/p2p/p2p.go index 006c469a..2e416111 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -40,15 +40,15 @@ type P2P interface { // p2p structure to implements interface type p2p struct { - store kademlia.Store // the store for kademlia network - metaStore kademlia.MetaStore - dht *kademlia.DHT // the kademlia network - config *Config // the service configuration - running bool // if the kademlia network is ready - lumeraClient lumera.Client - keyring keyring.Keyring // Add the keyring field - rqstore rqstore.Store - metricsDisabled bool + store 
kademlia.Store // the store for kademlia network + metaStore kademlia.MetaStore + dht *kademlia.DHT // the kademlia network + config *Config // the service configuration + running bool // if the kademlia network is ready + lumeraClient lumera.Client + keyring keyring.Keyring // Add the keyring field + rqstore rqstore.Store + metricsDisabled bool } // Run the kademlia network @@ -65,7 +65,7 @@ func (s *p2p) Run(ctx context.Context) error { logtrace.Error(ctx, "failed to run kadmelia, retrying.", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) } else { - logtrace.Info(ctx, "kadmelia started successfully", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "kadmelia started successfully", logtrace.Fields{logtrace.FieldModule: "p2p"}) return nil } } @@ -75,7 +75,7 @@ func (s *p2p) Run(ctx context.Context) error { // run the kademlia network func (s *p2p) run(ctx context.Context) error { - logtrace.Info(ctx, "Running kademlia network", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "Running kademlia network", logtrace.Fields{logtrace.FieldModule: "p2p"}) // configure the kademlia dht for p2p service if err := s.configure(ctx); err != nil { return errors.Errorf("configure kademlia dht: %w", err) @@ -96,7 +96,7 @@ func (s *p2p) run(ctx context.Context) error { } s.running = true - logtrace.Info(ctx, "p2p service is started", logtrace.Fields{}) + logtrace.Debug(ctx, "p2p service is started", logtrace.Fields{}) // block until context is done <-ctx.Done() @@ -104,7 +104,7 @@ func (s *p2p) run(ctx context.Context) error { // stop the node for kademlia network s.dht.Stop(ctx) - logtrace.Info(ctx, "p2p service is stopped", logtrace.Fields{}) + logtrace.Debug(ctx, "p2p service is stopped", logtrace.Fields{}) return nil } @@ -227,15 +227,15 @@ func (s *p2p) NClosestNodesWithIncludingNodeList(ctx context.Context, n int, key // configure the distributed hash table for p2p service func (s *p2p) configure(ctx 
context.Context) error { // new the queries storage - kadOpts := &kademlia.Options{ - LumeraClient: s.lumeraClient, - Keyring: s.keyring, // Pass the keyring - BootstrapNodes: []*kademlia.Node{}, - IP: s.config.ListenAddress, - Port: s.config.Port, - ID: []byte(s.config.ID), - MetricsDisabled: s.metricsDisabled, - } + kadOpts := &kademlia.Options{ + LumeraClient: s.lumeraClient, + Keyring: s.keyring, // Pass the keyring + BootstrapNodes: []*kademlia.Node{}, + IP: s.config.ListenAddress, + Port: s.config.Port, + ID: []byte(s.config.ID), + MetricsDisabled: s.metricsDisabled, + } if len(kadOpts.ID) == 0 { errors.Errorf("node id is empty") @@ -254,25 +254,25 @@ func (s *p2p) configure(ctx context.Context) error { // New returns a new p2p instance. func New(ctx context.Context, config *Config, lumeraClient lumera.Client, kr keyring.Keyring, rqstore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore, metricsDisabled bool) (P2P, error) { - store, err := sqlite.NewStore(ctx, config.DataDir, cloud, mst) - if err != nil { - return nil, errors.Errorf("new kademlia store: %w", err) - } + store, err := sqlite.NewStore(ctx, config.DataDir, cloud, mst) + if err != nil { + return nil, errors.Errorf("new kademlia store: %w", err) + } meta, err := meta.NewStore(ctx, config.DataDir) if err != nil { return nil, errors.Errorf("new kademlia meta store: %w", err) } - return &p2p{ - store: store, - metaStore: meta, - config: config, - lumeraClient: lumeraClient, - keyring: kr, // Store the keyring - rqstore: rqstore, - metricsDisabled: metricsDisabled, - }, nil + return &p2p{ + store: store, + metaStore: meta, + config: config, + lumeraClient: lumeraClient, + keyring: kr, // Store the keyring + rqstore: rqstore, + metricsDisabled: metricsDisabled, + }, nil } // LocalStore store data into the kademlia network diff --git a/pkg/codec/decode.go b/pkg/codec/decode.go index bd3b0231..348894e4 100644 --- a/pkg/codec/decode.go +++ b/pkg/codec/decode.go @@ -145,7 +145,7 @@ func (rq 
*raptorQ) PrepareDecode( return os.RemoveAll(symbolsDir) } - logtrace.Info(ctx, "prepare decode workspace created", logtrace.Fields{ + logtrace.Debug(ctx, "prepare decode workspace created", logtrace.Fields{ "symbols_dir": symbolsDir, "blocks": len(blockDirs), }) @@ -164,7 +164,7 @@ func (rq *raptorQ) DecodeFromPrepared( logtrace.FieldModule: "rq", logtrace.FieldActionID: ws.ActionID, } - logtrace.Info(ctx, "RaptorQ decode (prepared) requested", fields) + logtrace.Debug(ctx, "RaptorQ decode (prepared) requested", fields) processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) if err != nil { @@ -184,7 +184,7 @@ func (rq *raptorQ) DecodeFromPrepared( fields[logtrace.FieldError] = err.Error() return DecodeResponse{}, fmt.Errorf("write layout file: %w", err) } - logtrace.Info(ctx, "layout.json written (prepared)", fields) + logtrace.Debug(ctx, "layout.json written (prepared)", fields) // Decode to output (idempotent-safe: overwrite on success) outputPath := filepath.Join(ws.SymbolsDir, "output") @@ -194,64 +194,64 @@ func (rq *raptorQ) DecodeFromPrepared( return DecodeResponse{}, fmt.Errorf("raptorq decode: %w", err) } - logtrace.Info(ctx, "RaptorQ decoding completed successfully (prepared)", logtrace.Fields{ + logtrace.Debug(ctx, "RaptorQ decoding completed successfully (prepared)", logtrace.Fields{ "output_path": outputPath, }) return DecodeResponse{FilePath: outputPath, DecodeTmpDir: ws.SymbolsDir}, nil } func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) { - fields := logtrace.Fields{ - logtrace.FieldMethod: "Decode", - logtrace.FieldModule: "rq", - logtrace.FieldActionID: req.ActionID, - } - logtrace.Info(ctx, "RaptorQ decode request received", fields) + fields := logtrace.Fields{ + logtrace.FieldMethod: "Decode", + logtrace.FieldModule: "rq", + logtrace.FieldActionID: req.ActionID, + } + logtrace.Debug(ctx, "RaptorQ decode request received", fields) - // 1) Validate 
layout (the check) - if len(req.Layout.Blocks) == 0 { - fields[logtrace.FieldError] = "empty layout" - return DecodeResponse{}, fmt.Errorf("invalid layout: no blocks present") - } - for _, blk := range req.Layout.Blocks { - if len(blk.Symbols) == 0 { - fields[logtrace.FieldError] = fmt.Sprintf("block_%d has no symbols", blk.BlockID) - return DecodeResponse{}, fmt.Errorf("invalid layout: block %d has no symbols", blk.BlockID) - } - } + // 1) Validate layout (the check) + if len(req.Layout.Blocks) == 0 { + fields[logtrace.FieldError] = "empty layout" + return DecodeResponse{}, fmt.Errorf("invalid layout: no blocks present") + } + for _, blk := range req.Layout.Blocks { + if len(blk.Symbols) == 0 { + fields[logtrace.FieldError] = fmt.Sprintf("block_%d has no symbols", blk.BlockID) + return DecodeResponse{}, fmt.Errorf("invalid layout: block %d has no symbols", blk.BlockID) + } + } - // 2) Prepare workspace (functionality) - _, Write, Cleanup, ws, err := rq.PrepareDecode(ctx, req.ActionID, req.Layout) - if err != nil { - fields[logtrace.FieldError] = err.Error() - return DecodeResponse{}, fmt.Errorf("prepare decode workspace: %w", err) - } + // 2) Prepare workspace (functionality) + _, Write, Cleanup, ws, err := rq.PrepareDecode(ctx, req.ActionID, req.Layout) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return DecodeResponse{}, fmt.Errorf("prepare decode workspace: %w", err) + } - // Ensure workspace cleanup on failure. On success, caller cleans up via returned path. - success := false - defer func() { - if !success && Cleanup != nil { - _ = Cleanup() - } - }() + // Ensure workspace cleanup on failure. On success, caller cleans up via returned path. 
+ success := false + defer func() { + if !success && Cleanup != nil { + _ = Cleanup() + } + }() - // 3) Persist provided in-memory symbols via Write (functionality) - if len(req.Symbols) > 0 { - for id, data := range req.Symbols { - if _, werr := Write(-1, id, data); werr != nil { - fields[logtrace.FieldError] = werr.Error() - return DecodeResponse{}, werr - } - } - logtrace.Info(ctx, "symbols persisted via Write()", fields) - } + // 3) Persist provided in-memory symbols via Write (functionality) + if len(req.Symbols) > 0 { + for id, data := range req.Symbols { + if _, werr := Write(-1, id, data); werr != nil { + fields[logtrace.FieldError] = werr.Error() + return DecodeResponse{}, werr + } + } + logtrace.Debug(ctx, "symbols persisted via Write()", fields) + } - // 4) Decode using the prepared workspace (functionality) - resp, derr := rq.DecodeFromPrepared(ctx, ws, req.Layout) - if derr != nil { - fields[logtrace.FieldError] = derr.Error() - return DecodeResponse{}, derr - } - success = true - return resp, nil + // 4) Decode using the prepared workspace (functionality) + resp, derr := rq.DecodeFromPrepared(ctx, ws, req.Layout) + if derr != nil { + fields[logtrace.FieldError] = derr.Error() + return DecodeResponse{}, derr + } + success = true + return resp, nil } diff --git a/pkg/codec/raptorq.go b/pkg/codec/raptorq.go index 4564bc1b..7c28d0c5 100644 --- a/pkg/codec/raptorq.go +++ b/pkg/codec/raptorq.go @@ -48,7 +48,7 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons return EncodeResponse{}, fmt.Errorf("create RaptorQ processor: %w", err) } defer processor.Free() - logtrace.Info(ctx, "RaptorQ processor created", fields) + logtrace.Debug(ctx, "RaptorQ processor created", fields) /* ---------- 1. 
run the encoder ---------- */ // Deterministic: force single block @@ -60,7 +60,7 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons os.Remove(req.Path) return EncodeResponse{}, fmt.Errorf("mkdir %s: %w", symbolsDir, err) } - logtrace.Info(ctx, "RaptorQ processor encoding", fields) + logtrace.Debug(ctx, "RaptorQ processor encoding", fields) resp, err := processor.EncodeFile(req.Path, symbolsDir, blockSize) if err != nil { @@ -74,7 +74,7 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons /* ---------- 2. read the layout JSON ---------- */ layoutData, err := os.ReadFile(resp.LayoutFilePath) - logtrace.Info(ctx, "RaptorQ processor layout file", logtrace.Fields{ + logtrace.Debug(ctx, "RaptorQ processor layout file", logtrace.Fields{ "layout-file": resp.LayoutFilePath}) if err != nil { fields[logtrace.FieldError] = err.Error() diff --git a/pkg/common/task/task.go b/pkg/common/task/task.go index e4bb062a..adf173e4 100644 --- a/pkg/common/task/task.go +++ b/pkg/common/task/task.go @@ -92,13 +92,13 @@ func (task *task) RunAction(ctx context.Context) error { for { select { case <-ctx.Done(): - logtrace.Info(ctx, "context done", logtrace.Fields{"task_id": task.ID()}) + logtrace.Debug(ctx, "context done", logtrace.Fields{"task_id": task.ID()}) case <-task.Done(): - logtrace.Info(ctx, "task done", logtrace.Fields{"task_id": task.ID()}) + logtrace.Debug(ctx, "task done", logtrace.Fields{"task_id": task.ID()}) cancel() case action, ok := <-task.actionCh: if !ok { - logtrace.Info(ctx, "action channel closed", logtrace.Fields{"task_id": task.ID()}) + logtrace.Debug(ctx, "action channel closed", logtrace.Fields{"task_id": task.ID()}) return group.Wait() } diff --git a/pkg/common/task/worker.go b/pkg/common/task/worker.go index 280b5fb8..14043079 100644 --- a/pkg/common/task/worker.go +++ b/pkg/common/task/worker.go @@ -91,7 +91,7 @@ func (worker *Worker) Run(ctx context.Context) error { logtrace.Error(ctx, "Recovered 
from panic in common task's worker run", logtrace.Fields{"task": currentTask.ID(), "error": r}) } - logtrace.Info(ctx, "Task Removed", logtrace.Fields{"task": currentTask.ID()}) + logtrace.Debug(ctx, "Task Removed", logtrace.Fields{"task": currentTask.ID()}) // Remove the task from the worker's task list worker.RemoveTask(currentTask) }() diff --git a/pkg/dd/image_rareness.go b/pkg/dd/image_rareness.go index d021da1b..74fec800 100644 --- a/pkg/dd/image_rareness.go +++ b/pkg/dd/image_rareness.go @@ -56,7 +56,7 @@ func (c *ddServerClientImpl) ImageRarenessScore(ctx context.Context, req Rarenes logtrace.FieldMethod: "ImageRarenessScore", logtrace.FieldRequest: req, } - logtrace.Info(ctx, "getting image rareness score", fields) + logtrace.Debug(ctx, "getting image rareness score", fields) res, err := c.ddService.ImageRarenessScore(ctx, &ddService.RarenessScoreRequest{ImageFilepath: req.Filepath}) if err != nil { @@ -65,7 +65,7 @@ func (c *ddServerClientImpl) ImageRarenessScore(ctx context.Context, req Rarenes return ImageRarenessScoreResponse{}, fmt.Errorf("dd image rareness score error: %w", err) } - logtrace.Info(ctx, "successfully got image rareness score", fields) + logtrace.Debug(ctx, "successfully got image rareness score", fields) return toImageRarenessScoreResponse(res), nil } diff --git a/pkg/dd/status.go b/pkg/dd/status.go index fc7f4d30..812b62d6 100644 --- a/pkg/dd/status.go +++ b/pkg/dd/status.go @@ -26,7 +26,7 @@ func (c *ddServerClientImpl) GetStatus(ctx context.Context, req GetStatusRequest logtrace.FieldMethod: "GetStatus", logtrace.FieldRequest: req, } - logtrace.Info(ctx, "getting status", fields) + logtrace.Debug(ctx, "getting status", fields) res, err := c.ddService.GetStatus(ctx, &ddService.GetStatusRequest{}) if err != nil { @@ -35,7 +35,7 @@ func (c *ddServerClientImpl) GetStatus(ctx context.Context, req GetStatusRequest return GetStatusResponse{}, fmt.Errorf("dd get status error: %w", err) } - logtrace.Info(ctx, "successfully got status", 
fields) + logtrace.Debug(ctx, "successfully got status", fields) return GetStatusResponse{ Version: res.GetVersion(), TaskCount: res.GetTaskCount(), diff --git a/pkg/lumera/connection.go b/pkg/lumera/connection.go index ab28702c..06c39748 100644 --- a/pkg/lumera/connection.go +++ b/pkg/lumera/connection.go @@ -127,7 +127,7 @@ func newGRPCConnection(ctx context.Context, rawAddr string) (Connection, error) if firstCand.useTLS { scheme = "tls" } - logtrace.Info(ctx, "gRPC connection established", logtrace.Fields{ + logtrace.Debug(ctx, "gRPC connection established", logtrace.Fields{ "target": firstCand.target, "scheme": scheme, }) diff --git a/pkg/lumera/modules/auth/impl.go b/pkg/lumera/modules/auth/impl.go index 3597d7a9..a3ad3bca 100644 --- a/pkg/lumera/modules/auth/impl.go +++ b/pkg/lumera/modules/auth/impl.go @@ -46,7 +46,7 @@ func (m *module) Verify(ctx context.Context, accAddress string, data, signature return fmt.Errorf("invalid address: %w", err) } - logtrace.Info(ctx, "Verifying signature", logtrace.Fields{"address": addr.String()}) + logtrace.Debug(ctx, "Verifying signature", logtrace.Fields{"address": addr.String()}) // Use Account RPC instead of AccountInfo to get the full account with public key accResp, err := m.client.Account(ctx, &authtypes.QueryAccountRequest{ @@ -66,7 +66,7 @@ func (m *module) Verify(ctx context.Context, accAddress string, data, signature if pubKey == nil { return fmt.Errorf("public key is nil") } - logtrace.Info(ctx, "Public key retrieved", logtrace.Fields{"pubKey": pubKey.String()}) + logtrace.Debug(ctx, "Public key retrieved", logtrace.Fields{"pubKey": pubKey.String()}) if !pubKey.VerifySignature(data, signature) { return fmt.Errorf("invalid signature") } diff --git a/pkg/lumera/modules/tx/impl.go b/pkg/lumera/modules/tx/impl.go index d342601b..bcdb694d 100644 --- a/pkg/lumera/modules/tx/impl.go +++ b/pkg/lumera/modules/tx/impl.go @@ -103,7 +103,7 @@ func (m *module) SimulateTransaction(ctx context.Context, msgs []types.Msg, acco 
return nil, fmt.Errorf("simulation error: %w", err) } - logtrace.Info(ctx, fmt.Sprintf("simulation complete | gasUsed=%d", simRes.GasInfo.GasUsed), nil) + logtrace.Debug(ctx, fmt.Sprintf("simulation complete | gasUsed=%d", simRes.GasInfo.GasUsed), nil) return simRes, nil } @@ -143,7 +143,7 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, return nil, fmt.Errorf("failed to sign transaction: %w", err) } - logtrace.Info(ctx, "transaction signed successfully", nil) + logtrace.Debug(ctx, "transaction signed successfully", nil) // Encode signed transaction txBytes, err := clientCtx.TxConfig.TxEncoder()(txBuilder.GetTx()) @@ -273,7 +273,7 @@ func (m *module) ProcessTransaction(ctx context.Context, msgs []types.Msg, accou // Step 3: Calculate fee based on adjusted gas fee := m.CalculateFee(gasToUse, config) - logtrace.Info(ctx, fmt.Sprintf("using simulated gas and calculated fee | simulatedGas=%d adjustedGas=%d fee=%s", simulatedGasUsed, gasToUse, fee), nil) + logtrace.Debug(ctx, fmt.Sprintf("using simulated gas and calculated fee | simulatedGas=%d adjustedGas=%d fee=%s", simulatedGasUsed, gasToUse, fee), nil) // Step 4: Build and sign transaction txBytes, err := m.BuildAndSignTransaction(ctx, msgs, accountInfo, gasToUse, fee, config) @@ -288,7 +288,7 @@ func (m *module) ProcessTransaction(ctx context.Context, msgs []types.Msg, accou } if result != nil && result.TxResponse != nil && result.TxResponse.Code == 0 && len(result.TxResponse.Events) == 0 { - logtrace.Info(ctx, "Transaction broadcast successful, waiting for inclusion to get events...", nil) + logtrace.Debug(ctx, "Transaction broadcast successful, waiting for inclusion to get events...", nil) // Retry 5 times with 1 second intervals var txResp *sdktx.GetTxResponse @@ -298,7 +298,7 @@ func (m *module) ProcessTransaction(ctx context.Context, msgs []types.Msg, accou txResp, err = m.GetTransaction(ctx, result.TxResponse.TxHash) if err == nil && txResp != nil && txResp.TxResponse != nil 
{ // Successfully got the transaction with events - logtrace.Info(ctx, fmt.Sprintf("Retrieved transaction with %d events", len(txResp.TxResponse.Events)), nil) + logtrace.Debug(ctx, fmt.Sprintf("Retrieved transaction with %d events", len(txResp.TxResponse.Events)), nil) result.TxResponse = txResp.TxResponse break } diff --git a/pkg/net/grpc/server/server.go b/pkg/net/grpc/server/server.go index 64dfe0f2..ae1a3524 100644 --- a/pkg/net/grpc/server/server.go +++ b/pkg/net/grpc/server/server.go @@ -203,7 +203,7 @@ func (s *Server) createListener(ctx context.Context, address string) (net.Listen if err != nil { return nil, errors.Errorf("failed to create listener: %w", err).WithField("address", address) } - logtrace.Info(ctx, "gRPC server listening", logtrace.Fields{"address": address}) + logtrace.Debug(ctx, "gRPC server listening", logtrace.Fields{"address": address}) return lis, nil } @@ -256,7 +256,7 @@ func (s *Server) Serve(ctx context.Context, address string, opts *ServerOptions) // Wait for context cancellation or error select { case <-ctx.Done(): - logtrace.Info(ctx, "Shutting down gRPC server", logtrace.Fields{"address": address}) + logtrace.Debug(ctx, "Shutting down gRPC server", logtrace.Fields{"address": address}) return s.Stop(opts.GracefulShutdownTime) case err := <-serveErr: return err diff --git a/pkg/net/interceptor.go b/pkg/net/interceptor.go index f29d88a1..b33aadcf 100644 --- a/pkg/net/interceptor.go +++ b/pkg/net/interceptor.go @@ -34,7 +34,7 @@ func UnaryServerInterceptor() grpc.UnaryServerInterceptor { logtrace.FieldMethod: info.FullMethod, logtrace.FieldCorrelationID: correlationID, } - logtrace.Info(ctx, "received gRPC request", fields) + logtrace.Debug(ctx, "received gRPC request", fields) resp, err := handler(ctx, req) @@ -42,7 +42,7 @@ func UnaryServerInterceptor() grpc.UnaryServerInterceptor { fields[logtrace.FieldError] = err.Error() logtrace.Error(ctx, "gRPC request failed", fields) } else { - logtrace.Info(ctx, "gRPC request processed 
successfully", fields) + logtrace.Debug(ctx, "gRPC request processed successfully", fields) } return resp, err diff --git a/pkg/storage/queries/health_check.go b/pkg/storage/queries/health_check.go index e76799da..96802dd8 100644 --- a/pkg/storage/queries/health_check.go +++ b/pkg/storage/queries/health_check.go @@ -98,10 +98,10 @@ func (s *SQLiteStore) GetHCSummaryStats(from time.Time) (hcMetrics metrics.HCMet if err != nil { return hcMetrics, err } - logtrace.Info(context.Background(), "observer evaluations retrieved", logtrace.Fields{"observer_evaluations": len(hcObserversEvaluations), "from": from}) + logtrace.Debug(context.Background(), "observer evaluations retrieved", logtrace.Fields{"observer_evaluations": len(hcObserversEvaluations), "from": from}) observerEvaluationMetrics := processHCObserverEvaluations(hcObserversEvaluations) - logtrace.Info(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{"observer_evaluation_metrics": len(observerEvaluationMetrics), "from": from}) + logtrace.Debug(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{"observer_evaluation_metrics": len(observerEvaluationMetrics), "from": from}) for _, obMetrics := range observerEvaluationMetrics { if obMetrics.ChallengesVerified >= 3 { @@ -154,7 +154,7 @@ func (s *SQLiteStore) GetMetricsDataByHealthCheckChallengeID(ctx context.Context if err != nil { return healthCheckChallengeMessages, err } - logtrace.Info(ctx, "health-check-challenge metrics row count", logtrace.Fields{"rows": len(hcMetrics), "challenge_id": challengeID}) + logtrace.Debug(ctx, "health-check-challenge metrics row count", logtrace.Fields{"rows": len(hcMetrics), "challenge_id": challengeID}) for _, hcMetric := range hcMetrics { msg := types.HealthCheckMessageData{} diff --git a/pkg/storage/queries/self_healing.go b/pkg/storage/queries/self_healing.go index 47145a0b..61e7c63c 100644 --- a/pkg/storage/queries/self_healing.go +++ b/pkg/storage/queries/self_healing.go 
@@ -257,7 +257,7 @@ func (s *SQLiteStore) GetSHExecutionMetrics(ctx context.Context, from time.Time) if err != nil { return m, err } - logtrace.Info(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) + logtrace.Debug(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) challenges := make(map[string]SHChallengeMetric) for _, row := range rows { @@ -361,11 +361,11 @@ func (s *SQLiteStore) GetSHExecutionMetrics(ctx context.Context, from time.Time) } } - logtrace.Info(ctx, "self-healing execution metrics challenges count", logtrace.Fields{"challenges": len(challenges)}) + logtrace.Debug(ctx, "self-healing execution metrics challenges count", logtrace.Fields{"challenges": len(challenges)}) for _, challenge := range challenges { - logtrace.Info(ctx, "self-healing challenge metric", logtrace.Fields{ + logtrace.Debug(ctx, "self-healing challenge metric", logtrace.Fields{ "challenge-id": challenge.ChallengeID, "is-accepted": challenge.IsAccepted, "is-verified": challenge.IsVerified, @@ -475,7 +475,7 @@ func (s *SQLiteStore) GetLastNSHChallenges(ctx context.Context, n int) (types.Se if err != nil { return challenges, err } - logtrace.Info(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) + logtrace.Debug(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) challengesInserted := 0 for _, row := range rows { @@ -507,7 +507,7 @@ func (s *SQLiteStore) GetSHChallengeReport(ctx context.Context, challengeID stri if err != nil { return challenges, err } - logtrace.Info(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) + logtrace.Debug(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) for _, row := range rows { if row.ChallengeID == challengeID { diff --git a/pkg/storage/queries/storage_challenge.go b/pkg/storage/queries/storage_challenge.go index 574e7f4f..164ed2be 100644 --- 
a/pkg/storage/queries/storage_challenge.go +++ b/pkg/storage/queries/storage_challenge.go @@ -97,7 +97,7 @@ func (s *SQLiteStore) GetMetricsDataByStorageChallengeID(ctx context.Context, ch return storageChallengeMessages, err } // log.WithContext(ctx).WithField("rows", len(scMetrics)).Info("storage-challenge metrics row count") - logtrace.Info(ctx, "storage-challenge metrics row count", logtrace.Fields{ + logtrace.Debug(ctx, "storage-challenge metrics row count", logtrace.Fields{ "rows": len(scMetrics), }) @@ -210,13 +210,13 @@ func (s *SQLiteStore) GetSCSummaryStats(from time.Time) (scMetrics metrics.SCMet return scMetrics, err } // log.WithField("observer_evaluations", len(observersEvaluations)).Info("observer evaluations retrieved") - logtrace.Info(context.Background(), "observer evaluations retrieved", logtrace.Fields{ + logtrace.Debug(context.Background(), "observer evaluations retrieved", logtrace.Fields{ "observer_evaluations": len(observersEvaluations), }) observerEvaluationMetrics := processObserverEvaluations(observersEvaluations) // log.WithField("observer_evaluation_metrics", len(observerEvaluationMetrics)).Info("observer evaluation metrics retrieved") - logtrace.Info(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{ + logtrace.Debug(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{ "observer_evaluation_metrics": len(observerEvaluationMetrics), }) diff --git a/pkg/storage/queries/task_history.go b/pkg/storage/queries/task_history.go index 73a55ef8..29539a49 100644 --- a/pkg/storage/queries/task_history.go +++ b/pkg/storage/queries/task_history.go @@ -59,7 +59,7 @@ func (s *SQLiteStore) QueryTaskHistory(taskID string) (history []types.TaskHisto err = json.Unmarshal([]byte(details), &i.Details) if err != nil { - logtrace.Info(context.Background(), "Detals", logtrace.Fields{"details": details}) + logtrace.Debug(context.Background(), "Detals", logtrace.Fields{"details": details}) 
logtrace.Error(context.Background(), fmt.Sprintf("cannot unmarshal task history details: %s", details), logtrace.Fields{"error": err}) i.Details = nil } diff --git a/sdk/task/task.go b/sdk/task/task.go index e359c907..976725a0 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -126,7 +126,7 @@ func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { PeerType: t.config.Account.PeerType, }).CreateClient(ctx, sn) if err != nil { - logtrace.Info(ctx, "Failed to create client for supernode", logtrace.Fields{logtrace.FieldMethod: "isServing"}) + logtrace.Debug(ctx, "Failed to create client for supernode", logtrace.Fields{logtrace.FieldMethod: "isServing"}) return false } defer client.Close(ctx) diff --git a/supernode/cmd/service.go b/supernode/cmd/service.go index d4af1269..8cd8708f 100644 --- a/supernode/cmd/service.go +++ b/supernode/cmd/service.go @@ -23,7 +23,7 @@ func RunServices(ctx context.Context, services ...service) error { if err != nil { logtrace.Error(ctx, "service stopped with an error", logtrace.Fields{"service": reflect.TypeOf(service).String(), "error": err}) } else { - logtrace.Info(ctx, "service stopped", logtrace.Fields{"service": reflect.TypeOf(service).String()}) + logtrace.Debug(ctx, "service stopped", logtrace.Fields{"service": reflect.TypeOf(service).String()}) } return err }) diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index eaf1339e..952113d3 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -46,7 +46,7 @@ The supernode will connect to the Lumera network and begin participating in the // Log configuration info cfgFile := filepath.Join(baseDir, DefaultConfigFile) - logtrace.Info(ctx, "Starting supernode with configuration", logtrace.Fields{"config_file": cfgFile, "keyring_dir": appConfig.GetKeyringDir(), "key_name": appConfig.SupernodeConfig.KeyName}) + logtrace.Debug(ctx, "Starting supernode with configuration", logtrace.Fields{"config_file": cfgFile, "keyring_dir": 
appConfig.GetKeyringDir(), "key_name": appConfig.SupernodeConfig.KeyName}) // Initialize keyring kr, err := initKeyringFromConfig(appConfig) @@ -61,7 +61,7 @@ The supernode will connect to the Lumera network and begin participating in the } // Verify config matches chain registration before starting services - logtrace.Info(ctx, "Verifying configuration against chain registration", logtrace.Fields{}) + logtrace.Debug(ctx, "Verifying configuration against chain registration", logtrace.Fields{}) configVerifier := verifier.NewConfigVerifier(appConfig, lumeraClient, kr) verificationResult, err := configVerifier.VerifyConfig(ctx) if err != nil { @@ -76,7 +76,7 @@ The supernode will connect to the Lumera network and begin participating in the logtrace.Warn(ctx, "Config verification warnings", logtrace.Fields{"summary": verificationResult.Summary()}) } - logtrace.Info(ctx, "Configuration verification successful", logtrace.Fields{}) + logtrace.Debug(ctx, "Configuration verification successful", logtrace.Fields{}) // Initialize RaptorQ store for Cascade processing rqStore, err := initRQStore(ctx, appConfig) @@ -84,14 +84,14 @@ The supernode will connect to the Lumera network and begin participating in the logtrace.Fatal(ctx, "Failed to initialize RaptorQ store", logtrace.Fields{"error": err.Error()}) } - // Manually set the disable flag at the highest level - disableMetrics := true + // Manually set the disable flag at the highest level + disableMetrics := true - // Initialize P2P service with explicit disable flag - p2pService, err := initP2PService(ctx, appConfig, lumeraClient, kr, rqStore, nil, nil, disableMetrics) - if err != nil { - logtrace.Fatal(ctx, "Failed to initialize P2P service", logtrace.Fields{"error": err.Error()}) - } + // Initialize P2P service with explicit disable flag + p2pService, err := initP2PService(ctx, appConfig, lumeraClient, kr, rqStore, nil, nil, disableMetrics) + if err != nil { + logtrace.Fatal(ctx, "Failed to initialize P2P service", 
logtrace.Fields{"error": err.Error()}) + } // Initialize the supernode supernodeInstance, err := NewSupernode(ctx, appConfig, kr, p2pService, rqStore, lumeraClient) @@ -100,19 +100,19 @@ The supernode will connect to the Lumera network and begin participating in the } // Configure cascade service - cService := cascadeService.NewCascadeService( - &cascadeService.Config{ - Config: common.Config{ - SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, - }, - RqFilesDir: appConfig.GetRaptorQFilesDir(), - MetricsDisabled: disableMetrics, - }, - lumeraClient, - *p2pService, - codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), - rqStore, - ) + cService := cascadeService.NewCascadeService( + &cascadeService.Config{ + Config: common.Config{ + SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, + }, + RqFilesDir: appConfig.GetRaptorQFilesDir(), + MetricsDisabled: disableMetrics, + }, + lumeraClient, + *p2pService, + codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), + rqStore, + ) // Create cascade action server cascadeActionServer := cascade.NewCascadeActionServer(cService) @@ -152,7 +152,7 @@ The supernode will connect to the Lumera network and begin participating in the if isTestnet { profilingAddr := "0.0.0.0:8082" - logtrace.Info(ctx, "Starting profiling server", logtrace.Fields{ + logtrace.Debug(ctx, "Starting profiling server", logtrace.Fields{ "address": profilingAddr, "chain_id": appConfig.LumeraClientConfig.ChainID, "is_testnet": isTestnet, @@ -178,7 +178,7 @@ The supernode will connect to the Lumera network and begin participating in the // Wait for termination signal sig := <-sigCh - logtrace.Info(ctx, "Received signal, shutting down", logtrace.Fields{"signal": sig.String()}) + logtrace.Debug(ctx, "Received signal, shutting down", logtrace.Fields{"signal": sig.String()}) // Graceful shutdown if err := supernodeInstance.Stop(ctx); err != nil { @@ -208,9 +208,9 @@ func initP2PService(ctx context.Context, config *config.Config, lumeraClient 
lum // Create P2P config using helper function p2pConfig := createP2PConfig(config, address.String()) - logtrace.Info(ctx, "Initializing P2P service", logtrace.Fields{"address": p2pConfig.ListenAddress, "port": p2pConfig.Port, "data_dir": p2pConfig.DataDir, "supernode_id": address.String()}) + logtrace.Debug(ctx, "Initializing P2P service", logtrace.Fields{"address": p2pConfig.ListenAddress, "port": p2pConfig.Port, "data_dir": p2pConfig.DataDir, "supernode_id": address.String()}) - p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst, metricsDisabled) + p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst, metricsDisabled) if err != nil { return nil, fmt.Errorf("failed to initialize p2p service: %w", err) } diff --git a/supernode/cmd/supernode.go b/supernode/cmd/supernode.go index 19a65718..c0740fd0 100644 --- a/supernode/cmd/supernode.go +++ b/supernode/cmd/supernode.go @@ -71,13 +71,13 @@ func (s *Supernode) Start(ctx context.Context) error { return err } - logtrace.Info(ctx, "Found valid key in keyring", logtrace.Fields{ + logtrace.Debug(ctx, "Found valid key in keyring", logtrace.Fields{ "key_name": s.config.SupernodeConfig.KeyName, "address": address.String(), }) // Use the P2P service that was passed in via constructor - logtrace.Info(ctx, "Starting P2P service", logtrace.Fields{}) + logtrace.Debug(ctx, "Starting P2P service", logtrace.Fields{}) if err := s.p2pService.Run(ctx); err != nil { return fmt.Errorf("p2p service error: %w", err) } @@ -89,7 +89,7 @@ func (s *Supernode) Start(ctx context.Context) error { func (s *Supernode) Stop(ctx context.Context) error { // Close the Lumera client connection if s.lumeraClient != nil { - logtrace.Info(ctx, "Closing Lumera client", logtrace.Fields{}) + logtrace.Debug(ctx, "Closing Lumera client", logtrace.Fields{}) if err := s.lumeraClient.Close(); err != nil { logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{ "error": err.Error(), @@ -131,7 
+131,7 @@ func initRQStore(ctx context.Context, config *config.Config) (rqstore.Store, err // Create the SQLite file path rqStoreFile := rqDir + "/rqstore.db" - logtrace.Info(ctx, "Initializing RaptorQ store", logtrace.Fields{ + logtrace.Debug(ctx, "Initializing RaptorQ store", logtrace.Fields{ "file_path": rqStoreFile, }) diff --git a/supernode/node/action/server/cascade/cascade_action_server.go b/supernode/node/action/server/cascade/cascade_action_server.go index 449f4c42..180ebd3d 100644 --- a/supernode/node/action/server/cascade/cascade_action_server.go +++ b/supernode/node/action/server/cascade/cascade_action_server.go @@ -74,7 +74,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er } ctx := stream.Context() - logtrace.Info(ctx, "client streaming request to upload cascade input data received", fields) + logtrace.Debug(ctx, "client streaming request to upload cascade input data received", fields) const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit @@ -140,7 +140,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er return fmt.Errorf("file size %d exceeds maximum allowed size of 1GB", totalSize) } - logtrace.Info(ctx, "received data chunk", logtrace.Fields{ + logtrace.Debug(ctx, "received data chunk", logtrace.Fields{ "chunk_size": len(x.Chunk.Data), "total_size_so_far": totalSize, }) @@ -148,7 +148,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er case *pb.RegisterRequest_Metadata: // Store metadata - this should be the final message metadata = x.Metadata - logtrace.Info(ctx, "received metadata", logtrace.Fields{ + logtrace.Debug(ctx, "received metadata", logtrace.Fields{ "task_id": metadata.TaskId, "action_id": metadata.ActionId, }) @@ -162,7 +162,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er } fields[logtrace.FieldTaskID] = metadata.GetTaskId() fields[logtrace.FieldActionID] = metadata.GetActionId() - 
logtrace.Info(ctx, "metadata received from action-sdk", fields) + logtrace.Debug(ctx, "metadata received from action-sdk", fields) // Ensure all data is written to disk before calculating hash if err := tempFile.Sync(); err != nil { @@ -174,7 +174,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er hash := hasher.Sum(nil) hashHex := hex.EncodeToString(hash) fields[logtrace.FieldHashHex] = hashHex - logtrace.Info(ctx, "final BLAKE3 hash generated", fields) + logtrace.Debug(ctx, "final BLAKE3 hash generated", fields) targetPath, err := replaceTempDirWithTaskDir(metadata.GetTaskId(), tempFilePath, tempFile) if err != nil { @@ -213,7 +213,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er return fmt.Errorf("registration failed: %w", err) } - logtrace.Info(ctx, "cascade registration completed successfully", fields) + logtrace.Debug(ctx, "cascade registration completed successfully", fields) return nil } @@ -225,7 +225,7 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS } ctx := stream.Context() - logtrace.Info(ctx, "download request received from client", fields) + logtrace.Debug(ctx, "download request received from client", fields) task := server.factory.NewCascadeRegistrationTask() @@ -254,7 +254,7 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS if err := task.CleanupDownload(ctx, tmpDir); err != nil { logtrace.Error(ctx, "error cleaning up the tmp dir", logtrace.Fields{logtrace.FieldError: err.Error()}) } else { - logtrace.Info(ctx, "tmp dir has been cleaned up", logtrace.Fields{"tmp_dir": tmpDir}) + logtrace.Debug(ctx, "tmp dir has been cleaned up", logtrace.Fields{"tmp_dir": tmpDir}) } } }() @@ -290,7 +290,7 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS logtrace.Error(ctx, "no artefact file retrieved", fields) return fmt.Errorf("no artefact to stream") } - logtrace.Info(ctx, "streaming 
artefact file in chunks", fields) + logtrace.Debug(ctx, "streaming artefact file in chunks", fields) // Open the restored file and stream directly from disk to avoid buffering entire file in memory f, err := os.Open(restoredFilePath) @@ -308,7 +308,7 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS // Calculate optimal chunk size based on file size chunkSize := calculateOptimalChunkSize(fi.Size()) - logtrace.Info(ctx, "calculated optimal chunk size for download", logtrace.Fields{ + logtrace.Debug(ctx, "calculated optimal chunk size for download", logtrace.Fields{ "file_size": fi.Size(), "chunk_size": chunkSize, }) @@ -347,7 +347,7 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS // If EOF after first read, we're done if readErr == io.EOF { - logtrace.Info(ctx, "completed streaming all chunks", fields) + logtrace.Debug(ctx, "completed streaming all chunks", fields) return nil } @@ -374,6 +374,6 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS // Cleanup is handled in deferred block above - logtrace.Info(ctx, "completed streaming all chunks", fields) + logtrace.Debug(ctx, "completed streaming all chunks", fields) return nil } diff --git a/supernode/node/supernode/gateway/server.go b/supernode/node/supernode/gateway/server.go index 5440a7f4..7e17e238 100644 --- a/supernode/node/supernode/gateway/server.go +++ b/supernode/node/supernode/gateway/server.go @@ -86,7 +86,7 @@ func (s *Server) Run(ctx context.Context) error { IdleTimeout: 60 * time.Second, } - logtrace.Info(ctx, "Starting HTTP gateway server", logtrace.Fields{ + logtrace.Debug(ctx, "Starting HTTP gateway server", logtrace.Fields{ "address": s.ipAddress, "port": s.port, }) @@ -105,7 +105,7 @@ func (s *Server) Stop(ctx context.Context) error { return nil } - logtrace.Info(ctx, "Shutting down HTTP gateway server", nil) + logtrace.Debug(ctx, "Shutting down HTTP gateway server", nil) return 
s.server.Shutdown(ctx) } diff --git a/supernode/node/supernode/server/server.go b/supernode/node/supernode/server/server.go index 37e8f4dd..774be094 100644 --- a/supernode/node/supernode/server/server.go +++ b/supernode/node/supernode/server/server.go @@ -48,8 +48,8 @@ func (server *Server) Run(ctx context.Context) error { // Set up gRPC logging logtrace.SetGRPCLogger() - logtrace.Info(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.config.Identity}) - logtrace.Info(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.config.ListenAddresses}) + logtrace.Debug(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.config.Identity}) + logtrace.Debug(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.config.ListenAddresses}) group, ctx := errgroup.WithContext(ctx) @@ -74,7 +74,7 @@ func (server *Server) Run(ctx context.Context) error { address := addr // Create a new variable to avoid closure issues group.Go(func() error { - logtrace.Info(ctx, "Starting gRPC server", logtrace.Fields{logtrace.FieldModule: "server", "address": address}) + logtrace.Debug(ctx, "Starting gRPC server", logtrace.Fields{logtrace.FieldModule: "server", "address": address}) return server.grpcServer.Serve(ctx, address, opts) }) } diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go index a29e2b99..3621167e 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/services/cascade/adaptors/p2p.go @@ -39,14 +39,14 @@ type P2PService interface { // p2pImpl is the default implementation of the P2PService interface. type p2pImpl struct { - p2p p2p.Client - rqStore rqstore.Store - metricsDisabled bool + p2p p2p.Client + rqStore rqstore.Store + metricsDisabled bool } // NewP2PService returns a concrete implementation of P2PService. 
func NewP2PService(client p2p.Client, store rqstore.Store, metricsDisabled bool) P2PService { - return &p2pImpl{p2p: client, rqStore: store, metricsDisabled: metricsDisabled} + return &p2pImpl{p2p: client, rqStore: store, metricsDisabled: metricsDisabled} } type StoreArtefactsRequest struct { @@ -57,13 +57,13 @@ type StoreArtefactsRequest struct { } func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { - logtrace.Info(ctx, "About to store artefacts (metadata + symbols)", logtrace.Fields{"taskID": req.TaskID, "id_files": len(req.IDFiles)}) + logtrace.Debug(ctx, "About to store artefacts (metadata + symbols)", logtrace.Fields{"taskID": req.TaskID, "id_files": len(req.IDFiles)}) - // Optionally enable per-node store RPC capture for this task - if !p.metricsDisabled { - cm.StartStoreCapture(req.TaskID) - defer cm.StopStoreCapture(req.TaskID) - } + // Optionally enable per-node store RPC capture for this task + if !p.metricsDisabled { + cm.StartStoreCapture(req.TaskID) + defer cm.StopStoreCapture(req.TaskID) + } start := time.Now() firstPassSymbols, totalSymbols, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, req.ActionID, req.SymbolsDir, req.IDFiles) @@ -71,7 +71,7 @@ func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, return errors.Wrap(err, "error storing artefacts") } dur := time.Since(start).Milliseconds() - logtrace.Info(ctx, "artefacts have been stored", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total": totalSymbols, "id_files_count": len(req.IDFiles)}) + logtrace.Debug(ctx, "artefacts have been stored", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total": totalSymbols, "id_files_count": len(req.IDFiles)}) // Record store summary for later event emission cm.SetStoreSummary(req.TaskID, firstPassSymbols, totalSymbols, len(req.IDFiles), dur) return nil @@ -101,7 +101,7 @@ func (p *p2pImpl) 
storeCascadeSymbolsAndData(ctx context.Context, taskID, action if targetCount < 1 && totalAvailable > 0 { targetCount = 1 } - logtrace.Info(ctx, "first-pass target coverage (symbols)", logtrace.Fields{ + logtrace.Debug(ctx, "first-pass target coverage (symbols)", logtrace.Fields{ "total_symbols": totalAvailable, "target_percent": storeSymbolsPercent, "target_count": targetCount, @@ -117,7 +117,7 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action sort.Strings(keys) // deterministic order inside the sample } - logtrace.Info(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) + logtrace.Debug(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) /* stream in fixed-size batches -------------------------------------- */ @@ -188,7 +188,7 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action if totalAvailable > 0 { achievedPct = (float64(totalSymbols) / float64(totalAvailable)) * 100.0 } - logtrace.Info(ctx, "first-pass achieved coverage (symbols)", + logtrace.Debug(ctx, "first-pass achieved coverage (symbols)", logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct}) if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { @@ -228,7 +228,7 @@ func walkSymbolTree(root string) ([]string, error) { // storeSymbolsInP2P loads a batch of symbols and stores them via P2P. // Returns (ratePct, requests, count, error) where `count` is the number of symbols in this batch. 
func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (int, error) { - logtrace.Info(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)}) + logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)}) symbols, err := utils.LoadSymbols(root, fileKeys) if err != nil { @@ -242,12 +242,12 @@ func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fi if err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID); err != nil { return len(symbols), fmt.Errorf("p2p store batch: %w", err) } - logtrace.Info(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) + logtrace.Debug(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { return len(symbols), fmt.Errorf("delete symbols: %w", err) } - logtrace.Info(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) + logtrace.Debug(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) // No per-RPC metrics propagated from p2p return len(symbols), nil diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index 9da3dc1e..8271fc51 100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -39,7 +39,7 @@ func (task *CascadeRegistrationTask) Download( send func(resp *DownloadResponse) error, ) (err error) { fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} - logtrace.Info(ctx, "Cascade download request received", fields) + logtrace.Debug(ctx, "Cascade download request received", fields) // Ensure task status is finalized regardless of outcome defer func() { @@ -56,7 +56,7 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldError] = err return task.wrapErr(ctx, "failed to get action", err, fields) } - logtrace.Info(ctx, "Action retrieved", fields) + 
logtrace.Debug(ctx, "Action retrieved", fields) task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) if actionDetails.GetAction().State != actiontypes.ActionStateDone { @@ -65,14 +65,14 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldActionState] = actionDetails.GetAction().State return task.wrapErr(ctx, "action not found", err, fields) } - logtrace.Info(ctx, "Action state validated", fields) + logtrace.Debug(ctx, "Action state validated", fields) metadata, err := task.decodeCascadeMetadata(ctx, actionDetails.GetAction().Metadata, fields) if err != nil { fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) } - logtrace.Info(ctx, "Cascade metadata decoded", fields) + logtrace.Debug(ctx, "Cascade metadata decoded", fields) task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send) // Notify: network retrieval phase begins @@ -83,7 +83,7 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "failed to download artifacts", err, fields) } - logtrace.Info(ctx, "File reconstructed and hash verified", fields) + logtrace.Debug(ctx, "File reconstructed and hash verified", fields) // Notify: decode completed, file ready on disk task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, "Decode completed", filePath, tmpDir, send) @@ -91,7 +91,7 @@ func (task *CascadeRegistrationTask) Download( } func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields, send func(resp *DownloadResponse) error) (string, string, error) { - logtrace.Info(ctx, "started downloading the artifacts", fields) + logtrace.Debug(ctx, "started downloading the artifacts", fields) var ( layout codec.Layout @@ -109,7 +109,7 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx 
context.Context, acti // Parse index file to get layout IDs indexData, err := task.parseIndexFile(indexFile) if err != nil { - logtrace.Info(ctx, "failed to parse index file", fields) + logtrace.Debug(ctx, "failed to parse index file", fields) continue } @@ -117,14 +117,14 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti var netMS, decMS int64 layout, netMS, decMS, layoutAttempts, err = task.retrieveLayoutFromIndex(ctx, indexData, fields) if err != nil { - logtrace.Info(ctx, "failed to retrieve layout from index", fields) + logtrace.Debug(ctx, "failed to retrieve layout from index", fields) continue } layoutFetchMS = netMS layoutDecodeMS = decMS if len(layout.Blocks) > 0 { - logtrace.Info(ctx, "layout file retrieved via index", fields) + logtrace.Debug(ctx, "layout file retrieved via index", fields) break } } @@ -163,13 +163,12 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( if targetRequiredCount < 1 && totalSymbols > 0 { targetRequiredCount = 1 } - logtrace.Info(ctx, "Retrieving target-required symbols for decode", fields) + logtrace.Debug(ctx, "Retrieving target-required symbols for decode", fields) - - if !task.config.MetricsDisabled { - cm.StartRetrieveCapture(actionID) - defer cm.StopRetrieveCapture(actionID) - } + if !task.config.MetricsDisabled { + cm.StartRetrieveCapture(actionID) + defer cm.StopRetrieveCapture(actionID) + } // Measure symbols batch retrieve duration retrieveStart := time.Now() @@ -203,22 +202,22 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( } decodeMS := time.Since(decodeStart).Milliseconds() - // Set minimal retrieve summary and emit event strictly from internal collector - if !task.config.MetricsDisabled { - cm.SetRetrieveSummary(actionID, retrieveMS, decodeMS) - payload := cm.BuildDownloadEventPayloadFromCollector(actionID) - if retrieve, ok := payload["retrieve"].(map[string]any); ok { - retrieve["target_required_percent"] = targetRequiredPercent - 
retrieve["target_required_count"] = targetRequiredCount - retrieve["total_symbols"] = totalSymbols - } - if b, err := json.MarshalIndent(payload, "", " "); err == nil { - task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", send) - } - } else { - // Send minimal hardcoded event when metrics disabled - task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, "Download completed (metrics disabled)", "", "", send) - } + // Set minimal retrieve summary and emit event strictly from internal collector + if !task.config.MetricsDisabled { + cm.SetRetrieveSummary(actionID, retrieveMS, decodeMS) + payload := cm.BuildDownloadEventPayloadFromCollector(actionID) + if retrieve, ok := payload["retrieve"].(map[string]any); ok { + retrieve["target_required_percent"] = targetRequiredPercent + retrieve["target_required_count"] = targetRequiredCount + retrieve["total_symbols"] = totalSymbols + } + if b, err := json.MarshalIndent(payload, "", " "); err == nil { + task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", send) + } + } else { + // Send minimal hardcoded event when metrics disabled + task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, "Download completed (metrics disabled)", "", "", send) + } fileHash, err := crypto.HashFileIncrementally(decodeInfo.FilePath, 0) if err != nil { @@ -238,7 +237,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( fields[logtrace.FieldError] = err.Error() return "", decodeInfo.DecodeTmpDir, err } - logtrace.Info(ctx, "File successfully restored and hash verified", fields) + logtrace.Debug(ctx, "File successfully restored and hash verified", fields) return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil } diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go index fb8c7ef5..b22ec14a 100644 --- a/supernode/services/cascade/helper.go +++ b/supernode/services/cascade/helper.go @@ -14,9 +14,9 @@ import ( 
"github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" + cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/golang/protobuf/proto" @@ -36,7 +36,7 @@ func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID s if res.GetAction().ActionID == "" { return nil, task.wrapErr(ctx, "action not found", errors.New(""), f) } - logtrace.Info(ctx, "action has been retrieved", f) + logtrace.Debug(ctx, "action has been retrieved", f) return res.GetAction(), nil } @@ -46,7 +46,7 @@ func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, b if err != nil { return task.wrapErr(ctx, "failed to get top SNs", err, f) } - logtrace.Info(ctx, "Fetched Top Supernodes", f) + logtrace.Debug(ctx, "Fetched Top Supernodes", f) if !supernode.Exists(top.Supernodes, task.config.SupernodeAccountAddress) { // Build information about supernodes for better error context @@ -54,7 +54,7 @@ func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, b for i, sn := range top.Supernodes { addresses[i] = sn.SupernodeAccount } - logtrace.Info(ctx, "Supernode not in top list", logtrace.Fields{ + logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{ "currentAddress": task.config.SupernodeAccountAddress, "topSupernodes": addresses, }) @@ -78,7 +78,7 @@ func (task *CascadeRegistrationTask) verifyDataHash(ctx context.Context, dh []by if string(b64) != expected { return task.wrapErr(ctx, "data hash doesn't match", errors.New(""), f) } - logtrace.Info(ctx, "request data-hash has been matched with the action data-hash", f) + logtrace.Debug(ctx, "request data-hash has been matched 
with the action data-hash", f) return nil } @@ -110,7 +110,7 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. if err := task.LumeraClient.Verify(ctx, creator, []byte(indexFileB64), creatorSigBytes); err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify creator signature", err, f) } - logtrace.Info(ctx, "creator signature successfully verified", f) + logtrace.Debug(ctx, "creator signature successfully verified", f) // Decode index file to get the layout signature indexFile, err := decodeIndexFile(indexFileB64) @@ -132,7 +132,7 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) } - logtrace.Info(ctx, "layout signature successfully verified", f) + logtrace.Debug(ctx, "layout signature successfully verified", f) return encodedMeta, indexFile.LayoutSignature, nil } @@ -199,22 +199,22 @@ func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, er // emitArtefactsStored builds a single-line metrics summary and emits the // SupernodeEventTypeArtefactsStored event while logging the metrics line. 
func (task *CascadeRegistrationTask) emitArtefactsStored( - ctx context.Context, - fields logtrace.Fields, - _ codec.Layout, - send func(resp *RegisterResponse) error, + ctx context.Context, + fields logtrace.Fields, + _ codec.Layout, + send func(resp *RegisterResponse) error, ) { if fields == nil { fields = logtrace.Fields{} } - // Build payload strictly from internal collector (no P2P snapshots) - payload := cm.BuildStoreEventPayloadFromCollector(task.ID()) + // Build payload strictly from internal collector (no P2P snapshots) + payload := cm.BuildStoreEventPayloadFromCollector(task.ID()) b, _ := json.MarshalIndent(payload, "", " ") msg := string(b) fields["metrics_json"] = msg - logtrace.Info(ctx, "artefacts have been stored", fields) + logtrace.Debug(ctx, "artefacts have been stored", fields) task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) // No central state to clear; adaptor returns calls inline } @@ -279,7 +279,7 @@ func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action requiredFee := sdk.NewCoin("ulume", math.NewInt(amount)) // Log the calculated fee - logtrace.Info(ctx, "calculated required fee", logtrace.Fields{ + logtrace.Debug(ctx, "calculated required fee", logtrace.Fields{ "fee": requiredFee.String(), "dataBytes": dataSize, }) @@ -377,6 +377,6 @@ func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context return task.wrapErr(ctx, "failed to verify download signature", err, fields) } - logtrace.Info(ctx, "download signature successfully verified", fields) + logtrace.Debug(ctx, "download signature successfully verified", fields) return nil } diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go index 4739e0d9..4b456734 100644 --- a/supernode/services/cascade/register.go +++ b/supernode/services/cascade/register.go @@ -1,11 +1,11 @@ package cascade import ( - "context" - "os" + "context" + "os" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - 
"github.com/LumeraProtocol/supernode/v2/supernode/services/common" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/supernode/services/common" ) // RegisterRequest contains parameters for upload request @@ -46,7 +46,7 @@ func (task *CascadeRegistrationTask) Register( ) (err error) { fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} - logtrace.Info(ctx, "Cascade registration request received", fields) + logtrace.Debug(ctx, "Cascade registration request received", fields) // Ensure task status and resources are finalized regardless of outcome defer func() { @@ -64,7 +64,7 @@ func (task *CascadeRegistrationTask) Register( if remErr := os.RemoveAll(req.FilePath); remErr != nil { logtrace.Warn(ctx, "Failed to remove uploaded file", fields) } else { - logtrace.Info(ctx, "Uploaded file cleaned up", fields) + logtrace.Debug(ctx, "Uploaded file cleaned up", fields) } } }() @@ -78,14 +78,14 @@ func (task *CascadeRegistrationTask) Register( fields[logtrace.FieldCreator] = action.Creator fields[logtrace.FieldStatus] = action.State fields[logtrace.FieldPrice] = action.Price - logtrace.Info(ctx, "Action retrieved", fields) + logtrace.Debug(ctx, "Action retrieved", fields) task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) /* 2. Verify action fee -------------------------------------------------------- */ if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { return err } - logtrace.Info(ctx, "Action fee verified", fields) + logtrace.Debug(ctx, "Action fee verified", fields) task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) /* 3. 
Ensure this super-node is eligible -------------------------------------- */ @@ -93,7 +93,7 @@ func (task *CascadeRegistrationTask) Register( if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { return err } - logtrace.Info(ctx, "Top supernode eligibility confirmed", fields) + logtrace.Debug(ctx, "Top supernode eligibility confirmed", fields) task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) /* 4. Decode cascade metadata -------------------------------------------------- */ @@ -101,14 +101,14 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } - logtrace.Info(ctx, "Cascade metadata decoded", fields) + logtrace.Debug(ctx, "Cascade metadata decoded", fields) task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) /* 5. Verify data hash --------------------------------------------------------- */ if err := task.verifyDataHash(ctx, req.DataHash, cascadeMeta.DataHash, fields); err != nil { return err } - logtrace.Info(ctx, "Data hash verified", fields) + logtrace.Debug(ctx, "Data hash verified", fields) task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) /* 6. Encode the raw data ------------------------------------------------------ */ @@ -116,7 +116,7 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } - logtrace.Info(ctx, "Input encoded", fields) + logtrace.Debug(ctx, "Input encoded", fields) task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) /* 7. Signature verification + layout decode ---------------------------------- */ @@ -126,7 +126,7 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } - logtrace.Info(ctx, "Signature verified", fields) + logtrace.Debug(ctx, "Signature verified", fields) task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) /* 8. 
Generate RQ-ID files ----------------------------------------------------- */ @@ -134,48 +134,48 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } - logtrace.Info(ctx, "RQID files generated", fields) + logtrace.Debug(ctx, "RQID files generated", fields) task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) /* 9. Consistency checks ------------------------------------------------------- */ if err := verifyIDs(layout, encResp.Metadata); err != nil { return task.wrapErr(ctx, "failed to verify IDs", err, fields) } - logtrace.Info(ctx, "RQIDs verified", fields) + logtrace.Debug(ctx, "RQIDs verified", fields) task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) /* 10. Simulate finalize to avoid storing artefacts if it would fail ---------- */ if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "Finalize simulation failed", fields) + logtrace.Debug(ctx, "Finalize simulation failed", fields) // Emit explicit simulation failure event for client visibility task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) return task.wrapErr(ctx, "finalize action simulation failed", err, fields) } - logtrace.Info(ctx, "Finalize simulation passed", fields) + logtrace.Debug(ctx, "Finalize simulation passed", fields) // Transmit as a standard event so SDK can propagate it (dedicated type) task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) /* 11. Persist artefacts -------------------------------------------------------- */ - // Persist artefacts to the P2P network. P2P interfaces return error only; - // metrics are summarized at the cascade layer and emitted via event. + // Persist artefacts to the P2P network. 
P2P interfaces return error only; + // metrics are summarized at the cascade layer and emitted via event. if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { return err } - // Emit compact analytics payload from centralized metrics collector (optional) - if !task.config.MetricsDisabled { - task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) - } + // Emit compact analytics payload from centralized metrics collector (optional) + if !task.config.MetricsDisabled { + task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) + } resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) if err != nil { fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "Finalize action error", fields) + logtrace.Debug(ctx, "Finalize action error", fields) return task.wrapErr(ctx, "failed to finalize action", err, fields) } txHash := resp.TxResponse.TxHash fields[logtrace.FieldTxHash] = txHash - logtrace.Info(ctx, "Action finalized", fields) + logtrace.Debug(ctx, "Action finalized", fields) task.streamEvent(SupernodeEventTypeActionFinalized, "Action finalized", txHash, send) return nil diff --git a/supernode/services/common/base/supernode_service.go b/supernode/services/common/base/supernode_service.go index 1d41715b..424556b0 100644 --- a/supernode/services/common/base/supernode_service.go +++ b/supernode/services/common/base/supernode_service.go @@ -52,7 +52,7 @@ func (service *SuperNodeService) RunHelper(ctx context.Context, nodeID string, p service.Worker = task.NewWorker() logtrace.Error(ctx, "Service run failed, retrying", logtrace.Fields{logtrace.FieldModule: "supernode", logtrace.FieldError: err.Error()}) } else { - logtrace.Info(ctx, "Service run completed successfully - closing sn services", logtrace.Fields{logtrace.FieldModule: "supernode"}) + logtrace.Debug(ctx, "Service run completed successfully - closing sn services", 
logtrace.Fields{logtrace.FieldModule: "supernode"}) return nil } } diff --git a/supernode/services/common/base/supernode_task.go b/supernode/services/common/base/supernode_task.go index 937e6013..2908558d 100644 --- a/supernode/services/common/base/supernode_task.go +++ b/supernode/services/common/base/supernode_task.go @@ -25,7 +25,7 @@ type SuperNodeTask struct { func (task *SuperNodeTask) RunHelper(ctx context.Context, clean TaskCleanerFunc) error { ctx = task.context(ctx) logtrace.Debug(ctx, "Start task", logtrace.Fields{}) - defer logtrace.Info(ctx, "Task canceled", logtrace.Fields{}) + defer logtrace.Debug(ctx, "Task canceled", logtrace.Fields{}) defer task.Cancel() task.SetStatusNotifyFunc(func(status *state.Status) { diff --git a/supernode/services/common/storage/handler.go b/supernode/services/common/storage/handler.go index 210dab0f..3967fe2d 100644 --- a/supernode/services/common/storage/handler.go +++ b/supernode/services/common/storage/handler.go @@ -74,7 +74,7 @@ func (h *StorageHandler) StoreBatch(ctx context.Context, list [][]byte, typ int) taskID = fmt.Sprintf("%v", val) } - logtrace.Info(ctx, "task_id in storeList", logtrace.Fields{logtrace.FieldTaskID: taskID}) + logtrace.Debug(ctx, "task_id in storeList", logtrace.Fields{logtrace.FieldTaskID: taskID}) // Add taskID to context for metrics ctx = p2pmetrics.WithTaskID(ctx, taskID) return h.P2PClient.StoreBatch(ctx, list, typ, taskID) @@ -110,7 +110,7 @@ func (h *StorageHandler) StoreRaptorQSymbolsIntoP2P(ctx context.Context, taskID, sort.Strings(keys) // deterministic order inside the sample } - logtrace.Info(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) + logtrace.Debug(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) /* stream in fixed-size batches -------------------------------------- */ for start := 0; start < len(keys); { @@ -128,7 +128,7 @@ func (h *StorageHandler) StoreRaptorQSymbolsIntoP2P(ctx context.Context, taskID, return fmt.Errorf("update 
first-batch flag: %w", err) } - logtrace.Info(ctx, "finished storing RaptorQ symbols", logtrace.Fields{"curr-time": time.Now().UTC(), "count": len(keys)}) + logtrace.Debug(ctx, "finished storing RaptorQ symbols", logtrace.Fields{"curr-time": time.Now().UTC(), "count": len(keys)}) return nil } @@ -160,7 +160,7 @@ func walkSymbolTree(root string) ([]string, error) { } func (h *StorageHandler) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) error { - logtrace.Info(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)}) + logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)}) symbols, err := utils.LoadSymbols(root, fileKeys) if err != nil { @@ -173,13 +173,13 @@ func (h *StorageHandler) storeSymbolsInP2P(ctx context.Context, taskID, root str return fmt.Errorf("p2p store batch: %w", err) } - logtrace.Info(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) + logtrace.Debug(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { return fmt.Errorf("delete symbols: %w", err) } - logtrace.Info(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) + logtrace.Debug(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) return nil } diff --git a/supernode/services/common/supernode/service.go b/supernode/services/common/supernode/service.go index 13d5efe4..81bac456 100644 --- a/supernode/services/common/supernode/service.go +++ b/supernode/services/common/supernode/service.go @@ -54,7 +54,7 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric logtrace.FieldMethod: "GetStatus", logtrace.FieldModule: "SupernodeStatusService", } - logtrace.Info(ctx, "status request received", fields) + logtrace.Debug(ctx, "status request received", fields) var resp StatusResponse resp.Version = Version diff --git a/supernode/services/verifier/verifier.go 
b/supernode/services/verifier/verifier.go index 867bd966..68a2ae77 100644 --- a/supernode/services/verifier/verifier.go +++ b/supernode/services/verifier/verifier.go @@ -75,7 +75,7 @@ func (cv *ConfigVerifier) VerifyConfig(ctx context.Context) (*VerificationResult // Check 5: Verify all required ports are available cv.checkPortsAvailable(result) - logtrace.Info(ctx, "Config verification completed", logtrace.Fields{ + logtrace.Debug(ctx, "Config verification completed", logtrace.Fields{ "valid": result.IsValid(), "errors": len(result.Errors), "warnings": len(result.Warnings), From 6bd2b9c381241772d857d9fed7baad9f4063eedb Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Tue, 30 Sep 2025 08:57:51 +0500 Subject: [PATCH 12/27] high-signal logs --- p2p/kademlia/dht.go | 114 ++++++++++++--------- p2p/kademlia/fetch_and_store.go | 3 + p2p/kademlia/network.go | 12 +-- p2p/kademlia/rq_symbols.go | 27 ++++- supernode/services/cascade/adaptors/p2p.go | 53 ++++++++-- supernode/services/cascade/download.go | 56 +++++++--- supernode/services/cascade/helper.go | 32 ++++-- supernode/services/cascade/register.go | 24 ++--- 8 files changed, 220 insertions(+), 101 deletions(-) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index df2be5c7..d0022274 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -362,7 +362,7 @@ func (s *DHT) Store(ctx context.Context, data []byte, typ int) (string, error) { // measured success rate for node RPCs is below the configured minimum, an error // is returned. Metrics are not returned through the API. 
func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID string) error { - logtrace.Debug(ctx, "Store DB batch begin", logtrace.Fields{ + logtrace.Info(ctx, "DHT StoreBatch begin", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, "records": len(values), @@ -370,7 +370,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s if err := s.store.StoreBatch(ctx, values, typ, true); err != nil { return fmt.Errorf("store batch: %v", err) } - logtrace.Debug(ctx, "Store DB batch done, store network batch begin", logtrace.Fields{ + logtrace.Info(ctx, "DHT StoreBatch: local stored; network begin", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, }) @@ -380,7 +380,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s return fmt.Errorf("iterate batch store: %v", err) } - logtrace.Debug(ctx, "Store network batch workers done", logtrace.Fields{ + logtrace.Info(ctx, "DHT StoreBatch: network done", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, }) @@ -391,6 +391,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s // Retrieve data from the networking using key. Key is the base58 encoded // identifier of the data. 
func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]byte, error) { + start := time.Now() decoded := base58.Decode(key) if len(decoded) != B/8 { return nil, fmt.Errorf("invalid key: %v", key) @@ -406,6 +407,7 @@ func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]by // retrieve the key/value from queries storage value, err := s.store.Retrieve(ctx, decoded) if err == nil && len(value) > 0 { + logtrace.Info(ctx, "DHT Retrieve local hit", logtrace.Fields{"key": hex.EncodeToString(decoded), "ms": time.Since(start).Milliseconds()}) return value, nil } else if err != nil { logtrace.Error(ctx, "Error retrieving key from local storage", logtrace.Fields{ @@ -421,20 +423,23 @@ func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]by } // if not found locally, iterative find value from kademlia network + logtrace.Info(ctx, "DHT Retrieve network lookup", logtrace.Fields{"key": dbKey}) peerValue, err := s.iterate(ctx, IterateFindValue, decoded, nil, 0) if err != nil { return nil, errors.Errorf("retrieve from peer: %w", err) } if len(peerValue) > 0 { - logtrace.Debug(ctx, "Not found locally, retrieved from other nodes", logtrace.Fields{ + logtrace.Info(ctx, "DHT Retrieve network hit", logtrace.Fields{ logtrace.FieldModule: "dht", "key": dbKey, "data_len": len(peerValue), + "ms": time.Since(start).Milliseconds(), }) } else { - logtrace.Debug(ctx, "Not found locally, not found in other nodes", logtrace.Fields{ + logtrace.Info(ctx, "DHT Retrieve miss", logtrace.Fields{ logtrace.FieldModule: "dht", "key": dbKey, + "ms": time.Since(start).Milliseconds(), }) } @@ -529,6 +534,8 @@ func (s *DHT) GetValueFromNode(ctx context.Context, target []byte, n *Node) ([]b cctx, ccancel := context.WithTimeout(ctx, time.Second*5) defer ccancel() + // Minimal per-RPC visibility + logtrace.Info(ctx, "RPC FindValue send", logtrace.Fields{"node": n.String(), "key": hex.EncodeToString(target)}) response, err := 
s.network.Call(cctx, request, false) if err != nil { logtrace.Debug(ctx, "Network call request failed", logtrace.Fields{ @@ -538,6 +545,7 @@ func (s *DHT) GetValueFromNode(ctx context.Context, target []byte, n *Node) ([]b }) return nil, fmt.Errorf("network call request %s failed: %w", request.String(), err) } + logtrace.Info(ctx, "RPC FindValue completed", logtrace.Fields{"node": n.String()}) v, ok := response.Data.(*FindValueResponse) if ok && v.Status.Result == ResultOk && len(v.Value) > 0 { @@ -573,7 +581,7 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by // update the running goroutines number++ - logtrace.Debug(ctx, "Start work for node", logtrace.Fields{ + logtrace.Info(ctx, "Start work for node", logtrace.Fields{ logtrace.FieldModule: "p2p", "iterate_type": iterativeType, "node": node.String(), @@ -597,18 +605,35 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by // new a request message request := s.newMessage(messageType, receiver, data) + // Minimal per-RPC visibility + op := "" + switch messageType { + case FindNode: + op = "FindNode" + case FindValue: + op = "FindValue" + default: + op = "RPC" + } + fields := logtrace.Fields{"node": receiver.String()} + if messageType == FindValue { + fields["key"] = hex.EncodeToString(target) + } + logtrace.Info(ctx, "RPC "+op+" send", fields) // send the request and receive the response response, err := s.network.Call(ctx, request, false) if err != nil { - logtrace.Debug(ctx, "Network call request failed", logtrace.Fields{ + logtrace.Info(ctx, "Iterate worker RPC failed", logtrace.Fields{ logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "request": request.String(), + "node": receiver.String(), }) // node is unreachable, remove the node //removedNodes = append(removedNodes, receiver) return } + logtrace.Info(ctx, "RPC "+op+" completed", logtrace.Fields{"node": receiver.String()}) // send the response to message channel responses <- 
response @@ -637,7 +662,7 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result batchHexKeys := hexKeys[start:end] - logtrace.Debug(ctx, "Processing batch of local keys", logtrace.Fields{ + logtrace.Info(ctx, "Processing batch of local keys", logtrace.Fields{ logtrace.FieldModule: "dht", "batch_size": len(batchHexKeys), "total_keys": len(hexKeys), @@ -671,6 +696,7 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, txID string, localOnly ...bool) (result map[string][]byte, err error) { start := time.Now() + logtrace.Info(ctx, "DHT BatchRetrieve begin", logtrace.Fields{"txid": txID, "keys": len(keys), "required": required}) result = make(map[string][]byte) var resMap sync.Map var foundLocalCount int32 @@ -791,6 +817,7 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, wg.Wait() netFound := int(atomic.LoadInt32(&networkFound)) + logtrace.Info(ctx, "DHT BatchRetrieve complete", logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(start).Milliseconds()}) // Record batch retrieve stats for internal DHT snapshot window s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(start)) // Also feed retrieve counts into the per-task collector for stream events @@ -1001,7 +1028,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, wg.Wait() - logtrace.Debug(ctx, "Iterate batch get values done", logtrace.Fields{ + logtrace.Info(ctx, "Iterate batch get values done", logtrace.Fields{ logtrace.FieldModule: "dht", "found_count": atomic.LoadInt32(&foundCount), }) @@ -1045,10 +1072,12 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, func (s *DHT) doBatchGetValuesCall(ctx context.Context, node *Node, requestKeys 
map[string]KeyValWithClosest) (map[string]KeyValWithClosest, error) { request := s.newMessage(BatchGetValues, node, &BatchGetValuesRequest{Data: requestKeys}) + logtrace.Info(ctx, "RPC BatchGetValues send", logtrace.Fields{"node": node.String(), "keys": len(requestKeys)}) response, err := s.network.Call(ctx, request, false) if err != nil { return nil, fmt.Errorf("network call request %s failed: %w", request.String(), err) } + logtrace.Info(ctx, "RPC BatchGetValues completed", logtrace.Fields{"node": node.String()}) resp, ok := response.Data.(*BatchGetValuesResponse) if !ok { @@ -1083,7 +1112,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // find the closest contacts for the target node from queries route tables nl, _ := s.ht.closestContacts(Alpha, target, igList) if len(igList) > 0 { - logtrace.Debug(ctx, "Closest contacts", logtrace.Fields{ + logtrace.Info(ctx, "Closest contacts", logtrace.Fields{ logtrace.FieldModule: "p2p", "nodes": nl.String(), "ignored": s.ignorelist.String(), @@ -1093,7 +1122,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat if nl.Len() == 0 { return nil, nil } - logtrace.Debug(ctx, "Iterate start", logtrace.Fields{ + logtrace.Info(ctx, "Iterate start", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "type": iterativeType, @@ -1107,7 +1136,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat if iterativeType == IterateFindNode { hashedTargetID, _ := utils.Blake3Hash(target) bucket := s.ht.bucketIndex(s.ht.self.HashedID, hashedTargetID) - logtrace.Debug(ctx, "Bucket for target", logtrace.Fields{ + logtrace.Info(ctx, "Bucket for target", logtrace.Fields{ logtrace.FieldModule: "p2p", "target": sKey, }) @@ -1131,7 +1160,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // Set a maximum number of iterations to prevent indefinite looping maxIterations := 5 // Adjust the maximum iterations 
as needed - logtrace.Debug(ctx, "Begin iteration", logtrace.Fields{ + logtrace.Info(ctx, "Begin iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1142,7 +1171,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat case <-ctx.Done(): return nil, fmt.Errorf("iterate cancelled: %w", ctx.Err()) case <-timeout: - logtrace.Debug(ctx, "Iteration timed out", logtrace.Fields{ + logtrace.Info(ctx, "Iteration timed out", logtrace.Fields{ logtrace.FieldModule: "p2p", }) return nil, nil @@ -1165,7 +1194,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat } default: - logtrace.Error(ctx, "Unknown message type", logtrace.Fields{ + logtrace.Info(ctx, "Unknown message type", logtrace.Fields{ logtrace.FieldModule: "dht", "type": response.MessageType, }) @@ -1174,7 +1203,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // Stop search if no more nodes to contact if !searchRest && len(nl.Nodes) == 0 { - logtrace.Debug(ctx, "Search stopped", logtrace.Fields{ + logtrace.Info(ctx, "Search stopped", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1186,7 +1215,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat nl.Comparator = target nl.Sort() - logtrace.Debug(ctx, "Iterate sorted nodes", logtrace.Fields{ + logtrace.Info(ctx, "Iterate sorted nodes", logtrace.Fields{ logtrace.FieldModule: "p2p", "id": base58.Encode(s.ht.self.ID), "iterate": iterativeType, @@ -1223,7 +1252,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat } } - logtrace.Debug(ctx, "Finish iteration without results", logtrace.Fields{ + logtrace.Info(ctx, "Finish iteration without results", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1244,7 +1273,7 @@ func (s *DHT) handleResponses(ctx context.Context, responses <-chan *Message, nl v, ok := 
response.Data.(*FindValueResponse) if ok { if v.Status.Result == ResultOk && len(v.Value) > 0 { - logtrace.Debug(ctx, "Iterate found value from network", logtrace.Fields{ + logtrace.Info(ctx, "Iterate found value from network", logtrace.Fields{ logtrace.FieldModule: "p2p", }) return nl, v.Value @@ -1274,7 +1303,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] // nl will have the closest nodes to the target value, it will ignore the nodes in igList nl, _ := s.ht.closestContacts(Alpha, target, igList) if len(igList) > 0 { - logtrace.Debug(ctx, "Closest contacts", logtrace.Fields{ + logtrace.Info(ctx, "Closest contacts", logtrace.Fields{ logtrace.FieldModule: "p2p", "nodes": nl.String(), "ignored": s.ignorelist.String(), @@ -1289,7 +1318,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] searchRest := false // keep track of contacted nodes so that we don't hit them again contacted := make(map[string]bool) - logtrace.Debug(ctx, "Begin iteration", logtrace.Fields{ + logtrace.Info(ctx, "Begin iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1298,7 +1327,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] var closestNode *Node var iterationCount int for iterationCount = 0; iterationCount < maxIterations; iterationCount++ { - logtrace.Debug(ctx, "Begin find value", logtrace.Fields{ + logtrace.Info(ctx, "Begin find value", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "nl": nl.Len(), @@ -1307,7 +1336,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] }) if nl.Len() == 0 { - logtrace.Error(ctx, "Nodes list length is 0", logtrace.Fields{ + logtrace.Info(ctx, "Nodes list length is 0", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1318,7 +1347,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] // if the 
closest node is the same as the last iteration and we don't want to search rest of nodes, we are done if !searchRest && (closestNode != nil && bytes.Equal(nl.Nodes[0].ID, closestNode.ID)) { - logtrace.Debug(ctx, "Closest node is the same as the last iteration", logtrace.Fields{ + logtrace.Info(ctx, "Closest node is the same as the last iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1337,7 +1366,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] nl.Sort() - logtrace.Debug(ctx, "Iteration progress", logtrace.Fields{ + logtrace.Info(ctx, "Iteration progress", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1346,7 +1375,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] }) } - logtrace.Debug(ctx, "Finished iterations without results", logtrace.Fields{ + logtrace.Info(ctx, "Finished iterations without results", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1559,7 +1588,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, skey, _ := utils.Blake3Hash(data) if finalStoreCount >= int32(Alpha) { - logtrace.Debug(ctx, "Store data to alpha nodes success", logtrace.Fields{ + logtrace.Info(ctx, "Store data to alpha nodes success", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": taskID, "len_total_nodes": nl.Len(), @@ -1569,7 +1598,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, return nil } - logtrace.Debug(ctx, "Store data to alpha nodes failed", logtrace.Fields{ + logtrace.Info(ctx, "Store data to alpha nodes failed", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": taskID, "store_count": finalStoreCount, @@ -1656,7 +1685,7 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i knownNodes := make(map[string]*Node) hashes := make([][]byte, len(values)) - logtrace.Debug(ctx, 
"Iterate batch store begin", logtrace.Fields{ + logtrace.Info(ctx, "Iterate batch store begin", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "keys": len(values), @@ -1683,6 +1712,7 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i requests := 0 successful := 0 + logtrace.Info(ctx, "Iterate batch store: dispatching to nodes", logtrace.Fields{"task_id": id, "nodes": len(knownNodes)}) storeResponses := s.batchStoreNetwork(ctx, values, knownNodes, storageMap, typ) for response := range storeResponses { requests++ @@ -1743,14 +1773,14 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i successRate := float64(successful) / float64(requests) * 100 if successRate >= minimumDataStoreSuccessRate { - logtrace.Debug(ctx, "Successful store operations", logtrace.Fields{ + logtrace.Info(ctx, "Successful store operations", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "success_rate": fmt.Sprintf("%.2f%%", successRate), }) return nil } else { - logtrace.Debug(ctx, "Failed to achieve desired success rate", logtrace.Fields{ + logtrace.Info(ctx, "Failed to achieve desired success rate", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "success_rate": fmt.Sprintf("%.2f%%", successRate), @@ -1777,10 +1807,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ var wg sync.WaitGroup for key, node := range nodes { - logtrace.Debug(ctx, "Node", logtrace.Fields{ - logtrace.FieldModule: "dht", - "port": node.String(), - }) + logtrace.Info(ctx, "Preparing batch store to node", logtrace.Fields{logtrace.FieldModule: "dht", "node": node.String()}) if s.ignorelist.Banned(node) { logtrace.Debug(ctx, "Ignoring banned node in batch store network call", logtrace.Fields{ logtrace.FieldModule: "dht", @@ -1810,11 +1837,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ totalBytes += len(values[idx]) } - logtrace.Debug(ctx, "Batch 
store to node", logtrace.Fields{ - logtrace.FieldModule: "dht", - "keys": len(toStore), - "size_before_compress": utils.BytesIntToMB(totalBytes), - }) + logtrace.Info(ctx, "RPC BatchStoreData send", logtrace.Fields{logtrace.FieldModule: "dht", "node": receiver.String(), "keys": len(toStore), "size_mb": utils.BytesIntToMB(totalBytes)}) // Skip empty payloads: avoid sending empty store RPCs and do not record no-op metrics. if len(toStore) == 0 { @@ -1835,15 +1858,12 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ s.metrics.IncHotPathBanIncr() } - logtrace.Debug(ctx, "Network call batch store request failed", logtrace.Fields{ - logtrace.FieldModule: "p2p", - logtrace.FieldError: err.Error(), - "request": request.String(), - }) + logtrace.Info(ctx, "RPC BatchStoreData failed", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "node": receiver.String(), "ms": dur}) responses <- &MessageWithError{Error: err, Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur} return } + logtrace.Info(ctx, "RPC BatchStoreData completed", logtrace.Fields{logtrace.FieldModule: "p2p", "node": receiver.String(), "keys": len(toStore), "ms": dur}) responses <- &MessageWithError{Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur} } }(node, key) @@ -1856,7 +1876,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ } func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[string]*Node, contacted map[string]bool, txid string) (chan *MessageWithError, bool) { - logtrace.Debug(ctx, "Batch find node begin", logtrace.Fields{ + logtrace.Info(ctx, "Batch find node begin", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": txid, "nodes_count": len(nodes), @@ -1927,7 +1947,7 @@ func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[str } wg.Wait() close(responses) - logtrace.Debug(ctx, "Batch find node 
done", logtrace.Fields{ + logtrace.Info(ctx, "Batch find node done", logtrace.Fields{ logtrace.FieldModule: "dht", "nodes_count": len(nodes), "len_resp": len(responses), diff --git a/p2p/kademlia/fetch_and_store.go b/p2p/kademlia/fetch_and_store.go index d7bc0f28..8f954364 100644 --- a/p2p/kademlia/fetch_and_store.go +++ b/p2p/kademlia/fetch_and_store.go @@ -275,6 +275,8 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, // GetBatchValuesFromNode get values from node in bateches func (s *DHT) GetBatchValuesFromNode(ctx context.Context, keys []string, n *Node) (bool, map[string][]byte, []string, error) { logtrace.Debug(ctx, "sending batch fetch request", logtrace.Fields{"node-ip": n.IP, "keys": len(keys)}) + // Minimal per-RPC visibility for background replication path + logtrace.Info(ctx, "RPC BatchFindValues send", logtrace.Fields{"node": n.String(), "keys": len(keys)}) messageType := BatchFindValues @@ -349,6 +351,7 @@ func (s *DHT) GetBatchValuesFromNode(ctx context.Context, keys []string, n *Node } logtrace.Debug(ctx, "batch fetch response rcvd and keys verified", logtrace.Fields{"node-ip": n.IP, "received-keys": len(decompressedMap), "verified-keys": len(retMap), "failed-keys": len(failedKeys)}) + logtrace.Info(ctx, "RPC BatchFindValues completed", logtrace.Fields{"node": n.String(), "received_keys": len(decompressedMap), "verified_keys": len(retMap)}) return v.Done, retMap, failedKeys, nil } diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index c887eab1..3bca8f20 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -936,7 +936,7 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) } - logtrace.Debug(ctx, "Batch get values request received", logtrace.Fields{ + logtrace.Info(ctx, "Batch get values request received", logtrace.Fields{ logtrace.FieldModule: "p2p", "from": 
message.Sender.String(), }) @@ -966,7 +966,7 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) } - logtrace.Debug(ctx, "Batch get values request processed", logtrace.Fields{ + logtrace.Info(ctx, "Batch get values request processed", logtrace.Fields{ logtrace.FieldModule: "p2p", "requested-keys": len(keys), "found": count, @@ -1006,7 +1006,7 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFindValuesRequest, ip string, reqID string) (isDone bool, compressedData []byte, err error) { // log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("keys", len(req.Keys)).WithField("from-ip", ip).Info("batch find values request received") - logtrace.Debug(ctx, "Batch find values request received", logtrace.Fields{ + logtrace.Info(ctx, "Batch find values request received", logtrace.Fields{ logtrace.FieldModule: "p2p", "from": ip, "keys": len(req.Keys), @@ -1015,7 +1015,7 @@ func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFi if len(req.Keys) > 0 { // log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("keys[0]", req.Keys[0]).WithField("keys[len]", req.Keys[len(req.Keys)-1]). 
// WithField("from-ip", ip).Debug("first & last batch keys") - logtrace.Debug(ctx, "First & last batch keys", logtrace.Fields{ + logtrace.Info(ctx, "First & last batch keys", logtrace.Fields{ logtrace.FieldModule: "p2p", "p2p-req-id": reqID, "keys[0]": req.Keys[0], @@ -1208,7 +1208,7 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r } // log.P2P().WithContext(ctx).Info("handle batch store data request received") - logtrace.Debug(ctx, "Handle batch store data request received", logtrace.Fields{ + logtrace.Info(ctx, "Handle batch store data request received", logtrace.Fields{ logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), @@ -1238,7 +1238,7 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r }, } // log.P2P().WithContext(ctx).Info("handle batch store data request processed") - logtrace.Debug(ctx, "Handle batch store data request processed", logtrace.Fields{ + logtrace.Info(ctx, "Handle batch store data request processed", logtrace.Fields{ logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), diff --git a/p2p/kademlia/rq_symbols.go b/p2p/kademlia/rq_symbols.go index c8ad2000..819d0944 100644 --- a/p2p/kademlia/rq_symbols.go +++ b/p2p/kademlia/rq_symbols.go @@ -16,16 +16,20 @@ const ( ) func (s *DHT) startStoreSymbolsWorker(ctx context.Context) { - logtrace.Debug(ctx, "start delete data worker", logtrace.Fields{logtrace.FieldModule: "p2p"}) + // Minimal visibility for lifecycle + each tick + logtrace.Info(ctx, "rq_symbols worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { case <-time.After(defaultSoreSymbolsInterval): + tickStart := time.Now() + logtrace.Info(ctx, "rq_symbols: tick", logtrace.Fields{"interval": defaultSoreSymbolsInterval.String()}) if err := s.storeSymbols(ctx); err != nil { logtrace.Error(ctx, "store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) 
} + logtrace.Info(ctx, "rq_symbols: tick complete", logtrace.Fields{"ms": time.Since(tickStart).Milliseconds()}) case <-ctx.Done(): - logtrace.Error(ctx, "closing store symbols worker", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Info(ctx, "rq_symbols worker stopping", logtrace.Fields{logtrace.FieldModule: "p2p"}) return } } @@ -37,13 +41,26 @@ func (s *DHT) storeSymbols(ctx context.Context) error { return fmt.Errorf("get to do store symbol dirs: %w", err) } + // Minimal visibility: how many dirs to process this tick + logtrace.Info(ctx, "rq_symbols: todo directories", logtrace.Fields{"count": len(dirs)}) + for _, dir := range dirs { - logtrace.Debug(ctx, "rq_symbols worker: start scanning dir & storing raptorQ symbols", logtrace.Fields{"dir": dir, "txid": dir.TXID}) + // Pre-count symbols in this directory + preCount := -1 + if set, rerr := utils.ReadDirFilenames(dir.Dir); rerr == nil { + preCount = len(set) + } + start := time.Now() + logtrace.Info(ctx, "rq_symbols: processing dir", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "symbols": preCount}) if err := s.scanDirAndStoreSymbols(ctx, dir.Dir, dir.TXID); err != nil { logtrace.Error(ctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) } - - logtrace.Debug(ctx, "rq_symbols worker: scanned dir & stored raptorQ symbols", logtrace.Fields{"dir": dir, "txid": dir.TXID}) + // Post-count remaining symbols + remCount := -1 + if set, rerr := utils.ReadDirFilenames(dir.Dir); rerr == nil { + remCount = len(set) + } + logtrace.Info(ctx, "rq_symbols: processed dir", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "remaining": remCount, "ms": time.Since(start).Milliseconds()}) } return nil diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go index 3621167e..944b9b50 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/services/cascade/adaptors/p2p.go @@ -57,7 +57,7 @@ type 
StoreArtefactsRequest struct { } func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { - logtrace.Debug(ctx, "About to store artefacts (metadata + symbols)", logtrace.Fields{"taskID": req.TaskID, "id_files": len(req.IDFiles)}) + logtrace.Info(ctx, "StoreArtefacts start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) // Optionally enable per-node store RPC capture for this task if !p.metricsDisabled { @@ -71,7 +71,17 @@ func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, return errors.Wrap(err, "error storing artefacts") } dur := time.Since(start).Milliseconds() - logtrace.Debug(ctx, "artefacts have been stored", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total": totalSymbols, "id_files_count": len(req.IDFiles)}) + // After first-pass, log how many symbols remain on disk + remaining := 0 + if req.SymbolsDir != "" { + if keys, werr := walkSymbolTree(req.SymbolsDir); werr == nil { + remaining = len(keys) + } + } + logtrace.Info(ctx, "StoreArtefacts completed", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": dur}) + if remaining == 0 { + logtrace.Info(ctx, "Symbols directory is empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) + } // Record store summary for later event emission cm.SetStoreSummary(req.TaskID, firstPassSymbols, totalSymbols, len(req.IDFiles), dur) return nil @@ -101,7 +111,8 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action if targetCount < 1 && totalAvailable > 0 { targetCount = 1 } - logtrace.Debug(ctx, "first-pass target coverage (symbols)", logtrace.Fields{ + logtrace.Info(ctx, "Symbols discovered in directory", 
logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "first-pass target coverage (symbols)", logtrace.Fields{ "total_symbols": totalAvailable, "target_percent": storeSymbolsPercent, "target_count": targetCount, @@ -116,8 +127,8 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action } sort.Strings(keys) // deterministic order inside the sample } - - logtrace.Debug(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) + logtrace.Info(ctx, "first-pass selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) /* stream in fixed-size batches -------------------------------------- */ @@ -153,6 +164,7 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action payload = append(payload, symBytes...) // Send as the same data type you use for symbols + logtrace.Info(ctx, "RPC StoreBatch (first-batch): metadata + symbols", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) bctx = cm.WithTaskID(bctx, taskID) err = p.p2p.StoreBatch(bctx, payload, storage.P2PDataRaptorQSymbol, taskID) @@ -160,6 +172,7 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action if err != nil { return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err) } + logtrace.Info(ctx, "RPC StoreBatch completed (first-batch)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)}) totalSymbols += len(symBytes) // No per-RPC metrics propagated from p2p @@ -170,6 +183,14 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action return totalSymbols, totalAvailable, fmt.Errorf("delete symbols: %w", err) } } + // Log remaining symbols in 
directory after deletion + if rem, werr := walkSymbolTree(symbolsDir); werr == nil { + if left := len(rem); left > 0 { + logtrace.Info(ctx, "symbols left after first-batch", logtrace.Fields{"taskID": taskID, "left": left}) + } else { + logtrace.Info(ctx, "Symbols directory is empty after first-batch", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) + } + } firstBatchProcessed = true } else { @@ -188,12 +209,20 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action if totalAvailable > 0 { achievedPct = (float64(totalSymbols) / float64(totalAvailable)) * 100.0 } - logtrace.Debug(ctx, "first-pass achieved coverage (symbols)", + logtrace.Info(ctx, "first-pass achieved coverage (symbols)", logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct}) if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { return totalSymbols, totalAvailable, fmt.Errorf("update first-batch flag: %w", err) } + // Final remaining count after first pass flagged + if rem, werr := walkSymbolTree(symbolsDir); werr == nil { + if left := len(rem); left > 0 { + logtrace.Info(ctx, "first-pass completed; symbols remaining on disk", logtrace.Fields{"taskID": taskID, "left": left, "dir": symbolsDir}) + } else { + logtrace.Info(ctx, "first-pass completed; directory empty", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) + } + } return totalSymbols, totalAvailable, nil @@ -228,7 +257,7 @@ func walkSymbolTree(root string) ([]string, error) { // storeSymbolsInP2P loads a batch of symbols and stores them via P2P. // Returns (ratePct, requests, count, error) where `count` is the number of symbols in this batch. 
func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (int, error) { - logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)}) + logtrace.Info(ctx, "loading batch symbols", logtrace.Fields{"taskID": taskID, "count": len(fileKeys)}) symbols, err := utils.LoadSymbols(root, fileKeys) if err != nil { @@ -239,15 +268,21 @@ func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fi symCtx = cm.WithTaskID(symCtx, taskID) defer cancel() + logtrace.Info(ctx, "RPC StoreBatch (symbols batch)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) if err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID); err != nil { return len(symbols), fmt.Errorf("p2p store batch: %w", err) } - logtrace.Debug(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) + logtrace.Info(ctx, "RPC StoreBatch completed (symbols batch)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { return len(symbols), fmt.Errorf("delete symbols: %w", err) } - logtrace.Debug(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) + // After deletion, log remaining count in directory + left := -1 + if rem, werr := walkSymbolTree(root); werr == nil { + left = len(rem) + } + logtrace.Info(ctx, "deleted batch symbols", logtrace.Fields{"taskID": taskID, "count": len(symbols), "symbols_left_on_disk": left}) // No per-RPC metrics propagated from p2p return len(symbols), nil diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index 8271fc51..e0e77a6b 100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -39,7 +39,7 @@ func (task *CascadeRegistrationTask) Download( send func(resp *DownloadResponse) error, ) (err error) { fields := logtrace.Fields{logtrace.FieldMethod: "Download", 
logtrace.FieldRequest: req} - logtrace.Debug(ctx, "Cascade download request received", fields) + logtrace.Info(ctx, "Cascade download request received", fields) // Ensure task status is finalized regardless of outcome defer func() { @@ -56,7 +56,7 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldError] = err return task.wrapErr(ctx, "failed to get action", err, fields) } - logtrace.Debug(ctx, "Action retrieved", fields) + logtrace.Info(ctx, "Action retrieved", fields) task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) if actionDetails.GetAction().State != actiontypes.ActionStateDone { @@ -65,25 +65,26 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldActionState] = actionDetails.GetAction().State return task.wrapErr(ctx, "action not found", err, fields) } - logtrace.Debug(ctx, "Action state validated", fields) + logtrace.Info(ctx, "Action state validated", fields) metadata, err := task.decodeCascadeMetadata(ctx, actionDetails.GetAction().Metadata, fields) if err != nil { fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) } - logtrace.Debug(ctx, "Cascade metadata decoded", fields) + logtrace.Info(ctx, "Cascade metadata decoded", fields) task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send) // Notify: network retrieval phase begins task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) + logtrace.Info(ctx, "Starting network retrieval of artefacts", logtrace.Fields{logtrace.FieldActionID: actionDetails.GetAction().ActionID}) filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send) if err != nil { fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "failed to download artifacts", err, fields) } - logtrace.Debug(ctx, "File reconstructed and hash 
verified", fields) + logtrace.Info(ctx, "File reconstructed and hash verified", fields) // Notify: decode completed, file ready on disk task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, "Decode completed", filePath, tmpDir, send) @@ -91,7 +92,7 @@ func (task *CascadeRegistrationTask) Download( } func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields, send func(resp *DownloadResponse) error) (string, string, error) { - logtrace.Debug(ctx, "started downloading the artifacts", fields) + logtrace.Info(ctx, "started downloading the artifacts", fields) var ( layout codec.Layout @@ -101,15 +102,19 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti ) for _, indexID := range metadata.RqIdsIds { + iStart := time.Now() + logtrace.Info(ctx, "RPC Retrieve index file", logtrace.Fields{"index_id": indexID}) indexFile, err := task.P2PClient.Retrieve(ctx, indexID) if err != nil || len(indexFile) == 0 { + logtrace.Warn(ctx, "Retrieve index file failed or empty", logtrace.Fields{"index_id": indexID, logtrace.FieldError: fmt.Sprintf("%v", err)}) continue } + logtrace.Info(ctx, "Retrieve index file completed", logtrace.Fields{"index_id": indexID, "bytes": len(indexFile), "ms": time.Since(iStart).Milliseconds()}) // Parse index file to get layout IDs indexData, err := task.parseIndexFile(indexFile) if err != nil { - logtrace.Debug(ctx, "failed to parse index file", fields) + logtrace.Warn(ctx, "failed to parse index file", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error()}) continue } @@ -117,14 +122,14 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti var netMS, decMS int64 layout, netMS, decMS, layoutAttempts, err = task.retrieveLayoutFromIndex(ctx, indexData, fields) if err != nil { - logtrace.Debug(ctx, "failed to retrieve layout from index", fields) + logtrace.Warn(ctx, "failed to retrieve 
layout from index", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error(), "attempts": layoutAttempts}) continue } layoutFetchMS = netMS layoutDecodeMS = decMS if len(layout.Blocks) > 0 { - logtrace.Debug(ctx, "layout file retrieved via index", fields) + logtrace.Info(ctx, "layout file retrieved via index", logtrace.Fields{"index_id": indexID, "attempts": layoutAttempts, "net_ms": layoutFetchMS, "decode_ms": layoutDecodeMS}) break } } @@ -163,7 +168,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( if targetRequiredCount < 1 && totalSymbols > 0 { targetRequiredCount = 1 } - logtrace.Debug(ctx, "Retrieving target-required symbols for decode", fields) + logtrace.Info(ctx, "Retrieving target-required symbols for decode", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount}) if !task.config.MetricsDisabled { cm.StartRetrieveCapture(actionID) @@ -180,6 +185,8 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( if reqCount > totalSymbols { reqCount = totalSymbols } + rStart := time.Now() + logtrace.Info(ctx, "RPC BatchRetrieve symbols", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols}) symbols, err := task.P2PClient.BatchRetrieve(ctxRetrieve, allSymbols, reqCount, actionID) if err != nil { fields[logtrace.FieldError] = err.Error() @@ -187,9 +194,12 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( return "", "", fmt.Errorf("batch retrieve symbols: %w", err) } retrieveMS := time.Since(retrieveStart).Milliseconds() + logtrace.Info(ctx, "RPC BatchRetrieve completed", logtrace.Fields{"action_id": actionID, "received": len(symbols), "ms": time.Since(rStart).Milliseconds()}) // Measure decode duration decodeStart := time.Now() + dStart := time.Now() + logtrace.Info(ctx, "RQ Decode start", logtrace.Fields{"action_id": actionID}) decodeInfo, err := task.RQ.Decode(ctx, 
adaptors.DecodeRequest{ ActionID: actionID, Symbols: symbols, @@ -201,6 +211,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", err) } decodeMS := time.Since(decodeStart).Milliseconds() + logtrace.Info(ctx, "RQ Decode completed", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) // Set minimal retrieve summary and emit event strictly from internal collector if !task.config.MetricsDisabled { @@ -237,7 +248,17 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( fields[logtrace.FieldError] = err.Error() return "", decodeInfo.DecodeTmpDir, err } - logtrace.Debug(ctx, "File successfully restored and hash verified", fields) + // Log the state of the temporary decode directory + if decodeInfo.DecodeTmpDir != "" { + if set, derr := utils.ReadDirFilenames(decodeInfo.DecodeTmpDir); derr == nil { + if left := len(set); left > 0 { + logtrace.Info(ctx, "Decode tmp directory has files remaining", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir, "left": left}) + } else { + logtrace.Info(ctx, "Decode tmp directory is empty", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir}) + } + } + } + logtrace.Info(ctx, "File successfully restored and hash verified", fields) return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil } @@ -279,20 +300,26 @@ func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context for _, layoutID := range indexData.LayoutIDs { attempts++ t0 := time.Now() + logtrace.Info(ctx, "RPC Retrieve layout file", logtrace.Fields{"layout_id": layoutID, "attempt": attempts}) layoutFile, err := task.P2PClient.Retrieve(ctx, layoutID) - totalFetchMS += time.Since(t0).Milliseconds() + took := time.Since(t0).Milliseconds() + totalFetchMS += took if err != nil || len(layoutFile) == 0 { + logtrace.Warn(ctx, "Retrieve layout file failed or empty", logtrace.Fields{"layout_id": 
layoutID, "attempt": attempts, "ms": took, logtrace.FieldError: fmt.Sprintf("%v", err)}) continue } t1 := time.Now() layout, _, _, err := parseRQMetadataFile(layoutFile) - totalDecodeMS += time.Since(t1).Milliseconds() + decMS := time.Since(t1).Milliseconds() + totalDecodeMS += decMS if err != nil { + logtrace.Warn(ctx, "Parse layout file failed", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "decode_ms": decMS, logtrace.FieldError: err.Error()}) continue } if len(layout.Blocks) > 0 { + logtrace.Info(ctx, "Layout file retrieved and parsed", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "net_ms": took, "decode_ms": decMS}) return layout, totalFetchMS, totalDecodeMS, attempts, nil } } @@ -306,9 +333,12 @@ func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, action } // For now, we use actionID as the directory path to maintain compatibility + logtrace.Info(ctx, "Cleanup download directory", logtrace.Fields{"dir": actionID}) if err := os.RemoveAll(actionID); err != nil { + logtrace.Warn(ctx, "Cleanup download directory failed", logtrace.Fields{"dir": actionID, logtrace.FieldError: err.Error()}) return errors.Errorf("failed to delete download directory: %s, :%s", actionID, err.Error()) } + logtrace.Info(ctx, "Cleanup download directory completed", logtrace.Fields{"dir": actionID}) return nil } diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go index b22ec14a..e6197b41 100644 --- a/supernode/services/cascade/helper.go +++ b/supernode/services/cascade/helper.go @@ -36,7 +36,7 @@ func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID s if res.GetAction().ActionID == "" { return nil, task.wrapErr(ctx, "action not found", errors.New(""), f) } - logtrace.Debug(ctx, "action has been retrieved", f) + logtrace.Info(ctx, "action has been retrieved", f) return res.GetAction(), nil } @@ -46,7 +46,7 @@ func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx 
context.Context, b if err != nil { return task.wrapErr(ctx, "failed to get top SNs", err, f) } - logtrace.Debug(ctx, "Fetched Top Supernodes", f) + logtrace.Info(ctx, "Fetched Top Supernodes", f) if !supernode.Exists(top.Supernodes, task.config.SupernodeAccountAddress) { // Build information about supernodes for better error context @@ -54,7 +54,7 @@ func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, b for i, sn := range top.Supernodes { addresses[i] = sn.SupernodeAccount } - logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{ + logtrace.Info(ctx, "Supernode not in top list", logtrace.Fields{ "currentAddress": task.config.SupernodeAccountAddress, "topSupernodes": addresses, }) @@ -78,7 +78,7 @@ func (task *CascadeRegistrationTask) verifyDataHash(ctx context.Context, dh []by if string(b64) != expected { return task.wrapErr(ctx, "data hash doesn't match", errors.New(""), f) } - logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", f) + logtrace.Info(ctx, "request data-hash has been matched with the action data-hash", f) return nil } @@ -110,7 +110,7 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. if err := task.LumeraClient.Verify(ctx, creator, []byte(indexFileB64), creatorSigBytes); err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify creator signature", err, f) } - logtrace.Debug(ctx, "creator signature successfully verified", f) + logtrace.Info(ctx, "creator signature successfully verified", f) // Decode index file to get the layout signature indexFile, err := decodeIndexFile(indexFileB64) @@ -132,7 +132,7 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. 
if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) } - logtrace.Debug(ctx, "layout signature successfully verified", f) + logtrace.Info(ctx, "layout signature successfully verified", f) return encodedMeta, indexFile.LayoutSignature, nil } @@ -175,6 +175,20 @@ func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta // storeArtefacts persists cascade artefacts (ID files + RaptorQ symbols) via the // P2P adaptor. P2P does not return metrics; cascade summarizes and emits them. func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) error { + if f == nil { + f = logtrace.Fields{} + } + lf := logtrace.Fields{ + logtrace.FieldActionID: actionID, + logtrace.FieldTaskID: task.ID(), + "id_files_count": len(idFiles), + "symbols_dir": symbolsDir, + } + for k, v := range f { + lf[k] = v + } + logtrace.Info(ctx, "storeArtefacts invoked", lf) + return task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ IDFiles: idFiles, SymbolsDir: symbolsDir, @@ -214,7 +228,7 @@ func (task *CascadeRegistrationTask) emitArtefactsStored( b, _ := json.MarshalIndent(payload, "", " ") msg := string(b) fields["metrics_json"] = msg - logtrace.Debug(ctx, "artefacts have been stored", fields) + logtrace.Info(ctx, "artefacts have been stored", fields) task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) // No central state to clear; adaptor returns calls inline } @@ -279,7 +293,7 @@ func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action requiredFee := sdk.NewCoin("ulume", math.NewInt(amount)) // Log the calculated fee - logtrace.Debug(ctx, "calculated required fee", logtrace.Fields{ + logtrace.Info(ctx, "calculated required fee", logtrace.Fields{ "fee": requiredFee.String(), "dataBytes": dataSize, }) @@ -377,6 +391,6 
@@ func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context return task.wrapErr(ctx, "failed to verify download signature", err, fields) } - logtrace.Debug(ctx, "download signature successfully verified", fields) + logtrace.Info(ctx, "download signature successfully verified", fields) return nil } diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go index 4b456734..8d6cfd07 100644 --- a/supernode/services/cascade/register.go +++ b/supernode/services/cascade/register.go @@ -46,7 +46,7 @@ func (task *CascadeRegistrationTask) Register( ) (err error) { fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} - logtrace.Debug(ctx, "Cascade registration request received", fields) + logtrace.Info(ctx, "Cascade registration request received", fields) // Ensure task status and resources are finalized regardless of outcome defer func() { @@ -78,14 +78,14 @@ func (task *CascadeRegistrationTask) Register( fields[logtrace.FieldCreator] = action.Creator fields[logtrace.FieldStatus] = action.State fields[logtrace.FieldPrice] = action.Price - logtrace.Debug(ctx, "Action retrieved", fields) + logtrace.Info(ctx, "Action retrieved", fields) task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) /* 2. Verify action fee -------------------------------------------------------- */ if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { return err } - logtrace.Debug(ctx, "Action fee verified", fields) + logtrace.Info(ctx, "Action fee verified", fields) task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) /* 3. 
Ensure this super-node is eligible -------------------------------------- */ @@ -93,7 +93,7 @@ func (task *CascadeRegistrationTask) Register( if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { return err } - logtrace.Debug(ctx, "Top supernode eligibility confirmed", fields) + logtrace.Info(ctx, "Top supernode eligibility confirmed", fields) task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) /* 4. Decode cascade metadata -------------------------------------------------- */ @@ -101,14 +101,14 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } - logtrace.Debug(ctx, "Cascade metadata decoded", fields) + logtrace.Info(ctx, "Cascade metadata decoded", fields) task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) /* 5. Verify data hash --------------------------------------------------------- */ if err := task.verifyDataHash(ctx, req.DataHash, cascadeMeta.DataHash, fields); err != nil { return err } - logtrace.Debug(ctx, "Data hash verified", fields) + logtrace.Info(ctx, "Data hash verified", fields) task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) /* 6. Encode the raw data ------------------------------------------------------ */ @@ -116,7 +116,7 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } - logtrace.Debug(ctx, "Input encoded", fields) + logtrace.Info(ctx, "Input encoded", fields) task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) /* 7. Signature verification + layout decode ---------------------------------- */ @@ -126,7 +126,7 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } - logtrace.Debug(ctx, "Signature verified", fields) + logtrace.Info(ctx, "Signature verified", fields) task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) /* 8. 
Generate RQ-ID files ----------------------------------------------------- */ @@ -134,14 +134,14 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } - logtrace.Debug(ctx, "RQID files generated", fields) + logtrace.Info(ctx, "RQID files generated", fields) task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) /* 9. Consistency checks ------------------------------------------------------- */ if err := verifyIDs(layout, encResp.Metadata); err != nil { return task.wrapErr(ctx, "failed to verify IDs", err, fields) } - logtrace.Debug(ctx, "RQIDs verified", fields) + logtrace.Info(ctx, "RQIDs verified", fields) task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) /* 10. Simulate finalize to avoid storing artefacts if it would fail ---------- */ @@ -152,7 +152,7 @@ func (task *CascadeRegistrationTask) Register( task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) return task.wrapErr(ctx, "finalize action simulation failed", err, fields) } - logtrace.Debug(ctx, "Finalize simulation passed", fields) + logtrace.Info(ctx, "Finalize simulation passed", fields) // Transmit as a standard event so SDK can propagate it (dedicated type) task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) @@ -175,7 +175,7 @@ func (task *CascadeRegistrationTask) Register( } txHash := resp.TxResponse.TxHash fields[logtrace.FieldTxHash] = txHash - logtrace.Debug(ctx, "Action finalized", fields) + logtrace.Info(ctx, "Action finalized", fields) task.streamEvent(SupernodeEventTypeActionFinalized, "Action finalized", txHash, send) return nil From 83462830086c75775e80d523f1f3b1411ca78e7e Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Tue, 30 Sep 2025 08:58:25 +0500 Subject: [PATCH 13/27] Add datadog logs --- .github/workflows/build&release.yml | 7 +- Makefile | 4 +- pkg/logtrace/datadog.go | 204 ++++++++++++++++++++++++++++ 
pkg/logtrace/log.go | 6 + supernode/cmd/start.go | 8 ++ 5 files changed, 227 insertions(+), 2 deletions(-) create mode 100644 pkg/logtrace/datadog.go diff --git a/.github/workflows/build&release.yml b/.github/workflows/build&release.yml index ead3e013..3dbf21bf 100644 --- a/.github/workflows/build&release.yml +++ b/.github/workflows/build&release.yml @@ -82,6 +82,9 @@ jobs: echo "binary_name=supernode-linux-amd64" >> $GITHUB_OUTPUT - name: Build Release Version + env: + DD_API_KEY: ${{ secrets.DD_API_KEY }} + DD_SITE: ${{ secrets.DD_SITE }} run: | mkdir -p release @@ -94,7 +97,9 @@ jobs: -ldflags="-s -w \ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.Version=${{ steps.vars.outputs.version }} \ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.GitCommit=${{ steps.vars.outputs.git_commit }} \ - -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=${{ steps.vars.outputs.build_time }}" \ + -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=${{ steps.vars.outputs.build_time }} \ + -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDAPIKey=${DD_API_KEY} \ + -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDSite=${DD_SITE}" \ -o release/supernode \ ./supernode diff --git a/Makefile b/Makefile index 01272fbf..81773d3b 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,9 @@ BUILD_TIME ?= $(shell date -u '+%Y-%m-%d_%H:%M:%S') # Linker flags for version information LDFLAGS = -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.Version=$(VERSION) \ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.GitCommit=$(GIT_COMMIT) \ - -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=$(BUILD_TIME) + -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=$(BUILD_TIME) \ + -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDAPIKey=$(DD_API_KEY) \ + -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDSite=$(DD_SITE) # Linker flags for sn-manager SN_MANAGER_LDFLAGS = -X 
main.Version=$(VERSION) \ diff --git a/pkg/logtrace/datadog.go b/pkg/logtrace/datadog.go new file mode 100644 index 00000000..95830836 --- /dev/null +++ b/pkg/logtrace/datadog.go @@ -0,0 +1,204 @@ +package logtrace + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "net/http" + "os" + "strings" + "sync" + "time" + + "go.uber.org/zap/zapcore" +) + +// Minimal Datadog Logs Forwarder (hard-coded config) kept separate for cleanliness. + +type ddCfg struct { + APIKey string + Site string // e.g. "datadoghq.com", "datadoghq.eu" + Service string // e.g. used as Datadog 'service'; we will set to node IP + Host string // optional; defaults to machine hostname +} + +var ( + ddOnce sync.Once + ddConfig ddCfg + ddClient = &http.Client{Timeout: 5 * time.Second} + ddQueue chan map[string]any + // Optional build-time injection via -ldflags + // -ldflags "-X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDAPIKey=... -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDSite=us5.datadoghq.com" + DDAPIKey string + DDSite string +) + +// SetupDatadog initializes the Datadog forwarding once. +func SetupDatadog(service string) { + ddOnce.Do(func() { + initDatadog(service) + }) +} + +// ForwardDatadog enqueues a log line for forwarding (non-blocking). 
+func ForwardDatadog(level zapcore.Level, ctx context.Context, msg string, fields Fields) { + ddForward(level, ctx, msg, fields) +} + +// SetDatadogService allows setting the Datadog service (e.g., to the node IP) +func SetDatadogService(service string) { + if s := strings.TrimSpace(service); s != "" { + ddConfig.Service = s + } +} + +// SetDatadogHost sets the Datadog host field (use the supernode identity) +func SetDatadogHost(host string) { + if h := strings.TrimSpace(host); h != "" { + ddConfig.Host = h + } +} + +func initDatadog(service string) { + // Base defaults (site default chosen based on earlier validation) + ddConfig = ddCfg{Site: "us5.datadoghq.com", Service: service, Host: ""} + + // Resolve from env and build flags + apiKey := strings.TrimSpace(os.Getenv("DD_API_KEY")) + if apiKey == "" { + apiKey = strings.TrimSpace(DDAPIKey) + } + + site := strings.TrimSpace(os.Getenv("DD_SITE")) + if site == "" { + site = strings.TrimSpace(DDSite) + if site == "" { + site = ddConfig.Site + } + } + + ddConfig.APIKey = apiKey + ddConfig.Site = site + + // Only enable forwarding when a real key is present + if ddConfig.APIKey == "" { + return + } + + ddQueue = make(chan map[string]any, 256) + go ddLoop() +} + +// ddForward enqueues a single log entry for Datadog intake. 
+func ddForward(level zapcore.Level, ctx context.Context, msg string, fields Fields) { + if ddQueue == nil { + return + } + + // Map zap level to Datadog status + status := "info" + switch level { + case zapcore.DebugLevel: + status = "debug" + case zapcore.InfoLevel: + status = "info" + case zapcore.WarnLevel: + status = "warn" + case zapcore.ErrorLevel: + status = "error" + case zapcore.FatalLevel: + status = "critical" + } + + // Build a compact attributes map + attrs := map[string]any{} + for k, v := range fields { + attrs[k] = v + } + // Attach correlation ID if present + if cid := extractCorrelationID(ctx); cid != "unknown" { + attrs["correlation_id"] = cid + } + + entry := map[string]any{ + "message": msg, + "status": status, + "service": ddConfig.Service, + "host": ddConfig.Host, + "attributes": attrs, // avoid collisions with top-level fields + } + + select { + case ddQueue <- entry: + default: + // drop if queue is full to avoid blocking critical paths + } +} + +// ddLoop batches log entries and sends to Datadog intake. +func ddLoop() { + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + + batch := make([]map[string]any, 0, 32) + flush := func() { + if len(batch) == 0 { + return + } + // Marshal batch + buf := &bytes.Buffer{} + if err := json.NewEncoder(buf).Encode(batch); err != nil { + batch = batch[:0] + return + } + _ = ddPost(buf.Bytes()) + batch = batch[:0] + } + + for { + select { + case e, ok := <-ddQueue: + if !ok { + flush() + return + } + batch = append(batch, e) + if len(batch) >= 32 { + flush() + } + case <-ticker.C: + flush() + } + } +} + +func ddPost(payload []byte) error { + url := "https://http-intake.logs." 
+ strings.TrimSpace(ddConfig.Site) + "/api/v2/logs" + + // gzip the JSON payload + var gzBuf bytes.Buffer + gw := gzip.NewWriter(&gzBuf) + if _, err := gw.Write(payload); err == nil { + _ = gw.Close() + } else { + _ = gw.Close() + gzBuf = *bytes.NewBuffer(payload) + } + + req, err := http.NewRequest(http.MethodPost, url, &gzBuf) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + req.Header.Set("Content-Encoding", "gzip") + req.Header.Set("DD-API-KEY", ddConfig.APIKey) + + resp, err := ddClient.Do(req) + if err != nil { + return err + } + _ = resp.Body.Close() + return nil +} diff --git a/pkg/logtrace/log.go b/pkg/logtrace/log.go index 02b8f36e..d1bcf169 100644 --- a/pkg/logtrace/log.go +++ b/pkg/logtrace/log.go @@ -45,6 +45,9 @@ func Setup(serviceName string) { if err != nil { panic(err) } + + // Initialize Datadog forwarding (minimal integration in separate file) + SetupDatadog(serviceName) } // getLogLevel returns the log level from environment variable LOG_LEVEL @@ -129,6 +132,9 @@ func logWithLevel(level zapcore.Level, ctx context.Context, message string, fiel case zapcore.FatalLevel: logger.Fatal(message, zapFields...) 
} + + // Forward to Datadog (non-blocking, best-effort) + ForwardDatadog(level, ctx, message, fields) } // Error logs an error message with structured fields diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 952113d3..4529aec2 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -78,6 +78,14 @@ The supernode will connect to the Lumera network and begin participating in the logtrace.Debug(ctx, "Configuration verification successful", logtrace.Fields{}) + // Set Datadog host to identity and service to latest IP address from chain + logtrace.SetDatadogHost(appConfig.SupernodeConfig.Identity) + if snInfo, err := lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, appConfig.SupernodeConfig.Identity); err == nil && snInfo != nil { + if ip := strings.TrimSpace(snInfo.LatestAddress); ip != "" { + logtrace.SetDatadogService(ip) + } + } + // Initialize RaptorQ store for Cascade processing rqStore, err := initRQStore(ctx, appConfig) if err != nil { From 0e35cf63852d7fc64d39396dc8dcf1024e859ba6 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Tue, 30 Sep 2025 12:48:29 +0500 Subject: [PATCH 14/27] TxResponse --- pkg/lumera/modules/tx/impl.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/lumera/modules/tx/impl.go b/pkg/lumera/modules/tx/impl.go index bcdb694d..2f80ac58 100644 --- a/pkg/lumera/modules/tx/impl.go +++ b/pkg/lumera/modules/tx/impl.go @@ -310,7 +310,9 @@ func (m *module) ProcessTransaction(ctx context.Context, msgs []types.Msg, accou } if len(result.TxResponse.Events) == 0 { - logtrace.Error(ctx, "Failed to retrieve transaction events after 5 attempts", nil) + logtrace.Error(ctx, "Failed to retrieve transaction events after 5 attempts", logtrace.Fields{ + "response": result.TxResponse.String, + }) } return result, nil From 3d8386a48b4d69e7ab794e9c1c801f8599eb79f2 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Tue, 30 Sep 2025 16:16:44 +0500 Subject: [PATCH 15/27] skip check for public 
field --- go.mod | 4 +- go.sum | 4 +- sdk/README.md | 4 +- .../server/cascade/cascade_action_server.go | 20 +---- supernode/services/cascade/download.go | 74 +++++++++++++++---- 5 files changed, 72 insertions(+), 34 deletions(-) diff --git a/go.mod b/go.mod index 46091df2..a581736e 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ replace ( require ( cosmossdk.io/math v1.5.3 github.com/AlecAivazis/survey/v2 v2.3.7 - github.com/LumeraProtocol/lumera v1.7.0 + github.com/LumeraProtocol/lumera v1.7.2 github.com/LumeraProtocol/rq-go v0.2.1 github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/cenkalti/backoff/v4 v4.3.0 @@ -34,7 +34,6 @@ require ( github.com/pkg/errors v0.9.1 github.com/shirou/gopsutil/v3 v3.24.5 github.com/spf13/cobra v1.8.1 - github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.10.0 go.uber.org/mock v0.5.2 go.uber.org/ratelimit v0.3.1 @@ -166,6 +165,7 @@ require ( github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.7.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.19.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect diff --git a/go.sum b/go.sum index d8170371..839f29a2 100644 --- a/go.sum +++ b/go.sum @@ -63,8 +63,8 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3 github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.7.0 h1:F5zgRBnCtgGfdMB6jz01PFWIzbS8VjQfCu1H9OYt3BU= -github.com/LumeraProtocol/lumera v1.7.0/go.mod h1:c1M+sjewuCvxw+pznwlspUzenDJI8Y+suKB3RFKS2Wo= +github.com/LumeraProtocol/lumera v1.7.2 
h1:qA0qwEOfCqW6yY232/MEK6gfLYq4HVYSmbcOCOZqEoc= +github.com/LumeraProtocol/lumera v1.7.2/go.mod h1:c1M+sjewuCvxw+pznwlspUzenDJI8Y+suKB3RFKS2Wo= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= diff --git a/sdk/README.md b/sdk/README.md index b0aecb20..cf2501cf 100644 --- a/sdk/README.md +++ b/sdk/README.md @@ -221,11 +221,13 @@ if err != nil { // taskID can be used to track the download progress ``` +Note: If the action's cascade metadata sets `public: true`, the signature may be left empty to allow anonymous download. + **Parameters:** - `ctx context.Context`: Context for the operation - `actionID string`: ID of the action to download - `outputDir string`: Directory where the downloaded file will be saved -- `signature string`: Base64-encoded signature for download authorization +- `signature string`: Base64-encoded signature for download authorization (leave empty for public cascades) **Signature Creation for Download:** The download signature is created by combining the action ID with the creator's address, signing it, and base64 encoding the result. 
diff --git a/supernode/node/action/server/cascade/cascade_action_server.go b/supernode/node/action/server/cascade/cascade_action_server.go index 180ebd3d..6a38b750 100644 --- a/supernode/node/action/server/cascade/cascade_action_server.go +++ b/supernode/node/action/server/cascade/cascade_action_server.go @@ -229,21 +229,8 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS task := server.factory.NewCascadeRegistrationTask() - // Verify signature if provided - if req.GetSignature() != "" { - // Cast to concrete type to access helper method - if cascadeTask, ok := task.(*cascadeService.CascadeRegistrationTask); ok { - err := cascadeTask.VerifyDownloadSignature(ctx, req.GetActionId(), req.GetSignature()) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "signature verification failed", fields) - return fmt.Errorf("signature verification failed: %w", err) - } - } else { - logtrace.Error(ctx, "unable to cast task to CascadeRegistrationTask", fields) - return fmt.Errorf("unable to verify signature: task type assertion failed") - } - } + // Authorization is enforced inside the task based on metadata.Public. + // If public, signature is skipped; if private, signature is required. 
var restoredFilePath string var tmpDir string @@ -260,7 +247,8 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS }() err := task.Download(ctx, &cascadeService.DownloadRequest{ - ActionID: req.GetActionId(), + ActionID: req.GetActionId(), + Signature: req.GetSignature(), }, func(resp *cascadeService.DownloadResponse) error { grpcResp := &pb.DownloadResponse{ ResponseType: &pb.DownloadResponse_Event{ diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index e0e77a6b..ede254a4 100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -24,6 +24,9 @@ const targetRequiredPercent = 17 type DownloadRequest struct { ActionID string + // Signature is required for private downloads. For public cascade + // actions (metadata.Public == true), this is ignored. + Signature string } type DownloadResponse struct { @@ -33,6 +36,12 @@ type DownloadResponse struct { DownloadedDir string } +// Download retrieves a cascade artefact by action ID. +// +// Authorization behavior: +// - If the cascade metadata has Public = true, signature verification is skipped +// and the file is downloadable by anyone. +// - If Public = false, a valid download signature is required. 
func (task *CascadeRegistrationTask) Download( ctx context.Context, req *DownloadRequest, @@ -53,17 +62,19 @@ func (task *CascadeRegistrationTask) Download( actionDetails, err := task.LumeraClient.GetAction(ctx, req.ActionID) if err != nil { - fields[logtrace.FieldError] = err + // Ensure error is logged as string for consistency + fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "failed to get action", err, fields) } logtrace.Info(ctx, "Action retrieved", fields) task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) if actionDetails.GetAction().State != actiontypes.ActionStateDone { + // Return a clearer error message when action is not yet finalized err = errors.New("action is not in a valid state") fields[logtrace.FieldError] = "action state is not done yet" fields[logtrace.FieldActionState] = actionDetails.GetAction().State - return task.wrapErr(ctx, "action not found", err, fields) + return task.wrapErr(ctx, "action not finalized yet", err, fields) } logtrace.Info(ctx, "Action state validated", fields) @@ -75,6 +86,24 @@ func (task *CascadeRegistrationTask) Download( logtrace.Info(ctx, "Cascade metadata decoded", fields) task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send) + // Enforce download authorization based on metadata.Public + // - If public: skip signature verification; allow anonymous downloads + // - If private: require a valid signature + if !metadata.Public { + if req.Signature == "" { + fields[logtrace.FieldError] = "missing signature for private download" + // Provide a descriptive message without a fabricated root error + return task.wrapErr(ctx, "private cascade requires a download signature", nil, fields) + } + if err := task.VerifyDownloadSignature(ctx, req.ActionID, req.Signature); err != nil { + fields[logtrace.FieldError] = err.Error() + return task.wrapErr(ctx, "failed to verify download signature", err, fields) + } + 
logtrace.Info(ctx, "Download signature verified for private cascade", fields) + } else { + logtrace.Info(ctx, "Public cascade: skipping download signature verification", fields) + } + // Notify: network retrieval phase begins task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) @@ -82,6 +111,12 @@ func (task *CascadeRegistrationTask) Download( filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send) if err != nil { fields[logtrace.FieldError] = err.Error() + // Ensure temporary decode directory is cleaned if decode failed after being created + if tmpDir != "" { + if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil { + logtrace.Warn(ctx, "cleanup of tmp dir after error failed", logtrace.Fields{"tmp_dir": tmpDir, logtrace.FieldError: cerr.Error()}) + } + } return task.wrapErr(ctx, "failed to download artifacts", err, fields) } logtrace.Info(ctx, "File reconstructed and hash verified", fields) @@ -144,6 +179,10 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti return task.restoreFileFromLayout(ctx, layout, metadata.DataHash, actionID, send) } +// restoreFileFromLayout reconstructs the original file from the provided layout +// and a subset of retrieved symbols. The method deduplicates symbol identifiers +// before network retrieval to avoid redundant requests and ensure the requested +// count reflects unique symbols only. func (task *CascadeRegistrationTask) restoreFileFromLayout( ctx context.Context, layout codec.Layout, @@ -155,9 +194,16 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( fields := logtrace.Fields{ logtrace.FieldActionID: actionID, } - var allSymbols []string + // Deduplicate symbols across blocks to avoid redundant requests + symSet := make(map[string]struct{}) for _, block := range layout.Blocks { - allSymbols = append(allSymbols, block.Symbols...) 
+ for _, s := range block.Symbols { + symSet[s] = struct{}{} + } + } + allSymbols := make([]string, 0, len(symSet)) + for s := range symSet { + allSymbols = append(allSymbols, s) } sort.Strings(allSymbols) @@ -327,18 +373,20 @@ func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context return codec.Layout{}, totalFetchMS, totalDecodeMS, attempts, errors.New("no valid layout found in index") } -func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, actionID string) error { - if actionID == "" { - return errors.New("actionID is empty") +// CleanupDownload removes the temporary directory created during decode. +// The parameter is a directory path (not an action ID). +func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, dirPath string) error { + if dirPath == "" { + return errors.New("directory path is empty") } - // For now, we use actionID as the directory path to maintain compatibility - logtrace.Info(ctx, "Cleanup download directory", logtrace.Fields{"dir": actionID}) - if err := os.RemoveAll(actionID); err != nil { - logtrace.Warn(ctx, "Cleanup download directory failed", logtrace.Fields{"dir": actionID, logtrace.FieldError: err.Error()}) - return errors.Errorf("failed to delete download directory: %s, :%s", actionID, err.Error()) + // For now, we use tmp directory path as provided by decoder + logtrace.Info(ctx, "Cleanup download directory", logtrace.Fields{"dir": dirPath}) + if err := os.RemoveAll(dirPath); err != nil { + logtrace.Warn(ctx, "Cleanup download directory failed", logtrace.Fields{"dir": dirPath, logtrace.FieldError: err.Error()}) + return errors.Errorf("failed to delete download directory: %s, :%s", dirPath, err.Error()) } - logtrace.Info(ctx, "Cleanup download directory completed", logtrace.Fields{"dir": actionID}) + logtrace.Info(ctx, "Cleanup download directory completed", logtrace.Fields{"dir": dirPath}) return nil } From d32a5c71fd43f0acff3f50c3f3cb8be4d98f4eff Mon Sep 17 00:00:00 
2001 From: Matee ullah Malik <46045452+mateeullahmalik@users.noreply.github.com> Date: Wed, 1 Oct 2025 00:24:42 +0500 Subject: [PATCH 16/27] peers+balance (#196) --- pkg/lumera/client.go | 26 +++++++++++++++ pkg/lumera/interface.go | 2 ++ pkg/lumera/lumera_mock.go | 15 +++++++++ pkg/lumera/modules/bank/impl.go | 30 +++++++++++++++++ pkg/lumera/modules/bank/interface.go | 18 ++++++++++ pkg/testutil/lumera.go | 24 ++++++++++++-- sdk/task/helpers.go | 47 +------------------------- sdk/task/manager.go | 12 ++----- sdk/task/task.go | 49 +++++++++++++++++++++++++--- 9 files changed, 160 insertions(+), 63 deletions(-) create mode 100644 pkg/lumera/modules/bank/impl.go create mode 100644 pkg/lumera/modules/bank/interface.go diff --git a/pkg/lumera/client.go b/pkg/lumera/client.go index bac35d68..2e25877c 100644 --- a/pkg/lumera/client.go +++ b/pkg/lumera/client.go @@ -2,10 +2,12 @@ package lumera import ( "context" + "fmt" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" @@ -16,6 +18,7 @@ type lumeraClient struct { authMod auth.Module actionMod action.Module actionMsgMod action_msg.Module + bankMod bank.Module supernodeMod supernode.Module txMod tx.Module nodeMod node.Module @@ -53,12 +56,30 @@ func newClient(ctx context.Context, cfg *Config) (Client, error) { return nil, err } + bankModule, err := bank.NewModule(conn.GetConn()) + if err != nil { + conn.Close() + return nil, err + } + nodeModule, err := node.NewModule(conn.GetConn(), cfg.keyring) if err != nil { conn.Close() return nil, err } + // Preflight: verify configured ChainID matches node's reported 
network + if nodeInfo, nerr := nodeModule.GetNodeInfo(ctx); nerr != nil { + conn.Close() + return nil, fmt.Errorf("failed to get node info for chain verification: %w", nerr) + } else if nodeInfo != nil && nodeInfo.DefaultNodeInfo != nil { + // Cosmos SDK exposes chain-id in DefaultNodeInfo.Network + if reported := nodeInfo.DefaultNodeInfo.Network; reported != "" && reported != cfg.ChainID { + conn.Close() + return nil, fmt.Errorf("chain ID mismatch: configured=%s node=%s", cfg.ChainID, reported) + } + } + actionMsgModule, err := action_msg.NewModule( conn.GetConn(), authModule, // For account info @@ -77,6 +98,7 @@ func newClient(ctx context.Context, cfg *Config) (Client, error) { authMod: authModule, actionMod: actionModule, actionMsgMod: actionMsgModule, + bankMod: bankModule, supernodeMod: supernodeModule, txMod: txModule, nodeMod: nodeModule, @@ -96,6 +118,10 @@ func (c *lumeraClient) ActionMsg() action_msg.Module { return c.actionMsgMod } +func (c *lumeraClient) Bank() bank.Module { + return c.bankMod +} + func (c *lumeraClient) SuperNode() supernode.Module { return c.supernodeMod } diff --git a/pkg/lumera/interface.go b/pkg/lumera/interface.go index eba47684..2fb25c13 100644 --- a/pkg/lumera/interface.go +++ b/pkg/lumera/interface.go @@ -7,6 +7,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" @@ -18,6 +19,7 @@ type Client interface { Action() action.Module ActionMsg() action_msg.Module SuperNode() supernode.Module + Bank() bank.Module Tx() tx.Module Node() node.Module diff --git a/pkg/lumera/lumera_mock.go b/pkg/lumera/lumera_mock.go 
index 25d30789..e19ddfdb 100644 --- a/pkg/lumera/lumera_mock.go +++ b/pkg/lumera/lumera_mock.go @@ -15,6 +15,7 @@ import ( action "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" action_msg "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" auth "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" + bank "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" node "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" supernode "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" tx "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" @@ -87,6 +88,20 @@ func (mr *MockClientMockRecorder) Auth() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Auth", reflect.TypeOf((*MockClient)(nil).Auth)) } +// Bank mocks base method. +func (m *MockClient) Bank() bank.Module { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Bank") + ret0, _ := ret[0].(bank.Module) + return ret0 +} + +// Bank indicates an expected call of Bank. +func (mr *MockClientMockRecorder) Bank() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bank", reflect.TypeOf((*MockClient)(nil).Bank)) +} + // Close mocks base method. 
func (m *MockClient) Close() error { m.ctrl.T.Helper() diff --git a/pkg/lumera/modules/bank/impl.go b/pkg/lumera/modules/bank/impl.go new file mode 100644 index 00000000..157eb97f --- /dev/null +++ b/pkg/lumera/modules/bank/impl.go @@ -0,0 +1,30 @@ +package bank + +import ( + "context" + "fmt" + + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "google.golang.org/grpc" +) + +type module struct { + client banktypes.QueryClient +} + +func newModule(conn *grpc.ClientConn) (Module, error) { + if conn == nil { + return nil, fmt.Errorf("connection cannot be nil") + } + return &module{client: banktypes.NewQueryClient(conn)}, nil +} + +func (m *module) Balance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) { + if address == "" { + return nil, fmt.Errorf("address cannot be empty") + } + if denom == "" { + return nil, fmt.Errorf("denom cannot be empty") + } + return m.client.Balance(ctx, &banktypes.QueryBalanceRequest{Address: address, Denom: denom}) +} diff --git a/pkg/lumera/modules/bank/interface.go b/pkg/lumera/modules/bank/interface.go new file mode 100644 index 00000000..b88093cf --- /dev/null +++ b/pkg/lumera/modules/bank/interface.go @@ -0,0 +1,18 @@ +//go:generate mockgen -destination=bank_mock.go -package=bank -source=interface.go +package bank + +import ( + "context" + + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "google.golang.org/grpc" +) + +// Module provides access to Cosmos SDK bank queries. +type Module interface { + // Balance returns the balance for a specific denom at an address. + Balance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) +} + +// NewModule constructs a bank Module backed by the given gRPC connection. 
+func NewModule(conn *grpc.ClientConn) (Module, error) { return newModule(conn) } diff --git a/pkg/testutil/lumera.go b/pkg/testutil/lumera.go index 3f556a97..a4d09814 100644 --- a/pkg/testutil/lumera.go +++ b/pkg/testutil/lumera.go @@ -9,15 +9,18 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" + bankmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" + sdkmath "cosmossdk.io/math" cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" "github.com/cosmos/cosmos-sdk/crypto/keyring" sdktypes "github.com/cosmos/cosmos-sdk/types" sdktx "github.com/cosmos/cosmos-sdk/types/tx" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" ) // MockLumeraClient implements the lumera.Client interface for testing purposes @@ -25,6 +28,7 @@ type MockLumeraClient struct { authMod *MockAuthModule actionMod *MockActionModule actionMsgMod *MockActionMsgModule + bankMod *MockBankModule supernodeMod *MockSupernodeModule txMod *MockTxModule nodeMod *MockNodeModule @@ -36,6 +40,7 @@ type MockLumeraClient struct { func NewMockLumeraClient(kr keyring.Keyring, addresses []string) (lumera.Client, error) { actionMod := &MockActionModule{} actionMsgMod := &MockActionMsgModule{} + bankMod := &MockBankModule{} supernodeMod := &MockSupernodeModule{addresses: addresses} txMod := &MockTxModule{} nodeMod := &MockNodeModule{} @@ -44,6 +49,7 @@ func NewMockLumeraClient(kr keyring.Keyring, addresses []string) (lumera.Client, authMod: &MockAuthModule{}, actionMod: actionMod, actionMsgMod: actionMsgMod, + bankMod: bankMod, supernodeMod: supernodeMod, txMod: txMod, 
nodeMod: nodeMod, @@ -67,6 +73,11 @@ func (c *MockLumeraClient) ActionMsg() action_msg.Module { return c.actionMsgMod } +// Bank returns the Bank module client +func (c *MockLumeraClient) Bank() bankmod.Module { + return c.bankMod +} + // SuperNode returns the SuperNode module client func (c *MockLumeraClient) SuperNode() supernode.Module { return c.supernodeMod @@ -87,6 +98,15 @@ func (c *MockLumeraClient) Close() error { return nil } +// MockBankModule implements the bank.Module interface for testing +type MockBankModule struct{} + +// Balance returns a positive balance for any address/denom to pass checks by default +func (m *MockBankModule) Balance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) { + // Return >= 1 LUME in micro units to satisfy threshold checks + return &banktypes.QueryBalanceResponse{Balance: &sdktypes.Coin{Denom: denom, Amount: sdkmath.NewInt(1_000_000)}}, nil +} + // MockAuthModule implements the auth.Module interface for testing type MockAuthModule struct{} @@ -124,8 +144,8 @@ type MockActionMsgModule struct{} // RequestAction mocks the behavior of requesting an action. 
func (m *MockActionMsgModule) RequestAction(ctx context.Context, actionType, metadata, price, expirationTime string) (*sdktx.BroadcastTxResponse, error) { - // Mock implementation returns success with empty result - return &sdktx.BroadcastTxResponse{}, nil + // Mock implementation returns success with empty result + return &sdktx.BroadcastTxResponse{}, nil } // FinalizeCascadeAction implements the required method from action_msg.Module interface diff --git a/sdk/task/helpers.go b/sdk/task/helpers.go index f887aeb2..2ea8bcaa 100644 --- a/sdk/task/helpers.go +++ b/sdk/task/helpers.go @@ -3,21 +3,16 @@ package task import ( "context" "encoding/base64" - "errors" "fmt" "os" "path/filepath" "strings" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" - snsvc "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" - "github.com/LumeraProtocol/supernode/v2/sdk/net" ) const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit -var ErrNoPeersConnected = errors.New("no P2P peers connected on available supernodes") - // ValidateFileSize checks if a file size is within the allowed 1GB limit func ValidateFileSize(filePath string) error { fileInfo, err := os.Stat(filePath) @@ -105,47 +100,7 @@ func (m *ManagerImpl) validateSignature(ctx context.Context, action lumera.Actio return nil } -// checkSupernodesPeerConnectivity verifies that at least one supernode has P2P peers connected -func (m *ManagerImpl) checkSupernodesPeerConnectivity(ctx context.Context, blockHeight int64) error { - // Fetch supernodes for the action's block height - supernodes, err := m.lumeraClient.GetSupernodes(ctx, blockHeight) - if err != nil { - return fmt.Errorf("failed to get supernodes: %w", err) - } - - if len(supernodes) == 0 { - return fmt.Errorf("no supernodes available for block height %d", blockHeight) - } - - // Check each supernode for peer connectivity - factoryCfg := net.FactoryConfig{ - LocalCosmosAddress: m.config.Account.LocalCosmosAddress, - PeerType: 
m.config.Account.PeerType, - } - clientFactory := net.NewClientFactory(ctx, m.logger, m.keyring, m.lumeraClient, factoryCfg) - - for _, sn := range supernodes { - client, err := clientFactory.CreateClient(ctx, sn) - if err != nil { - continue // Skip this supernode if we can't connect - } - - // Request peer info and P2P metrics to assess connectivity - ctxWithMetrics := snsvc.WithIncludeP2PMetrics(ctx) - status, err := client.GetSupernodeStatus(ctxWithMetrics) - client.Close(ctx) - if err != nil { - continue // Skip this supernode if we can't get status - } - - // Check if this supernode has peers - if status.Network.PeersCount > 1 { - return nil // Found at least one supernode with peers - } - } - - return ErrNoPeersConnected -} +// (Removed) Peers connectivity preflight is now enforced during discovery in isServing. func (m *ManagerImpl) validateDownloadAction(ctx context.Context, actionID string) (lumera.Action, error) { action, err := m.lumeraClient.GetAction(ctx, actionID) diff --git a/sdk/task/manager.go b/sdk/task/manager.go index 052088f3..c5a65bf4 100644 --- a/sdk/task/manager.go +++ b/sdk/task/manager.go @@ -107,11 +107,7 @@ func (m *ManagerImpl) CreateCascadeTask(ctx context.Context, filePath string, ac return "", err } - // Check peer connectivity before creating task - if err := m.checkSupernodesPeerConnectivity(taskCtx, action.Height); err != nil { - cancel() // Clean up if peer check fails - return "", err - } + // Peer connectivity is now validated during discovery health checks taskID := uuid.New().String()[:8] @@ -280,11 +276,7 @@ func (m *ManagerImpl) CreateDownloadTask(ctx context.Context, actionID string, o return "", fmt.Errorf("no filename found in cascade metadata") } - // Check peer connectivity before creating task - if err := m.checkSupernodesPeerConnectivity(taskCtx, action.Height); err != nil { - cancel() // Clean up if peer check fails - return "", err - } + // Peer connectivity is now validated during discovery health checks // 
Ensure the output path includes the correct filename finalOutputPath := path.Join(outputDir, action.ID, metadata.FileName) diff --git a/sdk/task/task.go b/sdk/task/task.go index 976725a0..97295902 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -6,9 +6,13 @@ import ( "fmt" "sync" + sdkmath "cosmossdk.io/math" "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + plumera "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + txmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" + snsvc "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" "github.com/LumeraProtocol/supernode/v2/sdk/config" "github.com/LumeraProtocol/supernode/v2/sdk/event" "github.com/LumeraProtocol/supernode/v2/sdk/log" @@ -85,10 +89,6 @@ func (t *BaseTask) fetchSupernodes(ctx context.Context, height int64) (lumera.Su return nil, errors.New("no supernodes found") } - if len(sns) > 10 { - sns = sns[:10] - } - // Keep only SERVING nodes (done in parallel – keeps latency flat) healthy := make(lumera.Supernodes, 0, len(sns)) eg, ctx := errgroup.WithContext(ctx) @@ -131,6 +131,45 @@ func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { } defer client.Close(ctx) + // First check gRPC health resp, err := client.HealthCheck(ctx) - return err == nil && resp.Status == grpc_health_v1.HealthCheckResponse_SERVING + if err != nil || resp.Status != grpc_health_v1.HealthCheckResponse_SERVING { + return false + } + + // Then check P2P peers count via status (include P2P metrics) + status, err := client.GetSupernodeStatus(snsvc.WithIncludeP2PMetrics(ctx)) + if err != nil { + return false + } + if status.Network.PeersCount <= 1 { + return false + } + + // Finally, ensure the supernode account has a positive balance in the default fee denom. + // Use pkg/lumera to query bank balance from the chain. 
+ cfg, err := plumera.NewConfig(t.config.Lumera.GRPCAddr, t.config.Lumera.ChainID, t.config.Account.KeyName, t.keyring) + if err != nil { + logtrace.Debug(ctx, "Failed to build lumera client config for balance check", logtrace.Fields{"error": err.Error()}) + return false + } + lc, err := plumera.NewClient(ctx, cfg) + if err != nil { + logtrace.Debug(ctx, "Failed to create lumera client for balance check", logtrace.Fields{"error": err.Error()}) + return false + } + defer lc.Close() + + denom := txmod.DefaultFeeDenom // base denom (micro), e.g., "ulume" + bal, err := lc.Bank().Balance(ctx, sn.CosmosAddress, denom) + if err != nil || bal == nil || bal.Balance == nil { + return false + } + // Require at least 1 LUME = 10^6 micro (ulume) + min := sdkmath.NewInt(1_000_000) + if bal.Balance.Amount.LT(min) { + return false + } + + return true } From 7b85f4d23fef4ab9b0c5351738a018f48fbe03d5 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Wed, 1 Oct 2025 21:12:10 +0500 Subject: [PATCH 17/27] Tx mode block --- pkg/logtrace/log.go | 28 ++++++++++------------------ pkg/lumera/connection.go | 34 ---------------------------------- pkg/lumera/modules/tx/impl.go | 31 +------------------------------ 3 files changed, 11 insertions(+), 82 deletions(-) diff --git a/pkg/logtrace/log.go b/pkg/logtrace/log.go index d1bcf169..e1cefbb8 100644 --- a/pkg/logtrace/log.go +++ b/pkg/logtrace/log.go @@ -93,12 +93,10 @@ func logWithLevel(level zapcore.Level, ctx context.Context, message string, fiel Setup("unknown-service") // Fallback if Setup wasn't called } - // Always enrich logs with the correlation ID. 
- // allFields := make(Fields, len(fields)+1) - // for k, v := range fields { - // allFields[k] = v - // } - // allFields[FieldCorrelationID] = extractCorrelationID(ctx) + // Drop early if below the configured level (keeps Datadog in sync) + if !logger.Core().Enabled(level) { + return + } // Convert the map to a slice of zap.Field zapFields := make([]zap.Field, 0, len(fields)) @@ -119,18 +117,12 @@ func logWithLevel(level zapcore.Level, ctx context.Context, message string, fiel } } - // Log with the structured fields. - switch level { - case zapcore.DebugLevel: - logger.Debug(message, zapFields...) - case zapcore.InfoLevel: - logger.Info(message, zapFields...) - case zapcore.WarnLevel: - logger.Warn(message, zapFields...) - case zapcore.ErrorLevel: - logger.Error(message, zapFields...) - case zapcore.FatalLevel: - logger.Fatal(message, zapFields...) + // Log with the structured fields using a level check/write + if ce := logger.Check(level, message); ce != nil { + ce.Write(zapFields...) + } else { + // Should not happen due to early Enabled check, but guard anyway + return } // Forward to Datadog (non-blocking, best-effort) diff --git a/pkg/lumera/connection.go b/pkg/lumera/connection.go index 06c39748..8abdc0f5 100644 --- a/pkg/lumera/connection.go +++ b/pkg/lumera/connection.go @@ -14,8 +14,6 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" - "os" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" ) @@ -132,9 +130,6 @@ func newGRPCConnection(ctx context.Context, rawAddr string) (Connection, error) "scheme": scheme, }) - // Start a monitor to terminate the app if connection is lost - go monitorConnection(ctx, firstConn) - return &grpcConnection{conn: firstConn}, nil } @@ -275,35 +270,6 @@ func createGRPCConnection(ctx context.Context, hostPort string, creds credential } } -// monitorConnection watches the connection state and exits the process if the -// connection transitions to Shutdown or remains in 
TransientFailure beyond a grace period. -func monitorConnection(ctx context.Context, conn *grpc.ClientConn) { - for { - state := conn.GetState() - switch state { - case connectivity.Shutdown: - logtrace.Error(ctx, "gRPC connection shutdown", logtrace.Fields{"action": "exit"}) - os.Exit(1) - case connectivity.TransientFailure: - // Allow some time to recover to Ready - gctx, cancel := context.WithTimeout(ctx, reconnectionGracePeriod) - for conn.GetState() == connectivity.TransientFailure { - if !conn.WaitForStateChange(gctx, connectivity.TransientFailure) { - cancel() - logtrace.Error(ctx, "gRPC connection lost (transient failure)", logtrace.Fields{"grace": reconnectionGracePeriod.String(), "action": "exit"}) - os.Exit(1) - } - } - cancel() - default: - // Idle/Connecting/Ready: just wait for state change - if !conn.WaitForStateChange(ctx, state) { - return - } - } - } -} - // Close closes the gRPC connection. func (c *grpcConnection) Close() error { if c.conn != nil { diff --git a/pkg/lumera/modules/tx/impl.go b/pkg/lumera/modules/tx/impl.go index 2f80ac58..e8f2c0ae 100644 --- a/pkg/lumera/modules/tx/impl.go +++ b/pkg/lumera/modules/tx/impl.go @@ -5,7 +5,6 @@ import ( "fmt" "math" "strconv" - "time" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" lumeracodec "github.com/LumeraProtocol/supernode/v2/pkg/lumera/codec" @@ -117,7 +116,7 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, WithCodec(encCfg.Codec). WithTxConfig(encCfg.TxConfig). WithKeyring(config.Keyring). - WithBroadcastMode("sync") + WithBroadcastMode("block") // Create transaction factory factory := tx.Factory{}. 
@@ -287,34 +286,6 @@ func (m *module) ProcessTransaction(ctx context.Context, msgs []types.Msg, accou return result, fmt.Errorf("failed to broadcast transaction: %w", err) } - if result != nil && result.TxResponse != nil && result.TxResponse.Code == 0 && len(result.TxResponse.Events) == 0 { - logtrace.Debug(ctx, "Transaction broadcast successful, waiting for inclusion to get events...", nil) - - // Retry 5 times with 1 second intervals - var txResp *sdktx.GetTxResponse - for i := 0; i < 5; i++ { - time.Sleep(1 * time.Second) - - txResp, err = m.GetTransaction(ctx, result.TxResponse.TxHash) - if err == nil && txResp != nil && txResp.TxResponse != nil { - // Successfully got the transaction with events - logtrace.Debug(ctx, fmt.Sprintf("Retrieved transaction with %d events", len(txResp.TxResponse.Events)), nil) - result.TxResponse = txResp.TxResponse - break - } - - if err != nil { - logtrace.Warn(ctx, fmt.Sprintf("Attempt %d: failed to query transaction: %v", i+1, err), nil) - } - } - } - - if len(result.TxResponse.Events) == 0 { - logtrace.Error(ctx, "Failed to retrieve transaction events after 5 attempts", logtrace.Fields{ - "response": result.TxResponse.String, - }) - } - return result, nil } From 34dfc95e8239f4970d57ecdc41d39006926d0819 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Thu, 2 Oct 2025 16:02:21 +0500 Subject: [PATCH 18/27] fix :tx Broadcst method cleanup --- pkg/lumera/modules/tx/impl.go | 39 ++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/pkg/lumera/modules/tx/impl.go b/pkg/lumera/modules/tx/impl.go index e8f2c0ae..6ac625ca 100644 --- a/pkg/lumera/modules/tx/impl.go +++ b/pkg/lumera/modules/tx/impl.go @@ -47,6 +47,18 @@ func newModule(conn *grpc.ClientConn) (Module, error) { // SimulateTransaction simulates a transaction with given messages and returns gas used func (m *module) SimulateTransaction(ctx context.Context, msgs []types.Msg, accountInfo *authtypes.BaseAccount, config *TxConfig) 
(*sdktx.SimulateResponse, error) { + if config == nil { + return nil, fmt.Errorf("tx config cannot be nil") + } + if accountInfo == nil { + return nil, fmt.Errorf("account info cannot be nil") + } + if config.Keyring == nil { + return nil, fmt.Errorf("keyring cannot be nil") + } + if config.KeyName == "" { + return nil, fmt.Errorf("key name cannot be empty") + } // Create encoding config and client context encCfg := lumeracodec.GetEncodingConfig() clientCtx := client.Context{}. @@ -108,6 +120,18 @@ func (m *module) SimulateTransaction(ctx context.Context, msgs []types.Msg, acco // BuildAndSignTransaction builds and signs a transaction with the given parameters func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, accountInfo *authtypes.BaseAccount, gasLimit uint64, fee string, config *TxConfig) ([]byte, error) { + if config == nil { + return nil, fmt.Errorf("tx config cannot be nil") + } + if accountInfo == nil { + return nil, fmt.Errorf("account info cannot be nil") + } + if config.Keyring == nil { + return nil, fmt.Errorf("keyring cannot be nil") + } + if config.KeyName == "" { + return nil, fmt.Errorf("key name cannot be empty") + } // Create encoding config encCfg := lumeracodec.GetEncodingConfig() @@ -115,10 +139,9 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, clientCtx := client.Context{}. WithCodec(encCfg.Codec). WithTxConfig(encCfg.TxConfig). - WithKeyring(config.Keyring). - WithBroadcastMode("block") + WithKeyring(config.Keyring) - // Create transaction factory + // Create transaction factory factory := tx.Factory{}. WithTxConfig(clientCtx.TxConfig). WithKeybase(config.Keyring). @@ -126,7 +149,6 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, WithSequence(accountInfo.Sequence). WithChainID(config.ChainID). WithGas(gasLimit). - WithGasAdjustment(config.GasAdjustment). WithSignMode(signingtypes.SignMode_SIGN_MODE_DIRECT). 
WithFees(fee) @@ -156,10 +178,7 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, // BroadcastTransaction broadcasts a signed transaction and returns the result func (m *module) BroadcastTransaction(ctx context.Context, txBytes []byte) (*sdktx.BroadcastTxResponse, error) { // Broadcast transaction - req := &sdktx.BroadcastTxRequest{ - TxBytes: txBytes, - Mode: sdktx.BroadcastMode_BROADCAST_MODE_SYNC, - } + req := &sdktx.BroadcastTxRequest{TxBytes: txBytes, Mode: sdktx.BroadcastMode_BROADCAST_MODE_SYNC} resp, err := m.client.BroadcastTx(ctx, req) @@ -272,7 +291,7 @@ func (m *module) ProcessTransaction(ctx context.Context, msgs []types.Msg, accou // Step 3: Calculate fee based on adjusted gas fee := m.CalculateFee(gasToUse, config) - logtrace.Debug(ctx, fmt.Sprintf("using simulated gas and calculated fee | simulatedGas=%d adjustedGas=%d fee=%s", simulatedGasUsed, gasToUse, fee), nil) + logtrace.Debug(ctx, fmt.Sprintf("using simulated gas and calculated fee | simulatedGas=%d gasToUse=%d fee=%s", simulatedGasUsed, gasToUse, fee), nil) // Step 4: Build and sign transaction txBytes, err := m.BuildAndSignTransaction(ctx, msgs, accountInfo, gasToUse, fee, config) @@ -280,7 +299,7 @@ func (m *module) ProcessTransaction(ctx context.Context, msgs []types.Msg, accou return nil, fmt.Errorf("failed to build and sign transaction: %w", err) } - // Step 5: Broadcast transaction + // Step 5: Broadcast transaction (SYNC mode) result, err := m.BroadcastTransaction(ctx, txBytes) if err != nil { return result, fmt.Errorf("failed to broadcast transaction: %w", err) From 7bd1a0347c35612981abd85114dfdf198c692dbb Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Thu, 2 Oct 2025 17:25:01 +0500 Subject: [PATCH 19/27] Logs --- p2p/kademlia/dht.go | 80 +++++++++++----------- p2p/kademlia/fetch_and_store.go | 4 +- p2p/kademlia/network.go | 43 +++++++++++- p2p/kademlia/rq_symbols.go | 19 +++-- pkg/logtrace/log.go | 21 ++++-- 
supernode/services/cascade/adaptors/p2p.go | 62 ++++++++--------- supernode/services/cascade/download.go | 54 +++++++-------- supernode/services/cascade/helper.go | 38 +++++----- supernode/services/cascade/register.go | 8 ++- 9 files changed, 195 insertions(+), 134 deletions(-) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index d0022274..67a2982b 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -362,7 +362,7 @@ func (s *DHT) Store(ctx context.Context, data []byte, typ int) (string, error) { // measured success rate for node RPCs is below the configured minimum, an error // is returned. Metrics are not returned through the API. func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID string) error { - logtrace.Info(ctx, "DHT StoreBatch begin", logtrace.Fields{ + logtrace.Debug(ctx, "DHT StoreBatch begin", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, "records": len(values), @@ -370,7 +370,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s if err := s.store.StoreBatch(ctx, values, typ, true); err != nil { return fmt.Errorf("store batch: %v", err) } - logtrace.Info(ctx, "DHT StoreBatch: local stored; network begin", logtrace.Fields{ + logtrace.Debug(ctx, "DHT StoreBatch: local stored; network begin", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, }) @@ -380,7 +380,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s return fmt.Errorf("iterate batch store: %v", err) } - logtrace.Info(ctx, "DHT StoreBatch: network done", logtrace.Fields{ + logtrace.Debug(ctx, "DHT StoreBatch: network done", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, }) @@ -407,7 +407,7 @@ func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]by // retrieve the key/value from queries storage value, err := s.store.Retrieve(ctx, decoded) if err == nil && len(value) > 0 { - 
logtrace.Info(ctx, "DHT Retrieve local hit", logtrace.Fields{"key": hex.EncodeToString(decoded), "ms": time.Since(start).Milliseconds()}) + logtrace.Debug(ctx, "DHT Retrieve local hit", logtrace.Fields{"key": hex.EncodeToString(decoded), "ms": time.Since(start).Milliseconds()}) return value, nil } else if err != nil { logtrace.Error(ctx, "Error retrieving key from local storage", logtrace.Fields{ @@ -423,20 +423,20 @@ func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]by } // if not found locally, iterative find value from kademlia network - logtrace.Info(ctx, "DHT Retrieve network lookup", logtrace.Fields{"key": dbKey}) + logtrace.Debug(ctx, "DHT Retrieve network lookup", logtrace.Fields{"key": dbKey}) peerValue, err := s.iterate(ctx, IterateFindValue, decoded, nil, 0) if err != nil { return nil, errors.Errorf("retrieve from peer: %w", err) } if len(peerValue) > 0 { - logtrace.Info(ctx, "DHT Retrieve network hit", logtrace.Fields{ + logtrace.Debug(ctx, "DHT Retrieve network hit", logtrace.Fields{ logtrace.FieldModule: "dht", "key": dbKey, "data_len": len(peerValue), "ms": time.Since(start).Milliseconds(), }) } else { - logtrace.Info(ctx, "DHT Retrieve miss", logtrace.Fields{ + logtrace.Debug(ctx, "DHT Retrieve miss", logtrace.Fields{ logtrace.FieldModule: "dht", "key": dbKey, "ms": time.Since(start).Milliseconds(), @@ -535,7 +535,7 @@ func (s *DHT) GetValueFromNode(ctx context.Context, target []byte, n *Node) ([]b defer ccancel() // Minimal per-RPC visibility - logtrace.Info(ctx, "RPC FindValue send", logtrace.Fields{"node": n.String(), "key": hex.EncodeToString(target)}) + logtrace.Debug(ctx, "RPC FindValue send", logtrace.Fields{"node": n.String(), "key": hex.EncodeToString(target)}) response, err := s.network.Call(cctx, request, false) if err != nil { logtrace.Debug(ctx, "Network call request failed", logtrace.Fields{ @@ -545,7 +545,7 @@ func (s *DHT) GetValueFromNode(ctx context.Context, target []byte, n *Node) ([]b }) return nil, 
fmt.Errorf("network call request %s failed: %w", request.String(), err) } - logtrace.Info(ctx, "RPC FindValue completed", logtrace.Fields{"node": n.String()}) + logtrace.Debug(ctx, "RPC FindValue completed", logtrace.Fields{"node": n.String()}) v, ok := response.Data.(*FindValueResponse) if ok && v.Status.Result == ResultOk && len(v.Value) > 0 { @@ -581,7 +581,7 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by // update the running goroutines number++ - logtrace.Info(ctx, "Start work for node", logtrace.Fields{ + logtrace.Debug(ctx, "Start work for node", logtrace.Fields{ logtrace.FieldModule: "p2p", "iterate_type": iterativeType, "node": node.String(), @@ -619,11 +619,11 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by if messageType == FindValue { fields["key"] = hex.EncodeToString(target) } - logtrace.Info(ctx, "RPC "+op+" send", fields) + logtrace.Debug(ctx, "RPC "+op+" send", fields) // send the request and receive the response response, err := s.network.Call(ctx, request, false) if err != nil { - logtrace.Info(ctx, "Iterate worker RPC failed", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate worker RPC failed", logtrace.Fields{ logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "request": request.String(), @@ -633,7 +633,7 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by //removedNodes = append(removedNodes, receiver) return } - logtrace.Info(ctx, "RPC "+op+" completed", logtrace.Fields{"node": receiver.String()}) + logtrace.Debug(ctx, "RPC "+op+" completed", logtrace.Fields{"node": receiver.String()}) // send the response to message channel responses <- response @@ -662,7 +662,7 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result batchHexKeys := hexKeys[start:end] - logtrace.Info(ctx, "Processing batch of local keys", logtrace.Fields{ + logtrace.Debug(ctx, "Processing batch of local keys", logtrace.Fields{ 
logtrace.FieldModule: "dht", "batch_size": len(batchHexKeys), "total_keys": len(hexKeys), @@ -696,7 +696,7 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, txID string, localOnly ...bool) (result map[string][]byte, err error) { start := time.Now() - logtrace.Info(ctx, "DHT BatchRetrieve begin", logtrace.Fields{"txid": txID, "keys": len(keys), "required": required}) + logtrace.Debug(ctx, "DHT BatchRetrieve begin", logtrace.Fields{"txid": txID, "keys": len(keys), "required": required}) result = make(map[string][]byte) var resMap sync.Map var foundLocalCount int32 @@ -1028,7 +1028,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, wg.Wait() - logtrace.Info(ctx, "Iterate batch get values done", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate batch get values done", logtrace.Fields{ logtrace.FieldModule: "dht", "found_count": atomic.LoadInt32(&foundCount), }) @@ -1112,7 +1112,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // find the closest contacts for the target node from queries route tables nl, _ := s.ht.closestContacts(Alpha, target, igList) if len(igList) > 0 { - logtrace.Info(ctx, "Closest contacts", logtrace.Fields{ + logtrace.Debug(ctx, "Closest contacts", logtrace.Fields{ logtrace.FieldModule: "p2p", "nodes": nl.String(), "ignored": s.ignorelist.String(), @@ -1122,7 +1122,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat if nl.Len() == 0 { return nil, nil } - logtrace.Info(ctx, "Iterate start", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate start", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "type": iterativeType, @@ -1136,7 +1136,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat if iterativeType == IterateFindNode { hashedTargetID, _ := utils.Blake3Hash(target) bucket := 
s.ht.bucketIndex(s.ht.self.HashedID, hashedTargetID) - logtrace.Info(ctx, "Bucket for target", logtrace.Fields{ + logtrace.Debug(ctx, "Bucket for target", logtrace.Fields{ logtrace.FieldModule: "p2p", "target": sKey, }) @@ -1160,7 +1160,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // Set a maximum number of iterations to prevent indefinite looping maxIterations := 5 // Adjust the maximum iterations as needed - logtrace.Info(ctx, "Begin iteration", logtrace.Fields{ + logtrace.Debug(ctx, "Begin iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1171,7 +1171,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat case <-ctx.Done(): return nil, fmt.Errorf("iterate cancelled: %w", ctx.Err()) case <-timeout: - logtrace.Info(ctx, "Iteration timed out", logtrace.Fields{ + logtrace.Debug(ctx, "Iteration timed out", logtrace.Fields{ logtrace.FieldModule: "p2p", }) return nil, nil @@ -1194,7 +1194,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat } default: - logtrace.Info(ctx, "Unknown message type", logtrace.Fields{ + logtrace.Debug(ctx, "Unknown message type", logtrace.Fields{ logtrace.FieldModule: "dht", "type": response.MessageType, }) @@ -1203,7 +1203,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // Stop search if no more nodes to contact if !searchRest && len(nl.Nodes) == 0 { - logtrace.Info(ctx, "Search stopped", logtrace.Fields{ + logtrace.Debug(ctx, "Search stopped", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1215,7 +1215,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat nl.Comparator = target nl.Sort() - logtrace.Info(ctx, "Iterate sorted nodes", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate sorted nodes", logtrace.Fields{ logtrace.FieldModule: "p2p", "id": base58.Encode(s.ht.self.ID), "iterate": 
iterativeType, @@ -1252,7 +1252,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat } } - logtrace.Info(ctx, "Finish iteration without results", logtrace.Fields{ + logtrace.Debug(ctx, "Finish iteration without results", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1273,7 +1273,7 @@ func (s *DHT) handleResponses(ctx context.Context, responses <-chan *Message, nl v, ok := response.Data.(*FindValueResponse) if ok { if v.Status.Result == ResultOk && len(v.Value) > 0 { - logtrace.Info(ctx, "Iterate found value from network", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate found value from network", logtrace.Fields{ logtrace.FieldModule: "p2p", }) return nl, v.Value @@ -1303,7 +1303,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] // nl will have the closest nodes to the target value, it will ignore the nodes in igList nl, _ := s.ht.closestContacts(Alpha, target, igList) if len(igList) > 0 { - logtrace.Info(ctx, "Closest contacts", logtrace.Fields{ + logtrace.Debug(ctx, "Closest contacts", logtrace.Fields{ logtrace.FieldModule: "p2p", "nodes": nl.String(), "ignored": s.ignorelist.String(), @@ -1318,7 +1318,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] searchRest := false // keep track of contacted nodes so that we don't hit them again contacted := make(map[string]bool) - logtrace.Info(ctx, "Begin iteration", logtrace.Fields{ + logtrace.Debug(ctx, "Begin iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1327,7 +1327,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] var closestNode *Node var iterationCount int for iterationCount = 0; iterationCount < maxIterations; iterationCount++ { - logtrace.Info(ctx, "Begin find value", logtrace.Fields{ + logtrace.Debug(ctx, "Begin find value", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, 
"nl": nl.Len(), @@ -1336,7 +1336,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] }) if nl.Len() == 0 { - logtrace.Info(ctx, "Nodes list length is 0", logtrace.Fields{ + logtrace.Debug(ctx, "Nodes list length is 0", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1347,7 +1347,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] // if the closest node is the same as the last iteration and we don't want to search rest of nodes, we are done if !searchRest && (closestNode != nil && bytes.Equal(nl.Nodes[0].ID, closestNode.ID)) { - logtrace.Info(ctx, "Closest node is the same as the last iteration", logtrace.Fields{ + logtrace.Debug(ctx, "Closest node is the same as the last iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1366,7 +1366,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] nl.Sort() - logtrace.Info(ctx, "Iteration progress", logtrace.Fields{ + logtrace.Debug(ctx, "Iteration progress", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1375,7 +1375,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] }) } - logtrace.Info(ctx, "Finished iterations without results", logtrace.Fields{ + logtrace.Debug(ctx, "Finished iterations without results", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1588,7 +1588,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, skey, _ := utils.Blake3Hash(data) if finalStoreCount >= int32(Alpha) { - logtrace.Info(ctx, "Store data to alpha nodes success", logtrace.Fields{ + logtrace.Debug(ctx, "Store data to alpha nodes success", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": taskID, "len_total_nodes": nl.Len(), @@ -1598,7 +1598,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, 
return nil } - logtrace.Info(ctx, "Store data to alpha nodes failed", logtrace.Fields{ + logtrace.Debug(ctx, "Store data to alpha nodes failed", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": taskID, "store_count": finalStoreCount, @@ -1712,7 +1712,7 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i requests := 0 successful := 0 - logtrace.Info(ctx, "Iterate batch store: dispatching to nodes", logtrace.Fields{"task_id": id, "nodes": len(knownNodes)}) + logtrace.Debug(ctx, "Iterate batch store: dispatching to nodes", logtrace.Fields{"task_id": id, "nodes": len(knownNodes)}) storeResponses := s.batchStoreNetwork(ctx, values, knownNodes, storageMap, typ) for response := range storeResponses { requests++ @@ -1807,7 +1807,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ var wg sync.WaitGroup for key, node := range nodes { - logtrace.Info(ctx, "Preparing batch store to node", logtrace.Fields{logtrace.FieldModule: "dht", "node": node.String()}) + logtrace.Debug(ctx, "Preparing batch store to node", logtrace.Fields{logtrace.FieldModule: "dht", "node": node.String()}) if s.ignorelist.Banned(node) { logtrace.Debug(ctx, "Ignoring banned node in batch store network call", logtrace.Fields{ logtrace.FieldModule: "dht", @@ -1858,7 +1858,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ s.metrics.IncHotPathBanIncr() } - logtrace.Info(ctx, "RPC BatchStoreData failed", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "node": receiver.String(), "ms": dur}) + logtrace.Error(ctx, "RPC BatchStoreData failed", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "node": receiver.String(), "ms": dur}) responses <- &MessageWithError{Error: err, Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur} return } @@ -1876,7 +1876,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values 
[][]byte, nodes map[ } func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[string]*Node, contacted map[string]bool, txid string) (chan *MessageWithError, bool) { - logtrace.Info(ctx, "Batch find node begin", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find node begin", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": txid, "nodes_count": len(nodes), @@ -1947,7 +1947,7 @@ func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[str } wg.Wait() close(responses) - logtrace.Info(ctx, "Batch find node done", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find node done", logtrace.Fields{ logtrace.FieldModule: "dht", "nodes_count": len(nodes), "len_resp": len(responses), diff --git a/p2p/kademlia/fetch_and_store.go b/p2p/kademlia/fetch_and_store.go index 8f954364..6344095d 100644 --- a/p2p/kademlia/fetch_and_store.go +++ b/p2p/kademlia/fetch_and_store.go @@ -276,7 +276,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, func (s *DHT) GetBatchValuesFromNode(ctx context.Context, keys []string, n *Node) (bool, map[string][]byte, []string, error) { logtrace.Debug(ctx, "sending batch fetch request", logtrace.Fields{"node-ip": n.IP, "keys": len(keys)}) // Minimal per-RPC visibility for background replication path - logtrace.Info(ctx, "RPC BatchFindValues send", logtrace.Fields{"node": n.String(), "keys": len(keys)}) + logtrace.Debug(ctx, "RPC BatchFindValues send", logtrace.Fields{"node": n.String(), "keys": len(keys)}) messageType := BatchFindValues @@ -351,7 +351,7 @@ func (s *DHT) GetBatchValuesFromNode(ctx context.Context, keys []string, n *Node } logtrace.Debug(ctx, "batch fetch response rcvd and keys verified", logtrace.Fields{"node-ip": n.IP, "received-keys": len(decompressedMap), "verified-keys": len(retMap), "failed-keys": len(failedKeys)}) - logtrace.Info(ctx, "RPC BatchFindValues completed", logtrace.Fields{"node": n.String(), "received_keys": len(decompressedMap), 
"verified_keys": len(retMap)}) + logtrace.Debug(ctx, "RPC BatchFindValues completed", logtrace.Fields{"node": n.String(), "received_keys": len(decompressedMap), "verified_keys": len(retMap)}) return v.Done, retMap, failedKeys, nil } diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index 3bca8f20..2088fcc5 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -592,6 +592,18 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes // pool key: bech32@ip:port (bech32 identity is your invariant) idStr := string(request.Receiver.ID) remoteAddr := fmt.Sprintf("%s@%s:%d", idStr, strings.TrimSpace(request.Receiver.IP), request.Receiver.Port) + // Log raw RPC start (reduce noise: Info only for high-signal messages) + startFields := logtrace.Fields{ + logtrace.FieldModule: "p2p", + "remote": remoteAddr, + "message": msgName(request.MessageType), + "timeout_ms": int64(timeout / time.Millisecond), + } + if isHighSignalMsg(request.MessageType) { + logtrace.Info(ctx, fmt.Sprintf("RPC %s start remote=%s timeout_ms=%d", msgName(request.MessageType), remoteAddr, int64(timeout/time.Millisecond)), startFields) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s start remote=%s timeout_ms=%d", msgName(request.MessageType), remoteAddr, int64(timeout/time.Millisecond)), startFields) + } // try get from pool s.connPoolMtx.Lock() @@ -633,6 +645,7 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes // ---- retryable RPC helpers ------------------------------------------------- func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAddr string, data []byte, timeout time.Duration, msgType int) (*Message, error) { + start := time.Now() writeDL := calcWriteDeadline(timeout, len(data), 1.0) // target ~1 MB/s retried := false @@ -717,11 +730,18 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd s.dropFromPool(remoteAddr, cw) return nil, 
errors.Errorf("conn read: %w", e) } + // Single-line completion for successful outbound RPC + if isHighSignalMsg(msgType) { + logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds()}) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds()}) + } return r, nil } } func (s *Network) rpcOnceNonWrapper(ctx context.Context, conn net.Conn, remoteAddr string, data []byte, timeout time.Duration, msgType int) (*Message, error) { + start := time.Now() sizeMB := float64(len(data)) / (1024.0 * 1024.0) // data is your gob-encoded message throughputFloor := 8.0 // MB/s (~64 Mbps) est := time.Duration(sizeMB / throughputFloor * float64(time.Second)) @@ -801,6 +821,11 @@ Retry: s.dropFromPool(remoteAddr, conn) return nil, errors.Errorf("conn read: %w", err) } + if isHighSignalMsg(msgType) { + logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds()}) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds()}) + } return resp, nil } @@ -936,7 +961,7 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) } - logtrace.Info(ctx, "Batch get 
values request received", logtrace.Fields{ + logtrace.Debug(ctx, "Batch get values request received", logtrace.Fields{ logtrace.FieldModule: "p2p", "from": message.Sender.String(), }) @@ -1006,7 +1031,7 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFindValuesRequest, ip string, reqID string) (isDone bool, compressedData []byte, err error) { // log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("keys", len(req.Keys)).WithField("from-ip", ip).Info("batch find values request received") - logtrace.Info(ctx, "Batch find values request received", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find values request received", logtrace.Fields{ logtrace.FieldModule: "p2p", "from": ip, "keys": len(req.Keys), @@ -1015,7 +1040,7 @@ func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFi if len(req.Keys) > 0 { // log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("keys[0]", req.Keys[0]).WithField("keys[len]", req.Keys[len(req.Keys)-1]). // WithField("from-ip", ip).Debug("first & last batch keys") - logtrace.Info(ctx, "First & last batch keys", logtrace.Fields{ + logtrace.Debug(ctx, "First & last batch keys", logtrace.Fields{ logtrace.FieldModule: "p2p", "p2p-req-id": reqID, "keys[0]": req.Keys[0], @@ -1468,6 +1493,18 @@ func msgName(t int) string { } } +// isHighSignalMsg returns true for message types that are heavy and relevant +// to artefact store/retrieve visibility. Lightweight chatter like Ping or +// FindNode is excluded to avoid log noise at Info level. 
+func isHighSignalMsg(t int) bool { + switch t { + case BatchStoreData, BatchGetValues, BatchFindValues: + return true + default: + return false + } +} + func (s *Network) HandleMetricsSnapshot() map[string]HandleCounters { out := make(map[string]HandleCounters) s.metrics.Range(func(k, v any) bool { diff --git a/p2p/kademlia/rq_symbols.go b/p2p/kademlia/rq_symbols.go index 819d0944..98e9c2ad 100644 --- a/p2p/kademlia/rq_symbols.go +++ b/p2p/kademlia/rq_symbols.go @@ -17,19 +17,16 @@ const ( func (s *DHT) startStoreSymbolsWorker(ctx context.Context) { // Minimal visibility for lifecycle + each tick - logtrace.Info(ctx, "rq_symbols worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "rq_symbols worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { case <-time.After(defaultSoreSymbolsInterval): - tickStart := time.Now() - logtrace.Info(ctx, "rq_symbols: tick", logtrace.Fields{"interval": defaultSoreSymbolsInterval.String()}) if err := s.storeSymbols(ctx); err != nil { logtrace.Error(ctx, "store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) } - logtrace.Info(ctx, "rq_symbols: tick complete", logtrace.Fields{"ms": time.Since(tickStart).Milliseconds()}) case <-ctx.Done(): - logtrace.Info(ctx, "rq_symbols worker stopping", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "rq_symbols worker stopping", logtrace.Fields{logtrace.FieldModule: "p2p"}) return } } @@ -42,7 +39,9 @@ func (s *DHT) storeSymbols(ctx context.Context) error { } // Minimal visibility: how many dirs to process this tick - logtrace.Info(ctx, "rq_symbols: todo directories", logtrace.Fields{"count": len(dirs)}) + if len(dirs) > 0 { + logtrace.Info(ctx, "rq_symbols: todo directories", logtrace.Fields{"count": len(dirs)}) + } for _, dir := range dirs { // Pre-count symbols in this directory @@ -83,7 +82,7 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro } 
sort.Strings(keys) - logtrace.Debug(ctx, "p2p-worker: storing ALL RaptorQ symbols", logtrace.Fields{"txid": txid, "dir": dir, "total": len(keys)}) + logtrace.Info(ctx, "p2p-worker: storing ALL RaptorQ symbols", logtrace.Fields{"txid": txid, "dir": dir, "total": len(keys)}) // Batch-flush at loadSymbolsBatchSize for start := 0; start < len(keys); { @@ -108,6 +107,10 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro // 2. Load → StoreBatch → Delete for a slice of keys // --------------------------------------------------------------------- func (s *DHT) storeSymbolsInP2P(ctx context.Context, dir string, keys []string) error { + // Per-batch visibility for background worker + logtrace.Info(ctx, "rq_symbols: worker StoreBatch send", logtrace.Fields{"dir": dir, "keys": len(keys)}) + + start := time.Now() loaded, err := utils.LoadSymbols(dir, keys) if err != nil { return fmt.Errorf("load symbols: %w", err) @@ -117,6 +120,8 @@ func (s *DHT) storeSymbolsInP2P(ctx context.Context, dir string, keys []string) return fmt.Errorf("p2p store batch: %w", err) } + logtrace.Info(ctx, "rq_symbols: worker StoreBatch completed", logtrace.Fields{"dir": dir, "keys": len(loaded), "ms": time.Since(start).Milliseconds()}) + if err := utils.DeleteSymbols(ctx, dir, keys); err != nil { return fmt.Errorf("delete symbols: %w", err) } diff --git a/pkg/logtrace/log.go b/pkg/logtrace/log.go index e1cefbb8..0c19d9f1 100644 --- a/pkg/logtrace/log.go +++ b/pkg/logtrace/log.go @@ -16,7 +16,10 @@ type ContextKey string // CorrelationIDKey is the key for storing correlation ID in context const CorrelationIDKey ContextKey = "correlation_id" -var logger *zap.Logger +var ( + logger *zap.Logger + minLevel zapcore.Level = zapcore.InfoLevel // effective minimum log level +) // Setup initializes the logger for readable output in all modes. 
func Setup(serviceName string) { @@ -34,7 +37,11 @@ func Setup(serviceName string) { config.DisableStacktrace = true // Always respect the LOG_LEVEL environment variable. - config.Level = zap.NewAtomicLevelAt(getLogLevel()) + lvl := getLogLevel() + config.Level = zap.NewAtomicLevelAt(lvl) + // Persist the effective minimum so non-core sinks (e.g., Datadog) can + // filter entries consistently with the console logger. + minLevel = lvl // Build the logger from the customized config. if tracingEnabled { @@ -52,7 +59,7 @@ func Setup(serviceName string) { // getLogLevel returns the log level from environment variable LOG_LEVEL func getLogLevel() zapcore.Level { - levelStr := strings.ToLower(os.Getenv("LOG_LEVEL")) + levelStr := "info" switch levelStr { case "debug": return zapcore.DebugLevel @@ -125,8 +132,12 @@ func logWithLevel(level zapcore.Level, ctx context.Context, message string, fiel return } - // Forward to Datadog (non-blocking, best-effort) - ForwardDatadog(level, ctx, message, fields) + // Forward to Datadog (non-blocking, best-effort) only if level is enabled + // for the current configuration. This prevents forwarding debug entries + // when the logger is configured for info and above. 
+ if level >= minLevel { + ForwardDatadog(level, ctx, message, fields) + } } // Error logs an error message with structured fields diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go index 944b9b50..98e0a8a6 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/services/cascade/adaptors/p2p.go @@ -57,7 +57,7 @@ type StoreArtefactsRequest struct { } func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { - logtrace.Info(ctx, "StoreArtefacts start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) + logtrace.Info(ctx, "StoreArtefacts start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) // Optionally enable per-node store RPC capture for this task if !p.metricsDisabled { @@ -78,10 +78,10 @@ func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, remaining = len(keys) } } - logtrace.Info(ctx, "StoreArtefacts completed", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": dur}) - if remaining == 0 { - logtrace.Info(ctx, "Symbols directory is empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) - } + logtrace.Info(ctx, "StoreArtefacts completed", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": dur}) + if remaining == 0 { + logtrace.Info(ctx, "Symbols directory is empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) + } // Record store summary for later event emission cm.SetStoreSummary(req.TaskID, firstPassSymbols, 
totalSymbols, len(req.IDFiles), dur) return nil @@ -111,12 +111,12 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action if targetCount < 1 && totalAvailable > 0 { targetCount = 1 } - logtrace.Info(ctx, "Symbols discovered in directory", logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir}) - logtrace.Info(ctx, "first-pass target coverage (symbols)", logtrace.Fields{ - "total_symbols": totalAvailable, - "target_percent": storeSymbolsPercent, - "target_count": targetCount, - }) + logtrace.Info(ctx, "Symbols discovered in directory", logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "first-pass target coverage (symbols)", logtrace.Fields{ + "total_symbols": totalAvailable, + "target_percent": storeSymbolsPercent, + "target_count": targetCount, + }) /* down-sample if we exceed the “big directory” threshold ------------- */ if len(keys) > loadSymbolsBatchSize { @@ -127,8 +127,8 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action } sort.Strings(keys) // deterministic order inside the sample } - logtrace.Info(ctx, "first-pass selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) - logtrace.Info(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) + logtrace.Info(ctx, "first-pass selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) /* stream in fixed-size batches -------------------------------------- */ @@ -164,7 +164,7 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action payload = append(payload, symBytes...) 
// Send as the same data type you use for symbols - logtrace.Info(ctx, "RPC StoreBatch (first-batch): metadata + symbols", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) + logtrace.Info(ctx, "RPC StoreBatch (first-batch): metadata + symbols", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) bctx = cm.WithTaskID(bctx, taskID) err = p.p2p.StoreBatch(bctx, payload, storage.P2PDataRaptorQSymbol, taskID) @@ -172,7 +172,7 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action if err != nil { return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err) } - logtrace.Info(ctx, "RPC StoreBatch completed (first-batch)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)}) + logtrace.Info(ctx, "RPC StoreBatch completed (first-batch)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)}) totalSymbols += len(symBytes) // No per-RPC metrics propagated from p2p @@ -185,11 +185,11 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action } // Log remaining symbols in directory after deletion if rem, werr := walkSymbolTree(symbolsDir); werr == nil { - if left := len(rem); left > 0 { - logtrace.Info(ctx, "symbols left after first-batch", logtrace.Fields{"taskID": taskID, "left": left}) - } else { - logtrace.Info(ctx, "Symbols directory is empty after first-batch", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) - } + if left := len(rem); left > 0 { + logtrace.Info(ctx, "symbols left after first-batch", logtrace.Fields{"taskID": taskID, "left": left}) + } else { + logtrace.Info(ctx, "Symbols directory is empty after first-batch", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) + } } firstBatchProcessed = true @@ -209,19 
+209,19 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action if totalAvailable > 0 { achievedPct = (float64(totalSymbols) / float64(totalAvailable)) * 100.0 } - logtrace.Info(ctx, "first-pass achieved coverage (symbols)", - logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct}) + logtrace.Info(ctx, "first-pass achieved coverage (symbols)", + logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct}) if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { return totalSymbols, totalAvailable, fmt.Errorf("update first-batch flag: %w", err) } // Final remaining count after first pass flagged if rem, werr := walkSymbolTree(symbolsDir); werr == nil { - if left := len(rem); left > 0 { - logtrace.Info(ctx, "first-pass completed; symbols remaining on disk", logtrace.Fields{"taskID": taskID, "left": left, "dir": symbolsDir}) - } else { - logtrace.Info(ctx, "first-pass completed; directory empty", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) - } + if left := len(rem); left > 0 { + logtrace.Info(ctx, "first-pass completed; symbols remaining on disk", logtrace.Fields{"taskID": taskID, "left": left, "dir": symbolsDir}) + } else { + logtrace.Info(ctx, "first-pass completed; directory empty", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) + } } return totalSymbols, totalAvailable, nil @@ -257,7 +257,7 @@ func walkSymbolTree(root string) ([]string, error) { // storeSymbolsInP2P loads a batch of symbols and stores them via P2P. // Returns (ratePct, requests, count, error) where `count` is the number of symbols in this batch. 
func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (int, error) { - logtrace.Info(ctx, "loading batch symbols", logtrace.Fields{"taskID": taskID, "count": len(fileKeys)}) + logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"taskID": taskID, "count": len(fileKeys)}) symbols, err := utils.LoadSymbols(root, fileKeys) if err != nil { @@ -268,11 +268,11 @@ func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fi symCtx = cm.WithTaskID(symCtx, taskID) defer cancel() - logtrace.Info(ctx, "RPC StoreBatch (symbols batch)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) + logtrace.Info(ctx, "RPC StoreBatch (symbols batch)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) if err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID); err != nil { return len(symbols), fmt.Errorf("p2p store batch: %w", err) } - logtrace.Info(ctx, "RPC StoreBatch completed (symbols batch)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) + logtrace.Info(ctx, "RPC StoreBatch completed (symbols batch)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { return len(symbols), fmt.Errorf("delete symbols: %w", err) @@ -282,7 +282,7 @@ func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fi if rem, werr := walkSymbolTree(root); werr == nil { left = len(rem) } - logtrace.Info(ctx, "deleted batch symbols", logtrace.Fields{"taskID": taskID, "count": len(symbols), "symbols_left_on_disk": left}) + logtrace.Debug(ctx, "deleted batch symbols", logtrace.Fields{"taskID": taskID, "count": len(symbols), "symbols_left_on_disk": left}) // No per-RPC metrics propagated from p2p return len(symbols), nil diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index ede254a4..f923028a 100644 --- 
a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -48,7 +48,7 @@ func (task *CascadeRegistrationTask) Download( send func(resp *DownloadResponse) error, ) (err error) { fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} - logtrace.Info(ctx, "Cascade download request received", fields) + logtrace.Info(ctx, "Cascade download request received", fields) // Ensure task status is finalized regardless of outcome defer func() { @@ -66,7 +66,7 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "failed to get action", err, fields) } - logtrace.Info(ctx, "Action retrieved", fields) + logtrace.Info(ctx, "Action retrieved", fields) task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) if actionDetails.GetAction().State != actiontypes.ActionStateDone { @@ -76,14 +76,14 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldActionState] = actionDetails.GetAction().State return task.wrapErr(ctx, "action not finalized yet", err, fields) } - logtrace.Info(ctx, "Action state validated", fields) + logtrace.Info(ctx, "Action state validated", fields) metadata, err := task.decodeCascadeMetadata(ctx, actionDetails.GetAction().Metadata, fields) if err != nil { fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) } - logtrace.Info(ctx, "Cascade metadata decoded", fields) + logtrace.Info(ctx, "Cascade metadata decoded", fields) task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send) // Enforce download authorization based on metadata.Public @@ -99,15 +99,15 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "failed to verify download signature", err, fields) } - logtrace.Info(ctx, "Download signature verified for private 
cascade", fields) - } else { - logtrace.Info(ctx, "Public cascade: skipping download signature verification", fields) - } + logtrace.Info(ctx, "Download signature verified for private cascade", fields) + } else { + logtrace.Info(ctx, "Public cascade: skipping download signature verification", fields) + } // Notify: network retrieval phase begins - task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) + task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) - logtrace.Info(ctx, "Starting network retrieval of artefacts", logtrace.Fields{logtrace.FieldActionID: actionDetails.GetAction().ActionID}) + logtrace.Info(ctx, "Starting network retrieval of artefacts", logtrace.Fields{logtrace.FieldActionID: actionDetails.GetAction().ActionID}) filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send) if err != nil { fields[logtrace.FieldError] = err.Error() @@ -119,7 +119,7 @@ func (task *CascadeRegistrationTask) Download( } return task.wrapErr(ctx, "failed to download artifacts", err, fields) } - logtrace.Info(ctx, "File reconstructed and hash verified", fields) + logtrace.Debug(ctx, "File reconstructed and hash verified", fields) // Notify: decode completed, file ready on disk task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, "Decode completed", filePath, tmpDir, send) @@ -127,7 +127,7 @@ func (task *CascadeRegistrationTask) Download( } func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields, send func(resp *DownloadResponse) error) (string, string, error) { - logtrace.Info(ctx, "started downloading the artifacts", fields) + logtrace.Debug(ctx, "started downloading the artifacts", fields) var ( layout codec.Layout @@ -138,13 +138,13 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx 
context.Context, acti for _, indexID := range metadata.RqIdsIds { iStart := time.Now() - logtrace.Info(ctx, "RPC Retrieve index file", logtrace.Fields{"index_id": indexID}) + logtrace.Debug(ctx, "RPC Retrieve index file", logtrace.Fields{"index_id": indexID}) indexFile, err := task.P2PClient.Retrieve(ctx, indexID) if err != nil || len(indexFile) == 0 { logtrace.Warn(ctx, "Retrieve index file failed or empty", logtrace.Fields{"index_id": indexID, logtrace.FieldError: fmt.Sprintf("%v", err)}) continue } - logtrace.Info(ctx, "Retrieve index file completed", logtrace.Fields{"index_id": indexID, "bytes": len(indexFile), "ms": time.Since(iStart).Milliseconds()}) + logtrace.Debug(ctx, "Retrieve index file completed", logtrace.Fields{"index_id": indexID, "bytes": len(indexFile), "ms": time.Since(iStart).Milliseconds()}) // Parse index file to get layout IDs indexData, err := task.parseIndexFile(indexFile) @@ -164,7 +164,7 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti layoutDecodeMS = decMS if len(layout.Blocks) > 0 { - logtrace.Info(ctx, "layout file retrieved via index", logtrace.Fields{"index_id": indexID, "attempts": layoutAttempts, "net_ms": layoutFetchMS, "decode_ms": layoutDecodeMS}) + logtrace.Debug(ctx, "layout file retrieved via index", logtrace.Fields{"index_id": indexID, "attempts": layoutAttempts, "net_ms": layoutFetchMS, "decode_ms": layoutDecodeMS}) break } } @@ -214,7 +214,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( if targetRequiredCount < 1 && totalSymbols > 0 { targetRequiredCount = 1 } - logtrace.Info(ctx, "Retrieving target-required symbols for decode", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount}) + logtrace.Info(ctx, "Retrieving target-required symbols for decode", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": 
targetRequiredCount}) if !task.config.MetricsDisabled { cm.StartRetrieveCapture(actionID) @@ -232,7 +232,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( reqCount = totalSymbols } rStart := time.Now() - logtrace.Info(ctx, "RPC BatchRetrieve symbols", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols}) + logtrace.Info(ctx, "RPC BatchRetrieve symbols", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols}) symbols, err := task.P2PClient.BatchRetrieve(ctxRetrieve, allSymbols, reqCount, actionID) if err != nil { fields[logtrace.FieldError] = err.Error() @@ -240,12 +240,12 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( return "", "", fmt.Errorf("batch retrieve symbols: %w", err) } retrieveMS := time.Since(retrieveStart).Milliseconds() - logtrace.Info(ctx, "RPC BatchRetrieve completed", logtrace.Fields{"action_id": actionID, "received": len(symbols), "ms": time.Since(rStart).Milliseconds()}) + logtrace.Info(ctx, "RPC BatchRetrieve completed", logtrace.Fields{"action_id": actionID, "received": len(symbols), "ms": time.Since(rStart).Milliseconds()}) // Measure decode duration decodeStart := time.Now() dStart := time.Now() - logtrace.Info(ctx, "RQ Decode start", logtrace.Fields{"action_id": actionID}) + logtrace.Info(ctx, "RQ Decode start", logtrace.Fields{"action_id": actionID}) decodeInfo, err := task.RQ.Decode(ctx, adaptors.DecodeRequest{ ActionID: actionID, Symbols: symbols, @@ -257,7 +257,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", err) } decodeMS := time.Since(decodeStart).Milliseconds() - logtrace.Info(ctx, "RQ Decode completed", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) + logtrace.Info(ctx, "RQ Decode completed", logtrace.Fields{"action_id": actionID, "ms": 
time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) // Set minimal retrieve summary and emit event strictly from internal collector if !task.config.MetricsDisabled { @@ -298,13 +298,13 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( if decodeInfo.DecodeTmpDir != "" { if set, derr := utils.ReadDirFilenames(decodeInfo.DecodeTmpDir); derr == nil { if left := len(set); left > 0 { - logtrace.Info(ctx, "Decode tmp directory has files remaining", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir, "left": left}) + logtrace.Debug(ctx, "Decode tmp directory has files remaining", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir, "left": left}) } else { - logtrace.Info(ctx, "Decode tmp directory is empty", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir}) + logtrace.Debug(ctx, "Decode tmp directory is empty", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir}) } } } - logtrace.Info(ctx, "File successfully restored and hash verified", fields) + logtrace.Info(ctx, "File successfully restored and hash verified", fields) return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil } @@ -346,7 +346,7 @@ func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context for _, layoutID := range indexData.LayoutIDs { attempts++ t0 := time.Now() - logtrace.Info(ctx, "RPC Retrieve layout file", logtrace.Fields{"layout_id": layoutID, "attempt": attempts}) + logtrace.Debug(ctx, "RPC Retrieve layout file", logtrace.Fields{"layout_id": layoutID, "attempt": attempts}) layoutFile, err := task.P2PClient.Retrieve(ctx, layoutID) took := time.Since(t0).Milliseconds() totalFetchMS += took @@ -365,7 +365,7 @@ func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context } if len(layout.Blocks) > 0 { - logtrace.Info(ctx, "Layout file retrieved and parsed", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "net_ms": took, "decode_ms": decMS}) + logtrace.Debug(ctx, "Layout file retrieved and parsed", 
logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "net_ms": took, "decode_ms": decMS}) return layout, totalFetchMS, totalDecodeMS, attempts, nil } } @@ -381,12 +381,12 @@ func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, dirPat } // For now, we use tmp directory path as provided by decoder - logtrace.Info(ctx, "Cleanup download directory", logtrace.Fields{"dir": dirPath}) + logtrace.Debug(ctx, "Cleanup download directory", logtrace.Fields{"dir": dirPath}) if err := os.RemoveAll(dirPath); err != nil { logtrace.Warn(ctx, "Cleanup download directory failed", logtrace.Fields{"dir": dirPath, logtrace.FieldError: err.Error()}) return errors.Errorf("failed to delete download directory: %s, :%s", dirPath, err.Error()) } - logtrace.Info(ctx, "Cleanup download directory completed", logtrace.Fields{"dir": dirPath}) + logtrace.Debug(ctx, "Cleanup download directory completed", logtrace.Fields{"dir": dirPath}) return nil } diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go index e6197b41..bdb87b6e 100644 --- a/supernode/services/cascade/helper.go +++ b/supernode/services/cascade/helper.go @@ -36,7 +36,7 @@ func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID s if res.GetAction().ActionID == "" { return nil, task.wrapErr(ctx, "action not found", errors.New(""), f) } - logtrace.Info(ctx, "action has been retrieved", f) + logtrace.Debug(ctx, "action has been retrieved", f) return res.GetAction(), nil } @@ -46,7 +46,7 @@ func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, b if err != nil { return task.wrapErr(ctx, "failed to get top SNs", err, f) } - logtrace.Info(ctx, "Fetched Top Supernodes", f) + logtrace.Debug(ctx, "Fetched Top Supernodes", f) if !supernode.Exists(top.Supernodes, task.config.SupernodeAccountAddress) { // Build information about supernodes for better error context @@ -54,7 +54,7 @@ func (task *CascadeRegistrationTask) 
ensureIsTopSupernode(ctx context.Context, b for i, sn := range top.Supernodes { addresses[i] = sn.SupernodeAccount } - logtrace.Info(ctx, "Supernode not in top list", logtrace.Fields{ + logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{ "currentAddress": task.config.SupernodeAccountAddress, "topSupernodes": addresses, }) @@ -78,7 +78,7 @@ func (task *CascadeRegistrationTask) verifyDataHash(ctx context.Context, dh []by if string(b64) != expected { return task.wrapErr(ctx, "data hash doesn't match", errors.New(""), f) } - logtrace.Info(ctx, "request data-hash has been matched with the action data-hash", f) + logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", f) return nil } @@ -110,7 +110,7 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. if err := task.LumeraClient.Verify(ctx, creator, []byte(indexFileB64), creatorSigBytes); err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify creator signature", err, f) } - logtrace.Info(ctx, "creator signature successfully verified", f) + logtrace.Debug(ctx, "creator signature successfully verified", f) // Decode index file to get the layout signature indexFile, err := decodeIndexFile(indexFileB64) @@ -132,7 +132,7 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. 
if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) } - logtrace.Info(ctx, "layout signature successfully verified", f) + logtrace.Debug(ctx, "layout signature successfully verified", f) return encodedMeta, indexFile.LayoutSignature, nil } @@ -187,14 +187,18 @@ func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionI for k, v := range f { lf[k] = v } - logtrace.Info(ctx, "storeArtefacts invoked", lf) - - return task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ - IDFiles: idFiles, - SymbolsDir: symbolsDir, - TaskID: task.ID(), - ActionID: actionID, - }, f) + logtrace.Info(ctx, "storeArtefacts invoked", lf) + + if err := task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ + IDFiles: idFiles, + SymbolsDir: symbolsDir, + TaskID: task.ID(), + ActionID: actionID, + }, f); err != nil { + // Log and wrap to ensure a proper error line and context + return task.wrapErr(ctx, "failed to store artefacts", err, lf) + } + return nil } func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, err error, f logtrace.Fields) error { @@ -228,7 +232,7 @@ func (task *CascadeRegistrationTask) emitArtefactsStored( b, _ := json.MarshalIndent(payload, "", " ") msg := string(b) fields["metrics_json"] = msg - logtrace.Info(ctx, "artefacts have been stored", fields) + logtrace.Debug(ctx, "artefacts have been stored", fields) task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) // No central state to clear; adaptor returns calls inline } @@ -293,7 +297,7 @@ func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action requiredFee := sdk.NewCoin("ulume", math.NewInt(amount)) // Log the calculated fee - logtrace.Info(ctx, "calculated required fee", logtrace.Fields{ + logtrace.Debug(ctx, "calculated required fee", logtrace.Fields{ "fee": requiredFee.String(), "dataBytes": 
dataSize, }) @@ -391,6 +395,6 @@ func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context return task.wrapErr(ctx, "failed to verify download signature", err, fields) } - logtrace.Info(ctx, "download signature successfully verified", fields) + logtrace.Debug(ctx, "download signature successfully verified", fields) return nil } diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go index 8d6cfd07..faac4309 100644 --- a/supernode/services/cascade/register.go +++ b/supernode/services/cascade/register.go @@ -116,6 +116,8 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } + // Promote to Info and include symbols directory for quick visibility + fields["symbols_dir"] = encResp.SymbolsDir logtrace.Info(ctx, "Input encoded", fields) task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) @@ -134,6 +136,8 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } + // Include count of ID files generated for visibility + fields["id_files_count"] = len(rqidResp.RedundantMetadataFiles) logtrace.Info(ctx, "RQID files generated", fields) task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) @@ -147,7 +151,7 @@ func (task *CascadeRegistrationTask) Register( /* 10. 
Simulate finalize to avoid storing artefacts if it would fail ---------- */ if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { fields[logtrace.FieldError] = err.Error() - logtrace.Debug(ctx, "Finalize simulation failed", fields) + logtrace.Info(ctx, "Finalize simulation failed", fields) // Emit explicit simulation failure event for client visibility task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) return task.wrapErr(ctx, "finalize action simulation failed", err, fields) @@ -170,7 +174,7 @@ func (task *CascadeRegistrationTask) Register( resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) if err != nil { fields[logtrace.FieldError] = err.Error() - logtrace.Debug(ctx, "Finalize action error", fields) + logtrace.Info(ctx, "Finalize action error", fields) return task.wrapErr(ctx, "failed to finalize action", err, fields) } txHash := resp.TxResponse.TxHash From ee927a05825f676d60c23f5063ec8b7fe4175c8a Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Thu, 2 Oct 2025 18:00:17 +0500 Subject: [PATCH 20/27] High Signal Logs --- p2p/kademlia/dht.go | 16 +++--- p2p/kademlia/message.go | 3 ++ p2p/kademlia/network.go | 59 ++++++++++++++-------- p2p/kademlia/rq_symbols.go | 16 +++--- pkg/logtrace/log.go | 5 ++ supernode/services/cascade/adaptors/p2p.go | 57 ++++++++++----------- supernode/services/cascade/download.go | 36 +++++++------ supernode/services/cascade/helper.go | 24 ++++----- supernode/services/cascade/register.go | 32 +++++++----- 9 files changed, 142 insertions(+), 106 deletions(-) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 67a2982b..9a6204a5 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -817,7 +817,7 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, wg.Wait() netFound := int(atomic.LoadInt32(&networkFound)) - logtrace.Info(ctx, "DHT BatchRetrieve complete", 
logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(start).Milliseconds()}) + logtrace.Info(ctx, "dht: batch retrieve summary", logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(start).Milliseconds()}) // Record batch retrieve stats for internal DHT snapshot window s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(start)) // Also feed retrieve counts into the per-task collector for stream events @@ -1072,12 +1072,12 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, func (s *DHT) doBatchGetValuesCall(ctx context.Context, node *Node, requestKeys map[string]KeyValWithClosest) (map[string]KeyValWithClosest, error) { request := s.newMessage(BatchGetValues, node, &BatchGetValuesRequest{Data: requestKeys}) - logtrace.Info(ctx, "RPC BatchGetValues send", logtrace.Fields{"node": node.String(), "keys": len(requestKeys)}) + logtrace.Info(ctx, "dht: batch get send", logtrace.Fields{"node": node.String(), "keys": len(requestKeys)}) response, err := s.network.Call(ctx, request, false) if err != nil { return nil, fmt.Errorf("network call request %s failed: %w", request.String(), err) } - logtrace.Info(ctx, "RPC BatchGetValues completed", logtrace.Fields{"node": node.String()}) + logtrace.Info(ctx, "dht: batch get ok", logtrace.Fields{"node": node.String()}) resp, ok := response.Data.(*BatchGetValuesResponse) if !ok { @@ -1685,7 +1685,7 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i knownNodes := make(map[string]*Node) hashes := make([][]byte, len(values)) - logtrace.Info(ctx, "Iterate batch store begin", logtrace.Fields{ + logtrace.Info(ctx, "dht: batch store start", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "keys": len(values), @@ -1773,14 +1773,14 @@ func (s *DHT) IterateBatchStore(ctx 
context.Context, values [][]byte, typ int, i successRate := float64(successful) / float64(requests) * 100 if successRate >= minimumDataStoreSuccessRate { - logtrace.Info(ctx, "Successful store operations", logtrace.Fields{ + logtrace.Info(ctx, "dht: batch store ok", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "success_rate": fmt.Sprintf("%.2f%%", successRate), }) return nil } else { - logtrace.Info(ctx, "Failed to achieve desired success rate", logtrace.Fields{ + logtrace.Info(ctx, "dht: batch store below threshold", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "success_rate": fmt.Sprintf("%.2f%%", successRate), @@ -1837,7 +1837,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ totalBytes += len(values[idx]) } - logtrace.Info(ctx, "RPC BatchStoreData send", logtrace.Fields{logtrace.FieldModule: "dht", "node": receiver.String(), "keys": len(toStore), "size_mb": utils.BytesIntToMB(totalBytes)}) + logtrace.Info(ctx, "dht: batch store RPC send", logtrace.Fields{logtrace.FieldModule: "dht", "node": receiver.String(), "keys": len(toStore), "size_mb": utils.BytesIntToMB(totalBytes)}) // Skip empty payloads: avoid sending empty store RPCs and do not record no-op metrics. 
if len(toStore) == 0 { @@ -1863,7 +1863,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ return } - logtrace.Info(ctx, "RPC BatchStoreData completed", logtrace.Fields{logtrace.FieldModule: "p2p", "node": receiver.String(), "keys": len(toStore), "ms": dur}) + logtrace.Info(ctx, "dht: batch store RPC ok", logtrace.Fields{logtrace.FieldModule: "p2p", "node": receiver.String(), "keys": len(toStore), "ms": dur}) responses <- &MessageWithError{Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur} } }(node, key) diff --git a/p2p/kademlia/message.go b/p2p/kademlia/message.go index 0baef37c..a6483b2d 100644 --- a/p2p/kademlia/message.go +++ b/p2p/kademlia/message.go @@ -66,6 +66,9 @@ type Message struct { Receiver *Node // the receiver node MessageType int // the message type Data interface{} // the real data for the request + // CorrelationID carries a best-effort trace identifier so that logs + // across nodes can be joined in external systems. 
+ CorrelationID string } func (m *Message) String() string { diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index 2088fcc5..d03e10cd 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -196,7 +196,7 @@ func (s *Network) handleFindValue(ctx context.Context, message *Message) (res [] request, ok := message.Data.(*FindValueRequest) if !ok { err := errors.New("invalid FindValueRequest") - return s.generateResponseMessage(FindValue, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, FindValue, message.Sender, ResultFailed, err.Error()) } // add the sender to queries hash table @@ -251,7 +251,7 @@ func (s *Network) handleStoreData(ctx context.Context, message *Message) (res [] request, ok := message.Data.(*StoreDataRequest) if !ok { err := errors.New("invalid StoreDataRequest") - return s.generateResponseMessage(StoreData, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, StoreData, message.Sender, ResultFailed, err.Error()) } logtrace.Debug(ctx, "Handle store data", logtrace.Fields{logtrace.FieldModule: "p2p", "message": message.String()}) @@ -267,7 +267,7 @@ func (s *Network) handleStoreData(ctx context.Context, message *Message) (res [] // store the data to queries storage if err := s.dht.store.Store(ctx, key, request.Data, request.Type, false); err != nil { err = errors.Errorf("store the data: %w", err) - return s.generateResponseMessage(StoreData, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, StoreData, message.Sender, ResultFailed, err.Error()) } } @@ -292,13 +292,13 @@ func (s *Network) handleReplicate(ctx context.Context, message *Message) (res [] request, ok := message.Data.(*ReplicateDataRequest) if !ok { err := errors.New("invalid ReplicateDataRequest") - return s.generateResponseMessage(Replicate, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, Replicate, message.Sender, ResultFailed, 
err.Error()) } logtrace.Debug(ctx, "Handle replicate data", logtrace.Fields{logtrace.FieldModule: "p2p", "message": message.String()}) if err := s.handleReplicateRequest(ctx, request, message.Sender.ID, message.Sender.IP, message.Sender.Port); err != nil { - return s.generateResponseMessage(Replicate, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, Replicate, message.Sender, ResultFailed, err.Error()) } response := &ReplicateDataResponse{ @@ -347,7 +347,7 @@ func (s *Network) handleReplicateRequest(ctx context.Context, req *ReplicateData return nil } -func (s *Network) handlePing(_ context.Context, message *Message) ([]byte, error) { +func (s *Network) handlePing(ctx context.Context, message *Message) ([]byte, error) { // new a response message resMsg := s.dht.newMessage(Ping, message.Sender, nil) @@ -412,6 +412,11 @@ func (s *Network) handleConn(ctx context.Context, rawConn net.Conn) { }) return } + // stitch correlation id into context for downstream handler logs + if request != nil && strings.TrimSpace(request.CorrelationID) != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, request.CorrelationID) + } + reqID := uuid.New().String() mt := request.MessageType @@ -605,6 +610,13 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes logtrace.Debug(ctx, fmt.Sprintf("RPC %s start remote=%s timeout_ms=%d", msgName(request.MessageType), remoteAddr, int64(timeout/time.Millisecond)), startFields) } + // Attach correlation id only for high-signal messages (store/retrieve batches) + if isHighSignalMsg(request.MessageType) { + if cid := logtrace.CorrelationIDFromContext(ctx); cid != "unknown" { + request.CorrelationID = cid + } + } + // try get from pool s.connPoolMtx.Lock() conn, err := s.connPool.Get(remoteAddr) @@ -873,7 +885,7 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r if err := s.sem.Acquire(ctxWithTimeout, 1); err != nil { logtrace.Error(ctx, "Failed to acquire
semaphore within 1 minute", logtrace.Fields{logtrace.FieldModule: "p2p"}) // failed to acquire semaphore within 1 minute - return s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, errorBusy) + return s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, errorBusy) } logtrace.Debug(ctx, "Semaphore acquired after waiting", logtrace.Fields{logtrace.FieldModule: "p2p"}) } @@ -899,18 +911,18 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r err = errors.New("unknown error") } - res, _ = s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, err.Error()) + res, _ = s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, err.Error()) } }() request, ok := message.Data.(*BatchFindValuesRequest) if !ok { - return s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, "invalid BatchFindValueRequest") + return s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, "invalid BatchFindValueRequest") } isDone, data, err := s.handleBatchFindValuesRequest(ctx, request, message.Sender.IP, reqID) if err != nil { - return s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, err.Error()) } response := &BatchFindValuesResponse{ @@ -922,6 +934,7 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r } resMsg := s.dht.newMessage(BatchFindValues, message.Sender, response) + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) return s.encodeMesage(resMsg) } @@ -958,7 +971,7 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, Error: err.Error(), }) appended = true - return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, 
err.Error()) } logtrace.Debug(ctx, "Batch get values request received", logtrace.Fields{ @@ -988,10 +1001,10 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, Error: err.Error(), }) appended = true - return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) } - logtrace.Info(ctx, "Batch get values request processed", logtrace.Fields{ + logtrace.Info(ctx, "network: batch get values ok", logtrace.Fields{ logtrace.FieldModule: "p2p", "requested-keys": len(keys), "found": count, @@ -1016,6 +1029,7 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, // new a response message resMsg := s.dht.newMessage(BatchGetValues, message.Sender, response) + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ TimeUnix: time.Now().UTC().Unix(), SenderID: string(message.Sender.ID), @@ -1229,11 +1243,11 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r Error: err.Error(), }) appended = true - return s.generateResponseMessage(BatchStoreData, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) } // log.P2P().WithContext(ctx).Info("handle batch store data request received") - logtrace.Info(ctx, "Handle batch store data request received", logtrace.Fields{ + logtrace.Info(ctx, "network: batch store recv", logtrace.Fields{ logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), @@ -1254,7 +1268,7 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r Error: err.Error(), }) appended = true - return s.generateResponseMessage(BatchStoreData, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchStoreData, 
message.Sender, ResultFailed, err.Error()) } response := &StoreDataResponse{ @@ -1263,7 +1277,7 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r }, } // log.P2P().WithContext(ctx).Info("handle batch store data request processed") - logtrace.Info(ctx, "Handle batch store data request processed", logtrace.Fields{ + logtrace.Info(ctx, "network: batch store ok", logtrace.Fields{ logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), @@ -1271,6 +1285,7 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r // new a response message resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response) + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ TimeUnix: time.Now().UTC().Unix(), SenderID: string(message.Sender.ID), @@ -1294,7 +1309,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re request, ok := message.Data.(*BatchFindNodeRequest) if !ok { err := errors.New("invalid FindNodeRequest") - return s.generateResponseMessage(BatchFindNode, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchFindNode, message.Sender, ResultFailed, err.Error()) } // add the sender to queries hash table @@ -1329,7 +1344,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re return s.encodeMesage(resMsg) } -func (s *Network) generateResponseMessage(messageType int, receiver *Node, result ResultType, errMsg string) ([]byte, error) { +func (s *Network) generateResponseMessage(ctx context.Context, messageType int, receiver *Node, result ResultType, errMsg string) ([]byte, error) { responseStatus := ResponseStatus{ Result: result, ErrMsg: errMsg, @@ -1357,6 +1372,10 @@ func (s *Network) generateResponseMessage(messageType int, receiver *Node, resul } resMsg := s.dht.newMessage(messageType, receiver, response) + // propagate 
correlation id on responses too, but only for high-signal messages + if isHighSignalMsg(messageType) { + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) + } return s.encodeMesage(resMsg) } @@ -1378,7 +1397,7 @@ func (s *Network) handlePanic(ctx context.Context, sender *Node, messageType int err = errors.New("unknown error") } - if res, err := s.generateResponseMessage(messageType, sender, ResultFailed, err.Error()); err != nil { + if res, err := s.generateResponseMessage(ctx, messageType, sender, ResultFailed, err.Error()); err != nil { // log.WithContext(ctx).Errorf("Error generating response message: %v", err) logtrace.Error(ctx, "Error generating response message", logtrace.Fields{ logtrace.FieldModule: "p2p", diff --git a/p2p/kademlia/rq_symbols.go b/p2p/kademlia/rq_symbols.go index 98e9c2ad..f1956dc4 100644 --- a/p2p/kademlia/rq_symbols.go +++ b/p2p/kademlia/rq_symbols.go @@ -40,26 +40,28 @@ func (s *DHT) storeSymbols(ctx context.Context) error { // Minimal visibility: how many dirs to process this tick if len(dirs) > 0 { - logtrace.Info(ctx, "rq_symbols: todo directories", logtrace.Fields{"count": len(dirs)}) + logtrace.Info(ctx, "worker: symbols todo", logtrace.Fields{"count": len(dirs)}) } for _, dir := range dirs { + // Use txid as correlation id so worker logs join with register flow + wctx := logtrace.CtxWithCorrelationID(ctx, dir.TXID) // Pre-count symbols in this directory preCount := -1 if set, rerr := utils.ReadDirFilenames(dir.Dir); rerr == nil { preCount = len(set) } start := time.Now() - logtrace.Info(ctx, "rq_symbols: processing dir", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "symbols": preCount}) - if err := s.scanDirAndStoreSymbols(ctx, dir.Dir, dir.TXID); err != nil { - logtrace.Error(ctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) + logtrace.Info(wctx, "worker: dir start", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "symbols": preCount}) + if err :=
s.scanDirAndStoreSymbols(wctx, dir.Dir, dir.TXID); err != nil { + logtrace.Error(wctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) } // Post-count remaining symbols remCount := -1 if set, rerr := utils.ReadDirFilenames(dir.Dir); rerr == nil { remCount = len(set) } - logtrace.Info(ctx, "rq_symbols: processed dir", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "remaining": remCount, "ms": time.Since(start).Milliseconds()}) + logtrace.Info(wctx, "worker: dir done", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "remaining": remCount, "ms": time.Since(start).Milliseconds()}) } return nil @@ -108,7 +110,7 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro // --------------------------------------------------------------------- func (s *DHT) storeSymbolsInP2P(ctx context.Context, dir string, keys []string) error { // Per-batch visibility for background worker - logtrace.Info(ctx, "rq_symbols: worker StoreBatch send", logtrace.Fields{"dir": dir, "keys": len(keys)}) + logtrace.Info(ctx, "worker: batch send", logtrace.Fields{"dir": dir, "keys": len(keys)}) start := time.Now() loaded, err := utils.LoadSymbols(dir, keys) @@ -120,7 +122,7 @@ func (s *DHT) storeSymbolsInP2P(ctx context.Context, dir string, keys []string) return fmt.Errorf("p2p store batch: %w", err) } - logtrace.Info(ctx, "rq_symbols: worker StoreBatch completed", logtrace.Fields{"dir": dir, "keys": len(loaded), "ms": time.Since(start).Milliseconds()}) + logtrace.Info(ctx, "worker: batch ok", logtrace.Fields{"dir": dir, "keys": len(loaded), "ms": time.Since(start).Milliseconds()}) if err := utils.DeleteSymbols(ctx, dir, keys); err != nil { return fmt.Errorf("delete symbols: %w", err) diff --git a/pkg/logtrace/log.go b/pkg/logtrace/log.go index 0c19d9f1..2a35fe38 100644 --- a/pkg/logtrace/log.go +++ b/pkg/logtrace/log.go @@ -86,6 +86,11 @@ func CtxWithCorrelationID(ctx context.Context, correlationID string) context.Con 
return context.WithValue(ctx, CorrelationIDKey, correlationID) } +// CorrelationIDFromContext returns the correlation ID from context or "unknown". +func CorrelationIDFromContext(ctx context.Context) string { + return extractCorrelationID(ctx) +} + // extractCorrelationID retrieves the correlation ID from context func extractCorrelationID(ctx context.Context) string { if correlationID, ok := ctx.Value(CorrelationIDKey).(string); ok { diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go index 98e0a8a6..7b5a51e4 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/services/cascade/adaptors/p2p.go @@ -57,7 +57,7 @@ type StoreArtefactsRequest struct { } func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { - logtrace.Info(ctx, "StoreArtefacts start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) + logtrace.Info(ctx, "store: p2p start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) // Optionally enable per-node store RPC capture for this task if !p.metricsDisabled { @@ -78,10 +78,10 @@ func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, remaining = len(keys) } } - logtrace.Info(ctx, "StoreArtefacts completed", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": dur}) - if remaining == 0 { - logtrace.Info(ctx, "Symbols directory is empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) - } + logtrace.Info(ctx, "store: first-pass complete", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), 
"symbols_left_on_disk": remaining, "ms": dur}) + if remaining == 0 { + logtrace.Info(ctx, "store: dir empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) + } // Record store summary for later event emission cm.SetStoreSummary(req.TaskID, firstPassSymbols, totalSymbols, len(req.IDFiles), dur) return nil @@ -111,12 +111,12 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action if targetCount < 1 && totalAvailable > 0 { targetCount = 1 } - logtrace.Info(ctx, "Symbols discovered in directory", logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir}) - logtrace.Info(ctx, "first-pass target coverage (symbols)", logtrace.Fields{ - "total_symbols": totalAvailable, - "target_percent": storeSymbolsPercent, - "target_count": targetCount, - }) + logtrace.Info(ctx, "store: symbols discovered", logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "store: target coverage", logtrace.Fields{ + "total_symbols": totalAvailable, + "target_percent": storeSymbolsPercent, + "target_count": targetCount, + }) /* down-sample if we exceed the “big directory” threshold ------------- */ if len(keys) > loadSymbolsBatchSize { @@ -127,8 +127,8 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action } sort.Strings(keys) // deterministic order inside the sample } - logtrace.Info(ctx, "first-pass selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) - logtrace.Info(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) + logtrace.Info(ctx, "store: selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "store: sending symbols", logtrace.Fields{"count": len(keys)}) /* stream in fixed-size batches -------------------------------------- */ @@ -164,7 +164,7 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, 
taskID, action payload = append(payload, symBytes...) // Send as the same data type you use for symbols - logtrace.Info(ctx, "RPC StoreBatch (first-batch): metadata + symbols", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) + logtrace.Info(ctx, "store: batch send (first)", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) bctx = cm.WithTaskID(bctx, taskID) err = p.p2p.StoreBatch(bctx, payload, storage.P2PDataRaptorQSymbol, taskID) @@ -172,7 +172,7 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action if err != nil { return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err) } - logtrace.Info(ctx, "RPC StoreBatch completed (first-batch)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)}) + logtrace.Info(ctx, "store: batch ok (first)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)}) totalSymbols += len(symBytes) // No per-RPC metrics propagated from p2p @@ -185,11 +185,11 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action } // Log remaining symbols in directory after deletion if rem, werr := walkSymbolTree(symbolsDir); werr == nil { - if left := len(rem); left > 0 { - logtrace.Info(ctx, "symbols left after first-batch", logtrace.Fields{"taskID": taskID, "left": left}) - } else { - logtrace.Info(ctx, "Symbols directory is empty after first-batch", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) - } + if left := len(rem); left > 0 { + logtrace.Info(ctx, "store: remaining after first batch", logtrace.Fields{"taskID": taskID, "left": left}) + } else { + logtrace.Info(ctx, "store: dir empty after first batch", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) + } } firstBatchProcessed = true @@ 
-209,19 +209,18 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action if totalAvailable > 0 { achievedPct = (float64(totalSymbols) / float64(totalAvailable)) * 100.0 } - logtrace.Info(ctx, "first-pass achieved coverage (symbols)", - logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct}) + logtrace.Info(ctx, "store: coverage", logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct}) if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { return totalSymbols, totalAvailable, fmt.Errorf("update first-batch flag: %w", err) } // Final remaining count after first pass flagged if rem, werr := walkSymbolTree(symbolsDir); werr == nil { - if left := len(rem); left > 0 { - logtrace.Info(ctx, "first-pass completed; symbols remaining on disk", logtrace.Fields{"taskID": taskID, "left": left, "dir": symbolsDir}) - } else { - logtrace.Info(ctx, "first-pass completed; directory empty", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) - } + if left := len(rem); left > 0 { + logtrace.Info(ctx, "store: remaining after first-pass", logtrace.Fields{"taskID": taskID, "left": left, "dir": symbolsDir}) + } else { + logtrace.Info(ctx, "store: directory empty after first-pass", logtrace.Fields{"taskID": taskID, "dir": symbolsDir}) + } } return totalSymbols, totalAvailable, nil @@ -268,11 +267,11 @@ func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fi symCtx = cm.WithTaskID(symCtx, taskID) defer cancel() - logtrace.Info(ctx, "RPC StoreBatch (symbols batch)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) + logtrace.Info(ctx, "store: batch send (symbols)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) if err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID); err != nil { return len(symbols), fmt.Errorf("p2p store batch: %w", err) } - logtrace.Info(ctx, "RPC StoreBatch completed (symbols batch)", 
logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) + logtrace.Info(ctx, "store: batch ok (symbols)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { return len(symbols), fmt.Errorf("delete symbols: %w", err) diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index f923028a..43d2e3b8 100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -47,8 +47,12 @@ func (task *CascadeRegistrationTask) Download( req *DownloadRequest, send func(resp *DownloadResponse) error, ) (err error) { + // Seed correlation ID from actionID for downstream logs + if req != nil && req.ActionID != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + } fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} - logtrace.Info(ctx, "Cascade download request received", fields) + logtrace.Info(ctx, "download: request", fields) // Ensure task status is finalized regardless of outcome defer func() { @@ -66,7 +70,7 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "failed to get action", err, fields) } - logtrace.Info(ctx, "Action retrieved", fields) + logtrace.Info(ctx, "download: action fetched", fields) task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) if actionDetails.GetAction().State != actiontypes.ActionStateDone { @@ -76,14 +80,14 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldActionState] = actionDetails.GetAction().State return task.wrapErr(ctx, "action not finalized yet", err, fields) } - logtrace.Info(ctx, "Action state validated", fields) + logtrace.Info(ctx, "download: action state ok", fields) metadata, err := task.decodeCascadeMetadata(ctx, actionDetails.GetAction().Metadata, fields) if err != nil { fields[logtrace.FieldError] = 
err.Error() return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) } - logtrace.Info(ctx, "Cascade metadata decoded", fields) + logtrace.Info(ctx, "download: metadata decoded", fields) task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send) // Enforce download authorization based on metadata.Public @@ -99,15 +103,15 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "failed to verify download signature", err, fields) } - logtrace.Info(ctx, "Download signature verified for private cascade", fields) - } else { - logtrace.Info(ctx, "Public cascade: skipping download signature verification", fields) - } + logtrace.Info(ctx, "download: signature verified", fields) + } else { + logtrace.Info(ctx, "download: public cascade (no signature)", fields) + } // Notify: network retrieval phase begins - task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) + task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) - logtrace.Info(ctx, "Starting network retrieval of artefacts", logtrace.Fields{logtrace.FieldActionID: actionDetails.GetAction().ActionID}) + logtrace.Info(ctx, "download: network retrieval start", logtrace.Fields{logtrace.FieldActionID: actionDetails.GetAction().ActionID}) filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send) if err != nil { fields[logtrace.FieldError] = err.Error() @@ -214,7 +218,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( if targetRequiredCount < 1 && totalSymbols > 0 { targetRequiredCount = 1 } - logtrace.Info(ctx, "Retrieving target-required symbols for decode", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount}) + logtrace.Info(ctx, "download: 
plan symbols", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount}) if !task.config.MetricsDisabled { cm.StartRetrieveCapture(actionID) @@ -232,7 +236,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( reqCount = totalSymbols } rStart := time.Now() - logtrace.Info(ctx, "RPC BatchRetrieve symbols", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols}) + logtrace.Info(ctx, "download: batch retrieve start", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols}) symbols, err := task.P2PClient.BatchRetrieve(ctxRetrieve, allSymbols, reqCount, actionID) if err != nil { fields[logtrace.FieldError] = err.Error() @@ -240,12 +244,12 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( return "", "", fmt.Errorf("batch retrieve symbols: %w", err) } retrieveMS := time.Since(retrieveStart).Milliseconds() - logtrace.Info(ctx, "RPC BatchRetrieve completed", logtrace.Fields{"action_id": actionID, "received": len(symbols), "ms": time.Since(rStart).Milliseconds()}) + logtrace.Info(ctx, "download: batch retrieve ok", logtrace.Fields{"action_id": actionID, "received": len(symbols), "ms": time.Since(rStart).Milliseconds()}) // Measure decode duration decodeStart := time.Now() dStart := time.Now() - logtrace.Info(ctx, "RQ Decode start", logtrace.Fields{"action_id": actionID}) + logtrace.Info(ctx, "download: decode start", logtrace.Fields{"action_id": actionID}) decodeInfo, err := task.RQ.Decode(ctx, adaptors.DecodeRequest{ ActionID: actionID, Symbols: symbols, @@ -257,7 +261,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", err) } decodeMS := time.Since(decodeStart).Milliseconds() - logtrace.Info(ctx, "RQ Decode completed", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": 
decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) + logtrace.Info(ctx, "download: decode ok", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) // Set minimal retrieve summary and emit event strictly from internal collector if !task.config.MetricsDisabled { @@ -304,7 +308,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( } } } - logtrace.Info(ctx, "File successfully restored and hash verified", fields) + logtrace.Info(ctx, "download: file verified", fields) return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil } diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go index bdb87b6e..fea1d47e 100644 --- a/supernode/services/cascade/helper.go +++ b/supernode/services/cascade/helper.go @@ -187,18 +187,18 @@ func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionI for k, v := range f { lf[k] = v } - logtrace.Info(ctx, "storeArtefacts invoked", lf) - - if err := task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ - IDFiles: idFiles, - SymbolsDir: symbolsDir, - TaskID: task.ID(), - ActionID: actionID, - }, f); err != nil { - // Log and wrap to ensure a proper error line and context - return task.wrapErr(ctx, "failed to store artefacts", err, lf) - } - return nil + logtrace.Info(ctx, "store: first-pass begin", lf) + + if err := task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ + IDFiles: idFiles, + SymbolsDir: symbolsDir, + TaskID: task.ID(), + ActionID: actionID, + }, f); err != nil { + // Log and wrap to ensure a proper error line and context + return task.wrapErr(ctx, "failed to store artefacts", err, lf) + } + return nil } func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, err error, f logtrace.Fields) error { diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go index faac4309..3afac227 100644 --- 
a/supernode/services/cascade/register.go +++ b/supernode/services/cascade/register.go @@ -44,9 +44,13 @@ func (task *CascadeRegistrationTask) Register( req *RegisterRequest, send func(resp *RegisterResponse) error, ) (err error) { + // Seed correlation ID from actionID so logs across layers can be joined + if req != nil && req.ActionID != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + } fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} - logtrace.Info(ctx, "Cascade registration request received", fields) + logtrace.Info(ctx, "register: request", fields) // Ensure task status and resources are finalized regardless of outcome defer func() { @@ -78,14 +82,14 @@ func (task *CascadeRegistrationTask) Register( fields[logtrace.FieldCreator] = action.Creator fields[logtrace.FieldStatus] = action.State fields[logtrace.FieldPrice] = action.Price - logtrace.Info(ctx, "Action retrieved", fields) + logtrace.Info(ctx, "register: action fetched", fields) task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) /* 2. Verify action fee -------------------------------------------------------- */ if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { return err } - logtrace.Info(ctx, "Action fee verified", fields) + logtrace.Info(ctx, "register: fee verified", fields) task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) /* 3. Ensure this super-node is eligible -------------------------------------- */ @@ -93,7 +97,7 @@ func (task *CascadeRegistrationTask) Register( if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { return err } - logtrace.Info(ctx, "Top supernode eligibility confirmed", fields) + logtrace.Info(ctx, "register: top supernode confirmed", fields) task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) /* 4. 
Decode cascade metadata -------------------------------------------------- */ @@ -101,14 +105,14 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } - logtrace.Info(ctx, "Cascade metadata decoded", fields) + logtrace.Info(ctx, "register: metadata decoded", fields) task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) /* 5. Verify data hash --------------------------------------------------------- */ if err := task.verifyDataHash(ctx, req.DataHash, cascadeMeta.DataHash, fields); err != nil { return err } - logtrace.Info(ctx, "Data hash verified", fields) + logtrace.Info(ctx, "register: data hash matched", fields) task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) /* 6. Encode the raw data ------------------------------------------------------ */ @@ -118,7 +122,7 @@ func (task *CascadeRegistrationTask) Register( } // Promote to Info and include symbols directory for quick visibility fields["symbols_dir"] = encResp.SymbolsDir - logtrace.Info(ctx, "Input encoded", fields) + logtrace.Info(ctx, "register: input encoded", fields) task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) /* 7. Signature verification + layout decode ---------------------------------- */ @@ -128,7 +132,7 @@ func (task *CascadeRegistrationTask) Register( if err != nil { return err } - logtrace.Info(ctx, "Signature verified", fields) + logtrace.Info(ctx, "register: signature verified", fields) task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) /* 8. 
Generate RQ-ID files ----------------------------------------------------- */ @@ -138,25 +142,25 @@ func (task *CascadeRegistrationTask) Register( } // Include count of ID files generated for visibility fields["id_files_count"] = len(rqidResp.RedundantMetadataFiles) - logtrace.Info(ctx, "RQID files generated", fields) + logtrace.Info(ctx, "register: rqid files generated", fields) task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) /* 9. Consistency checks ------------------------------------------------------- */ if err := verifyIDs(layout, encResp.Metadata); err != nil { return task.wrapErr(ctx, "failed to verify IDs", err, fields) } - logtrace.Info(ctx, "RQIDs verified", fields) + logtrace.Info(ctx, "register: rqids validated", fields) task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) /* 10. Simulate finalize to avoid storing artefacts if it would fail ---------- */ if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "Finalize simulation failed", fields) + logtrace.Info(ctx, "register: finalize simulation failed", fields) // Emit explicit simulation failure event for client visibility task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) return task.wrapErr(ctx, "finalize action simulation failed", err, fields) } - logtrace.Info(ctx, "Finalize simulation passed", fields) + logtrace.Info(ctx, "register: finalize simulation passed", fields) // Transmit as a standard event so SDK can propagate it (dedicated type) task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) @@ -174,12 +178,12 @@ func (task *CascadeRegistrationTask) Register( resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) if err != nil { fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "Finalize 
action error", fields) + logtrace.Info(ctx, "register: finalize action error", fields) return task.wrapErr(ctx, "failed to finalize action", err, fields) } txHash := resp.TxResponse.TxHash fields[logtrace.FieldTxHash] = txHash - logtrace.Info(ctx, "Action finalized", fields) + logtrace.Info(ctx, "register: action finalized", fields) task.streamEvent(SupernodeEventTypeActionFinalized, "Action finalized", txHash, send) return nil From 18c2a70ec45d9d875d98b74986f2225e8bc5d45f Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Thu, 2 Oct 2025 21:28:25 +0500 Subject: [PATCH 21/27] enhance tracing --- p2p/kademlia/dht.go | 41 ++++++++--- p2p/kademlia/message.go | 2 + p2p/kademlia/network.go | 98 +++++++++++++++----------- p2p/kademlia/rq_symbols.go | 40 +++++------ pkg/logtrace/datadog.go | 12 ++-- pkg/logtrace/fields.go | 2 + pkg/logtrace/log.go | 19 +++++ sdk/adapters/lumera/adapter.go | 19 +++++ sdk/task/task.go | 17 +---- supernode/services/cascade/download.go | 3 +- supernode/services/cascade/helper.go | 2 + supernode/services/cascade/register.go | 3 +- 12 files changed, 164 insertions(+), 94 deletions(-) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 9a6204a5..0bca2c45 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -817,7 +817,11 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, wg.Wait() netFound := int(atomic.LoadInt32(&networkFound)) - logtrace.Info(ctx, "dht: batch retrieve summary", logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(start).Milliseconds()}) +{ + f := logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } + logtrace.Info(ctx, "dht: batch retrieve summary", f) +} // Record batch retrieve stats for 
internal DHT snapshot window s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(start)) // Also feed retrieve counts into the per-task collector for stream events @@ -1072,12 +1076,20 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, func (s *DHT) doBatchGetValuesCall(ctx context.Context, node *Node, requestKeys map[string]KeyValWithClosest) (map[string]KeyValWithClosest, error) { request := s.newMessage(BatchGetValues, node, &BatchGetValuesRequest{Data: requestKeys}) - logtrace.Info(ctx, "dht: batch get send", logtrace.Fields{"node": node.String(), "keys": len(requestKeys)}) +{ + f := logtrace.Fields{"node": node.String(), "keys": len(requestKeys), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } + logtrace.Info(ctx, "dht: batch get send", f) +} response, err := s.network.Call(ctx, request, false) if err != nil { return nil, fmt.Errorf("network call request %s failed: %w", request.String(), err) } - logtrace.Info(ctx, "dht: batch get ok", logtrace.Fields{"node": node.String()}) +{ + f := logtrace.Fields{"node": node.String(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } + logtrace.Info(ctx, "dht: batch get ok", f) +} resp, ok := response.Data.(*BatchGetValuesResponse) if !ok { @@ -1685,12 +1697,11 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i knownNodes := make(map[string]*Node) hashes := make([][]byte, len(values)) - logtrace.Info(ctx, "dht: batch store start", logtrace.Fields{ - logtrace.FieldModule: "dht", - "task_id": id, - "keys": len(values), - "len_nodes": len(s.ht.nodes()), - }) +{ + f := logtrace.Fields{logtrace.FieldModule: "dht", "task_id": id, "keys": len(values), "len_nodes": len(s.ht.nodes()), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o 
} + logtrace.Info(ctx, "dht: batch store start", f) +} for i := 0; i < len(values); i++ { target, _ := utils.Blake3Hash(values[i]) hashes[i] = target @@ -1837,7 +1848,11 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ totalBytes += len(values[idx]) } - logtrace.Info(ctx, "dht: batch store RPC send", logtrace.Fields{logtrace.FieldModule: "dht", "node": receiver.String(), "keys": len(toStore), "size_mb": utils.BytesIntToMB(totalBytes)}) + { + f := logtrace.Fields{logtrace.FieldModule: "dht", "node": receiver.String(), "keys": len(toStore), "size_mb": utils.BytesIntToMB(totalBytes), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } + logtrace.Info(ctx, "dht: batch store RPC send", f) + } // Skip empty payloads: avoid sending empty store RPCs and do not record no-op metrics. if len(toStore) == 0 { @@ -1863,7 +1878,11 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ return } - logtrace.Info(ctx, "dht: batch store RPC ok", logtrace.Fields{logtrace.FieldModule: "p2p", "node": receiver.String(), "keys": len(toStore), "ms": dur}) + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "node": receiver.String(), "keys": len(toStore), "ms": dur, logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } + logtrace.Info(ctx, "dht: batch store RPC ok", f) + } responses <- &MessageWithError{Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur} } }(node, key) diff --git a/p2p/kademlia/message.go b/p2p/kademlia/message.go index a6483b2d..4f778d1f 100644 --- a/p2p/kademlia/message.go +++ b/p2p/kademlia/message.go @@ -69,6 +69,8 @@ type Message struct { // CorrelationID carries a best-effort trace identifier so that logs // across nodes can be joined in external systems. 
CorrelationID string + // Origin carries the phase that produced this message (first_pass | worker | download) + Origin string } func (m *Message) String() string { diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index d03e10cd..dc0552fc 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -412,10 +412,15 @@ func (s *Network) handleConn(ctx context.Context, rawConn net.Conn) { }) return } - // stitch correlation id into context for downstream handler logs - if request != nil && strings.TrimSpace(request.CorrelationID) != "" { - ctx = logtrace.CtxWithCorrelationID(ctx, request.CorrelationID) - } + // stitch correlation + origin into context for downstream handler logs + if request != nil { + if s := strings.TrimSpace(request.CorrelationID); s != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, s) + } + if o := strings.TrimSpace(request.Origin); o != "" { + ctx = logtrace.CtxWithOrigin(ctx, o) + } + } reqID := uuid.New().String() mt := request.MessageType @@ -598,12 +603,17 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes idStr := string(request.Receiver.ID) remoteAddr := fmt.Sprintf("%s@%s:%d", idStr, strings.TrimSpace(request.Receiver.IP), request.Receiver.Port) // Log raw RPC start (reduce noise: Info only for high-signal messages) - startFields := logtrace.Fields{ - logtrace.FieldModule: "p2p", - "remote": remoteAddr, - "message": msgName(request.MessageType), - "timeout_ms": int64(timeout / time.Millisecond), - } + startFields := logtrace.Fields{ + logtrace.FieldModule: "p2p", + "remote": remoteAddr, + "message": msgName(request.MessageType), + "timeout_ms": int64(timeout / time.Millisecond), + } + // Tag role/origin for filtering + startFields[logtrace.FieldRole] = "client" + if o := logtrace.OriginFromContext(ctx); o != "" { + startFields[logtrace.FieldOrigin] = o + } if isHighSignalMsg(request.MessageType) { logtrace.Info(ctx, fmt.Sprintf("RPC %s start remote=%s timeout_ms=%d", 
msgName(request.MessageType), remoteAddr, int64(timeout/time.Millisecond)), startFields) } else { @@ -611,11 +621,14 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes } // Attach correlation id only for high‑signal messages (store/retrieve batches) - if isHighSignalMsg(request.MessageType) { - if cid := logtrace.CorrelationIDFromContext(ctx); cid != "unknown" { - request.CorrelationID = cid - } - } + if isHighSignalMsg(request.MessageType) { + if cid := logtrace.CorrelationIDFromContext(ctx); cid != "unknown" { + request.CorrelationID = cid + } + if o := logtrace.OriginFromContext(ctx); o != "" { + request.Origin = o + } + } // try get from pool s.connPoolMtx.Lock() @@ -743,11 +756,13 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd return nil, errors.Errorf("conn read: %w", e) } // Single-line completion for successful outbound RPC - if isHighSignalMsg(msgType) { - logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds()}) - } else { - logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds()}) - } + if isHighSignalMsg(msgType) { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } + logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), 
remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) + } return r, nil } } @@ -833,11 +848,13 @@ Retry: s.dropFromPool(remoteAddr, conn) return nil, errors.Errorf("conn read: %w", err) } - if isHighSignalMsg(msgType) { - logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds()}) - } else { - logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds()}) - } + if isHighSignalMsg(msgType) { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } + logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) + } return resp, nil } @@ -1004,12 +1021,11 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) } - logtrace.Info(ctx, "network: batch get values ok", logtrace.Fields{ - logtrace.FieldModule: "p2p", - "requested-keys": 
len(keys), - "found": count, - "sender": message.Sender.String(), - }) + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "requested-keys": len(keys), "found": count, "sender": message.Sender.String(), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } + logtrace.Info(ctx, "network: batch get values ok", f) + } for i, key := range keys { val := KeyValWithClosest{ @@ -1247,11 +1263,11 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r } // log.P2P().WithContext(ctx).Info("handle batch store data request received") - logtrace.Info(ctx, "network: batch store recv", logtrace.Fields{ - logtrace.FieldModule: "p2p", - "sender": message.Sender.String(), - "keys": len(request.Data), - }) + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } + logtrace.Info(ctx, "network: batch store recv", f) + } // add the sender to queries hash table s.dht.addNode(ctx, message.Sender) @@ -1277,11 +1293,11 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r }, } // log.P2P().WithContext(ctx).Info("handle batch store data request processed") - logtrace.Info(ctx, "network: batch store ok", logtrace.Fields{ - logtrace.FieldModule: "p2p", - "sender": message.Sender.String(), - "keys": len(request.Data), - }) + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } + logtrace.Info(ctx, "network: batch store ok", f) + } // new a response message resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response) diff --git a/p2p/kademlia/rq_symbols.go b/p2p/kademlia/rq_symbols.go index f1956dc4..85367dec 100644 --- 
a/p2p/kademlia/rq_symbols.go +++ b/p2p/kademlia/rq_symbols.go @@ -53,9 +53,9 @@ func (s *DHT) storeSymbols(ctx context.Context) error { } start := time.Now() logtrace.Info(wctx, "worker: dir start", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "symbols": preCount}) - if err := s.scanDirAndStoreSymbols(wctx, dir.Dir, dir.TXID); err != nil { - logtrace.Error(wctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) - } + if err := s.scanDirAndStoreSymbols(wctx, dir.Dir, dir.TXID); err != nil { + logtrace.Error(wctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) + } // Post-count remaining symbols remCount := -1 if set, rerr := utils.ReadDirFilenames(dir.Dir); rerr == nil { @@ -86,17 +86,17 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro logtrace.Info(ctx, "p2p-worker: storing ALL RaptorQ symbols", logtrace.Fields{"txid": txid, "dir": dir, "total": len(keys)}) - // Batch-flush at loadSymbolsBatchSize - for start := 0; start < len(keys); { - end := start + loadSymbolsBatchSize - if end > len(keys) { - end = len(keys) - } - if err := s.storeSymbolsInP2P(ctx, dir, keys[start:end]); err != nil { - return err - } - start = end - } + // Batch-flush at loadSymbolsBatchSize + for start := 0; start < len(keys); { + end := start + loadSymbolsBatchSize + if end > len(keys) { + end = len(keys) + } + if err := s.storeSymbolsInP2P(ctx, txid, dir, keys[start:end]); err != nil { + return err + } + start = end + } // Mark this directory as completed in rqstore if err := s.rqstore.SetIsCompleted(txid); err != nil { @@ -108,9 +108,9 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro // --------------------------------------------------------------------- // 2. 
Load → StoreBatch → Delete for a slice of keys // --------------------------------------------------------------------- -func (s *DHT) storeSymbolsInP2P(ctx context.Context, dir string, keys []string) error { +func (s *DHT) storeSymbolsInP2P(ctx context.Context, txid, dir string, keys []string) error { // Per-batch visibility for background worker - logtrace.Info(ctx, "worker: batch send", logtrace.Fields{"dir": dir, "keys": len(keys)}) + logtrace.Info(ctx, "worker: batch send", logtrace.Fields{"dir": dir, "keys": len(keys), logtrace.FieldTaskID: txid}) start := time.Now() loaded, err := utils.LoadSymbols(dir, keys) @@ -118,11 +118,11 @@ func (s *DHT) storeSymbolsInP2P(ctx context.Context, dir string, keys []string) return fmt.Errorf("load symbols: %w", err) } - if err := s.StoreBatch(ctx, loaded, 1, dir); err != nil { - return fmt.Errorf("p2p store batch: %w", err) - } + if err := s.StoreBatch(ctx, loaded, 1, txid); err != nil { + return fmt.Errorf("p2p store batch: %w", err) + } - logtrace.Info(ctx, "worker: batch ok", logtrace.Fields{"dir": dir, "keys": len(loaded), "ms": time.Since(start).Milliseconds()}) + logtrace.Info(ctx, "worker: batch ok", logtrace.Fields{"dir": dir, "keys": len(loaded), "ms": time.Since(start).Milliseconds(), logtrace.FieldTaskID: txid}) if err := utils.DeleteSymbols(ctx, dir, keys); err != nil { return fmt.Errorf("delete symbols: %w", err) diff --git a/pkg/logtrace/datadog.go b/pkg/logtrace/datadog.go index 95830836..6fb0ba86 100644 --- a/pkg/logtrace/datadog.go +++ b/pkg/logtrace/datadog.go @@ -116,10 +116,14 @@ func ddForward(level zapcore.Level, ctx context.Context, msg string, fields Fiel for k, v := range fields { attrs[k] = v } - // Attach correlation ID if present - if cid := extractCorrelationID(ctx); cid != "unknown" { - attrs["correlation_id"] = cid - } + // Attach correlation ID if present + if cid := extractCorrelationID(ctx); cid != "unknown" { + attrs["correlation_id"] = cid + } + // Attach origin/phase if present 
(first_pass | worker | download) + if o := OriginFromContext(ctx); o != "" { + attrs["origin"] = o + } entry := map[string]any{ "message": msg, diff --git a/pkg/logtrace/fields.go b/pkg/logtrace/fields.go index 8554137b..40e4e5f1 100644 --- a/pkg/logtrace/fields.go +++ b/pkg/logtrace/fields.go @@ -5,6 +5,8 @@ type Fields map[string]interface{} const ( FieldCorrelationID = "correlation_id" + FieldOrigin = "origin" + FieldRole = "role" FieldMethod = "method" FieldModule = "module" FieldError = "error" diff --git a/pkg/logtrace/log.go b/pkg/logtrace/log.go index 2a35fe38..469b32e8 100644 --- a/pkg/logtrace/log.go +++ b/pkg/logtrace/log.go @@ -16,6 +16,9 @@ type ContextKey string // CorrelationIDKey is the key for storing correlation ID in context const CorrelationIDKey ContextKey = "correlation_id" +// OriginKey marks which phase produced the log (first_pass | worker | download) +const OriginKey ContextKey = "origin" + var ( logger *zap.Logger minLevel zapcore.Level = zapcore.InfoLevel // effective minimum log level @@ -91,6 +94,22 @@ func CorrelationIDFromContext(ctx context.Context) string { return extractCorrelationID(ctx) } +// CtxWithOrigin stores a phase/origin tag in context +func CtxWithOrigin(ctx context.Context, origin string) context.Context { + if origin == "" { + return ctx + } + return context.WithValue(ctx, OriginKey, origin) +} + +// OriginFromContext returns the origin tag from context or "" +func OriginFromContext(ctx context.Context) string { + if v, ok := ctx.Value(OriginKey).(string); ok { + return v + } + return "" +} + // extractCorrelationID retrieves the correlation ID from context func extractCorrelationID(ctx context.Context) string { if correlationID, ok := ctx.Value(CorrelationIDKey).(string); ok { diff --git a/sdk/adapters/lumera/adapter.go b/sdk/adapters/lumera/adapter.go index 8fe7a1fb..1c20acdd 100644 --- a/sdk/adapters/lumera/adapter.go +++ b/sdk/adapters/lumera/adapter.go @@ -13,6 +13,7 @@ import ( lumeraclient 
"github.com/LumeraProtocol/supernode/v2/pkg/lumera" "github.com/cosmos/cosmos-sdk/crypto/keyring" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" "github.com/golang/protobuf/proto" ) @@ -25,6 +26,8 @@ type Client interface { GetSupernodeWithLatestAddress(ctx context.Context, address string) (*SuperNodeInfo, error) DecodeCascadeMetadata(ctx context.Context, action Action) (actiontypes.CascadeMetadata, error) VerifySignature(ctx context.Context, accountAddr string, data []byte, signature []byte) error + // GetBalance returns the bank balance for the given address and denom. + GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) } // SuperNodeInfo contains supernode information with latest address @@ -213,6 +216,22 @@ func (a *Adapter) VerifySignature(ctx context.Context, accountAddr string, data, return nil } +// GetBalance fetches the balance for a given address and denom via the underlying lumera client. 
+func (a *Adapter) GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) { + a.logger.Debug(ctx, "Querying bank balance", "address", address, "denom", denom) + resp, err := a.client.Bank().Balance(ctx, address, denom) + if err != nil { + a.logger.Error(ctx, "Failed to query bank balance", "address", address, "denom", denom, "error", err) + return nil, fmt.Errorf("failed to query bank balance: %w", err) + } + if resp == nil || resp.Balance == nil { + a.logger.Error(ctx, "Nil balance response", "address", address, "denom", denom) + return nil, fmt.Errorf("nil balance response for %s %s", address, denom) + } + a.logger.Debug(ctx, "Successfully fetched bank balance", "amount", resp.Balance.Amount.String(), "denom", resp.Balance.Denom) + return resp, nil +} + // DecodeCascadeMetadata decodes the raw metadata bytes into CascadeMetadata func (a *Adapter) DecodeCascadeMetadata(ctx context.Context, action Action) (actiontypes.CascadeMetadata, error) { if action.ActionType != "ACTION_TYPE_CASCADE" { diff --git a/sdk/task/task.go b/sdk/task/task.go index 97295902..2a87b201 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -9,7 +9,6 @@ import ( sdkmath "cosmossdk.io/math" "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - plumera "github.com/LumeraProtocol/supernode/v2/pkg/lumera" txmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" snsvc "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" @@ -146,22 +145,8 @@ func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { return false } - // Finally, ensure the supernode account has a positive balance in the default fee denom. - // Use pkg/lumera to query bank balance from the chain. 
- cfg, err := plumera.NewConfig(t.config.Lumera.GRPCAddr, t.config.Lumera.ChainID, t.config.Account.KeyName, t.keyring) - if err != nil { - logtrace.Debug(ctx, "Failed to build lumera client config for balance check", logtrace.Fields{"error": err.Error()}) - return false - } - lc, err := plumera.NewClient(ctx, cfg) - if err != nil { - logtrace.Debug(ctx, "Failed to create lumera client for balance check", logtrace.Fields{"error": err.Error()}) - return false - } - defer lc.Close() - denom := txmod.DefaultFeeDenom // base denom (micro), e.g., "ulume" - bal, err := lc.Bank().Balance(ctx, sn.CosmosAddress, denom) + bal, err := t.client.GetBalance(ctx, sn.CosmosAddress, denom) if err != nil || bal == nil || bal.Balance == nil { return false } diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index 43d2e3b8..3b30d7e4 100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -47,9 +47,10 @@ func (task *CascadeRegistrationTask) Download( req *DownloadRequest, send func(resp *DownloadResponse) error, ) (err error) { - // Seed correlation ID from actionID for downstream logs + // Seed correlation ID and origin from actionID for downstream logs if req != nil && req.ActionID != "" { ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + ctx = logtrace.CtxWithOrigin(ctx, "download") } fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} logtrace.Info(ctx, "download: request", fields) diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go index fea1d47e..c9e91106 100644 --- a/supernode/services/cascade/helper.go +++ b/supernode/services/cascade/helper.go @@ -187,6 +187,8 @@ func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionI for k, v := range f { lf[k] = v } + // Tag the flow as first-pass just before handing over to P2P + ctx = logtrace.CtxWithOrigin(ctx, "first_pass") logtrace.Info(ctx, "store: 
first-pass begin", lf) if err := task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go index 3afac227..5fc5cdbb 100644 --- a/supernode/services/cascade/register.go +++ b/supernode/services/cascade/register.go @@ -44,9 +44,10 @@ func (task *CascadeRegistrationTask) Register( req *RegisterRequest, send func(resp *RegisterResponse) error, ) (err error) { - // Seed correlation ID from actionID so logs across layers can be joined + // Seed correlation ID and origin so logs across layers can be joined and filtered if req != nil && req.ActionID != "" { ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + ctx = logtrace.CtxWithOrigin(ctx, "first_pass") } fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} From cae661d60842ac8e37fcc978a5088653ea1c8b12 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Fri, 3 Oct 2025 16:47:35 +0500 Subject: [PATCH 22/27] Metrics Cleanup --- Makefile | 2 +- docs/cascade-store-artifacts.md | 28 +- docs/p2p-metrics-capture.md | 197 +- gen/supernode/service.pb.go | 263 +++ .../{supernode.pb.gw.go => service.pb.gw.go} | 2 +- ...node.swagger.json => service.swagger.json} | 114 +- ...upernode_grpc.pb.go => service_grpc.pb.go} | 4 +- gen/supernode/status.pb.go | 1444 ++++++++++++ gen/supernode/status.swagger.json | 44 + gen/supernode/supernode.pb.go | 2034 ----------------- p2p/kademlia/dht.go | 113 +- p2p/kademlia/network.go | 162 +- p2p/kademlia/recent.go | 90 - p2p/p2p.go | 23 +- pkg/p2pmetrics/metrics.go | 397 ---- proto/proto.go | 6 - proto/supernode/service.proto | 34 + .../{supernode.proto => status.proto} | 65 +- sdk/README.md | 27 +- sdk/action/client.go | 6 +- sdk/adapters/supernodeservice/adapter.go | 257 +-- sdk/adapters/supernodeservice/options.go | 29 - sdk/adapters/supernodeservice/types.go | 101 +- sdk/event/keys.go | 23 +- sdk/net/client.go | 11 +- sdk/net/impl.go | 43 +- sdk/task/task.go | 
5 +- supernode/cmd/start.go | 36 +- .../node/supernode/server/status_server.go | 63 +- supernode/services/cascade/adaptors/p2p.go | 17 +- supernode/services/cascade/config.go | 4 +- supernode/services/cascade/download.go | 38 +- supernode/services/cascade/helper.go | 10 +- supernode/services/cascade/register.go | 6 +- supernode/services/cascade/service.go | 2 +- supernode/services/common/storage/handler.go | 5 - .../services/common/supernode/service.go | 99 +- supernode/services/common/supernode/types.go | 36 +- tests/integration/p2p/p2p_integration_test.go | 2 +- 39 files changed, 1997 insertions(+), 3845 deletions(-) create mode 100644 gen/supernode/service.pb.go rename gen/supernode/{supernode.pb.gw.go => service.pb.gw.go} (99%) rename gen/supernode/{supernode.swagger.json => service.swagger.json} (81%) rename gen/supernode/{supernode_grpc.pb.go => service_grpc.pb.go} (98%) create mode 100644 gen/supernode/status.pb.go create mode 100644 gen/supernode/status.swagger.json delete mode 100644 gen/supernode/supernode.pb.go delete mode 100644 p2p/kademlia/recent.go delete mode 100644 pkg/p2pmetrics/metrics.go delete mode 100644 proto/proto.go create mode 100644 proto/supernode/service.proto rename proto/supernode/{supernode.proto => status.proto} (71%) delete mode 100644 sdk/adapters/supernodeservice/options.go diff --git a/Makefile b/Makefile index 81773d3b..fd9dfebf 100644 --- a/Makefile +++ b/Makefile @@ -98,7 +98,7 @@ gen-supernode: --grpc-gateway_out=gen \ --grpc-gateway_opt=paths=source_relative \ --openapiv2_out=gen \ - proto/supernode/supernode.proto + proto/supernode/service.proto proto/supernode/status.proto # Define the paths SUPERNODE_SRC=supernode/main.go diff --git a/docs/cascade-store-artifacts.md b/docs/cascade-store-artifacts.md index 880f5418..c2cf4892 100644 --- a/docs/cascade-store-artifacts.md +++ b/docs/cascade-store-artifacts.md @@ -1,6 +1,6 @@ # Cascade Artefacts Storage Flow -This document explains, in depth, how Cascade artefacts (ID files + 
RaptorQ symbols) are persisted to the P2P network, the control flow from the API to the P2P layer, what metrics are collected, and which background workers continue the process after the API call returns. +This document explains how Cascade artefacts (ID files + RaptorQ symbols) are persisted to the P2P network, the control flow from the API to the P2P layer, and which background workers continue the process after the API call returns. ## Scope & Terminology @@ -50,14 +50,13 @@ Function: `supernode/services/cascade/helper.go::storeArtefacts` - `SymbolsDir string`: filesystem directory where symbols were written. - `TaskID string` and `ActionID string`: identifiers for logging and DB association. -Returns `StoreArtefactsMetrics` with separate metrics for metadata and symbols plus an aggregated view. +Does not return metrics; logs provide visibility. ## P2P Adaptor: StoreArtefacts Implementation: `supernode/services/cascade/adaptors/p2p.go` -1) Store metadata (ID files) using `p2p.Client.StoreBatch(...)`: - - Returns `metaRatePct` and `metaRequests` (count of per‑node RPCs attempted during this batch store). +1) Store metadata (ID files) using `p2p.Client.StoreBatch(...)`. 2) Store symbols using `storeCascadeSymbols(...)`: - Records the symbol directory in a small SQLite store: `rqStore.StoreSymbolDirectory(taskID, symbolsDir)`. @@ -65,12 +64,10 @@ Implementation: `supernode/services/cascade/adaptors/p2p.go` - Streams symbols in fixed‑size batches of 2,500 files: - Each batch loads files, calls `p2p.Client.StoreBatch(...)` with a 5‑minute timeout, and deletes successfully uploaded files. - Marks “first batch stored” for this action: `rqStore.UpdateIsFirstBatchStored(actionID)`. - - Returns `(symRatePct, symCount, symRequests)`. + - Logs counts and timings; no metrics are returned. -3) Aggregation and return: - - Computes item‑weighted aggregate success rate across metadata and symbols: `aggRate = (metaRate*metaCount + symRate*symCount) / (metaCount + symCount)`. 
- - Total requests = `metaRequests + symRequests`. - - Returns `StoreArtefactsMetrics` with all fields populated. +3) Return: + - No metrics aggregation; return indicates success/failure only. Notes: - This adaptor only performs a first pass of symbol storage. For large directories it may downsample; the background worker completes the remaining symbols later (see Background Worker section). @@ -83,9 +80,7 @@ Notes: - Network store: `DHT.IterateBatchStore(ctx, values, typ, taskID)`: - For each value, compute its Blake3 hash; compute the top‑K closest nodes from the routing table. - Build a node→items map and invoke `batchStoreNetwork(...)` with bounded concurrency (a goroutine per node, limited via a semaphore; all joined before returning). - - Tally per‑node RPC attempts (requests) and successes to compute `successRatePct`. - - If the measured rate is below `minimumDataStoreSuccessRate` (75%), return an error along with `(ratePct, requests)`. - - Otherwise, return `(ratePct, requests, nil)`. + - If the measured success rate is below an internal threshold, DHT returns an error. Important distinctions: - `requests` is the number of per‑node RPCs attempted; it is not the number of items in the batch. @@ -93,13 +88,7 @@ Important distinctions: ## Metrics & Events -Returned metrics (from `StoreArtefacts`): - -- Metadata: `MetaRate` (%), `MetaRequests`, `MetaCount`. -- Symbols: `SymRate` (%), `SymRequests`, `SymCount`. -- Aggregate: `AggregatedRate` (item‑weighted), `TotalRequests`. - -`Register` logs and emits a single event line summarizing these metrics via `emitArtefactsStored(...)`, then proceeds to finalize the action on chain. +`Register` logs and emits an informational event (Artefacts stored), then proceeds to finalize the action on chain. ## Background Worker (Symbols Continuation) @@ -161,4 +150,3 @@ These values can be tuned in: - First pass deletes uploaded symbol files per batch (`utils.DeleteSymbols`) after a successful store batch. 
- Background worker also deletes files after each batch store. - The uploaded raw input file is removed by `Register` in a `defer` block regardless of outcome. - diff --git a/docs/p2p-metrics-capture.md b/docs/p2p-metrics-capture.md index 6cbafebf..b13bc393 100644 --- a/docs/p2p-metrics-capture.md +++ b/docs/p2p-metrics-capture.md @@ -1,186 +1,23 @@ -# P2P Metrics Capture — What Each Field Means and Where It’s Collected +# P2P Metrics — Current Behavior -This guide explains every field we emit in Cascade events, how it is measured, and exactly where it is captured in the code. +We removed the custom per‑RPC metrics capture and the `pkg/p2pmetrics` package. Logs are the source of truth for store/retrieve visibility, and the Status API provides a rolling DHT snapshot for high‑level metrics. -The design is minimal by intent: -- Metrics are collected only for the first pass of Register (store) and for the active Download operation. -- P2P APIs return errors only; per‑RPC details are captured via a small metrics package (`pkg/p2pmetrics`). -- No aggregation; we only group raw RPC attempts by IP. +What remains +- Status API metrics: DHT rolling windows (store success, batch retrieve), network handle counters, ban list, DB/disk stats, and connection pool metrics. +- Logs: detailed send/ok/fail lines for RPCs at both client and server. ---- +What was removed +- Per‑RPC metrics capture and grouping by IP for events. +- Metrics collectors and context tagging helpers. +- Recent per‑request lists from the Status API. -## Store (Register) Event +Events +- The supernode emits minimal events (e.g., artefacts stored, downloaded). These events no longer include metrics payloads. Use logs for detailed troubleshooting. -Event payload shape +Status API +- To include P2P metrics and peer info, clients set `include_p2p_metrics=true` on `StatusRequest`. +- The SDK adapter already includes this flag by default to populate peer count for eligibility checks. 
-```json -{ - "store": { - "duration_ms": 9876, - "symbols_first_pass": 220, - "symbols_total": 1200, - "id_files_count": 14, - "success_rate_pct": 82.5, - "calls_by_ip": { - "10.0.0.5": [ - {"ip": "10.0.0.5", "address": "A:4445", "keys": 100, "success": true, "duration_ms": 120}, - {"ip": "10.0.0.5", "address": "A:4445", "keys": 120, "success": false, "error": "timeout", "duration_ms": 300} - ] - } - } -} -``` - -### Fields - -- `store.duration_ms` - - Meaning: End‑to‑end elapsed time of the first‑pass store phase (Register’s storage section only). - - Where captured: `supernode/services/cascade/adaptors/p2p.go` - - A `time.Now()` timestamp is taken just before the first‑pass store function and measured on return. - -- `store.symbols_first_pass` - - Meaning: Number of symbols sent during the Register first pass (across the combined first batch and any immediate first‑pass symbol batches). - - Where captured: `supernode/services/cascade/adaptors/p2p.go` via `p2pmetrics.SetStoreSummary(...)` using the value returned by `storeCascadeSymbolsAndData`. - -- `store.symbols_total` - - Meaning: Total symbols available in the symbol directory (before sampling). Used to contextualize the first‑pass coverage. - - Where captured: Computed in `storeCascadeSymbolsAndData` and included in `SetStoreSummary`. - -- `store.id_files_count` - - Meaning: Number of redundant metadata files (ID files) sent in the first combined batch. - - Where captured: `len(req.IDFiles)` in `StoreArtefacts`, passed to `SetStoreSummary`. - -- `store.calls_by_ip` - - Meaning: All raw network store RPC attempts grouped by the node IP. - - Each array entry is a single RPC attempt with: - - `ip` — Node IP (fallback to `address` if missing). - - `address` — Node string `IP:port`. - - `keys` — Number of items in that RPC attempt (metadata + first symbols for the first combined batch, symbols for subsequent batches within the first pass). 
- - `success` — True if there was no transport error and no error message returned by the node response. Note: this flag does not explicitly check the `ResultOk` status; in rare cases, a non‑OK response with an empty error message may appear as `success` in metrics. (Internal success‑rate enforcement still uses explicit response status.) - - `error` — Any error string captured; omitted when success. - - `duration_ms` — RPC duration in milliseconds. - - `noop` — Present and `true` when no store payload was sent to the node (empty batch for that node). Such entries are recorded as `success=true`, `keys=0`, with no `error`. - - Where captured: - - Emission point (P2P): `p2p/kademlia/dht.go::IterateBatchStore(...)` - - After each node RPC returns, we call `p2pmetrics.RecordStore(taskID, Call{...})`. For nodes with no payload, a `noop: true` entry is emitted without sending a wire RPC. - - `taskID` is read from the context via `p2pmetrics.TaskIDFromContext(ctx)`. - - Grouping: `pkg/p2pmetrics/metrics.go` - - `StartStoreCapture(taskID)` enables capture; `StopStoreCapture(taskID)` disables it. - - Calls are grouped by `ip` (fallback to `address`) without further aggregation. - -- `store.success_rate_pct` - - Meaning: First‑pass store success rate computed from captured per‑RPC outcomes: successful responses divided by total recorded store RPC attempts, expressed as a percentage. - - Where captured: Computed in `pkg/p2pmetrics/metrics.go::BuildStoreEventPayloadFromCollector` from `calls_by_ip` data. - -### First‑Pass Success Threshold - -- Internal enforcement only: if DHT first‑pass success rate is below 75%, `IterateBatchStore` returns an error. -- We also emit `store.success_rate_pct` for analytics; the threshold only affects control flow (errors), not the emitted metric. -- Code: `p2p/kademlia/dht.go::IterateBatchStore`. 
- -### Scope Limits - -- Background worker (which continues storing remaining symbols) is NOT captured — we don’t set a metrics task ID on those paths. - ---- - -## Download Event - -Event payload shape - -```json -{ - "retrieve": { - "found_local": 42, - "retrieve_ms": 2000, - "decode_ms": 8000, - "calls_by_ip": { - "10.0.0.7": [ - {"ip": "10.0.0.7", "address": "B:4445", "keys": 13, "success": true, "duration_ms": 90} - ] - } - } -} -``` - -### Fields - -- `retrieve.found_local` - - Meaning: Number of items retrieved from local storage before any network calls. - - Where captured: `p2p/kademlia/dht.go::BatchRetrieve(...)` - - After `fetchAndAddLocalKeys`, we call `p2pmetrics.ReportFoundLocal(taskID, int(foundLocalCount))`. - - `taskID` is read from context with `p2pmetrics.TaskIDFromContext(ctx)`. - -- `retrieve.retrieve_ms` - - Meaning: Time spent in network batch‑retrieve. - - Where captured: `supernode/services/cascade/download.go` - - Timestamp before `BatchRetrieve`, measured after it returns. - -- `retrieve.decode_ms` - - Meaning: Time spent decoding symbols and reconstructing the file. - - Where captured: `supernode/services/cascade/download.go` - - Timestamp before decode, measured after it returns. - -- `retrieve.calls_by_ip` - - Meaning: All raw per‑RPC retrieve attempts grouped by node IP. - - Each array entry is a single RPC attempt with: - - `ip`, `address` — Identifiers as available. - - `keys` — Number of symbols returned by that node in that call. - - `success` — True if the RPC completed without error (even if `keys == 0`). Transport/status errors remain `success=false` with an `error` message. - - `error` — Error string when the RPC failed; omitted otherwise. - - `duration_ms` — RPC duration in milliseconds. - - `noop` — Present and `true` when no network request was actually sent to the node (e.g., all requested keys were already satisfied or deduped before issuing the call). 
Such entries are recorded as `success=true`, `keys=0`, with no `error`. - - Where captured: - - Emission point (P2P): `p2p/kademlia/dht.go::iterateBatchGetValues(...)` - - Each node attempt records a `p2pmetrics.RecordRetrieve(taskID, Call{...})`. For attempts where no network RPC is sent, a `noop: true` entry is emitted. - - `taskID` is extracted from context using `p2pmetrics.TaskIDFromContext(ctx)`. - - Grouping: `pkg/p2pmetrics/metrics.go` (same grouping/fallback as store). - -### Scope Limits - -- Metrics are captured only for the active Download call (context is tagged in `download.go`). - ---- - -## Context Tagging (Task ID) - -- We use an explicit, metrics‑only context key defined in `pkg/p2pmetrics` to tag P2P calls with a task ID. - - Setters: `p2pmetrics.WithTaskID(ctx, id)`. - - Getters: `p2pmetrics.TaskIDFromContext(ctx)`. -- Where it is set: - - Store (first pass): `supernode/services/cascade/adaptors/p2p.go` wraps `StoreBatch` calls. - - Download: `supernode/services/cascade/download.go` wraps `BatchRetrieve` call. - ---- - -## Building and Emitting Events - -- Store - - `supernode/services/cascade/helper.go::emitArtefactsStored(...)` - - Builds `store` payload via `p2pmetrics.BuildStoreEventPayloadFromCollector(taskID)`. - - Includes `success_rate_pct` (first‑pass store success rate computed from captured per‑RPC outcomes) in addition to the minimal fields. - - Emits the event. - -- Download - - `supernode/services/cascade/download.go` - - Builds `retrieve` payload via `p2pmetrics.BuildDownloadEventPayloadFromCollector(actionID)`. - - Emits the event. 
- ---- - -## Quick File Map - -- Capture + grouping: `supernode/pkg/p2pmetrics/metrics.go` -- Store adaptor: `supernode/supernode/services/cascade/adaptors/p2p.go` -- Store event: `supernode/supernode/services/cascade/helper.go` -- Download flow: `supernode/supernode/services/cascade/download.go` -- DHT store calls: `supernode/p2p/kademlia/dht.go::IterateBatchStore` -- DHT retrieve calls: `supernode/p2p/kademlia/dht.go::BatchRetrieve` and `iterateBatchGetValues` - ---- - -## Notes - -- No P2P stats/snapshots are used to build events. -- No aggregation is performed; we only group raw RPC attempts by IP. -- First‑pass success rate is enforced internally (75% threshold) but not emitted as a metric. +References +- Status proto: `proto/supernode/status.proto` +- Service proto: `proto/supernode/service.proto` diff --git a/gen/supernode/service.pb.go b/gen/supernode/service.pb.go new file mode 100644 index 00000000..b8399095 --- /dev/null +++ b/gen/supernode/service.pb.go @@ -0,0 +1,263 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.2 +// protoc v3.21.12 +// source: supernode/service.proto + +package supernode + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ListServicesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ListServicesRequest) Reset() { + *x = ListServicesRequest{} + mi := &file_supernode_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListServicesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServicesRequest) ProtoMessage() {} + +func (x *ListServicesRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead. +func (*ListServicesRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{0} +} + +type ListServicesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` + Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` +} + +func (x *ListServicesResponse) Reset() { + *x = ListServicesResponse{} + mi := &file_supernode_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListServicesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServicesResponse) ProtoMessage() {} + +func (x *ListServicesResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + 
return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead. +func (*ListServicesResponse) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListServicesResponse) GetServices() []*ServiceInfo { + if x != nil { + return x.Services + } + return nil +} + +func (x *ListServicesResponse) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +type ServiceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` +} + +func (x *ServiceInfo) Reset() { + *x = ServiceInfo{} + mi := &file_supernode_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ServiceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceInfo) ProtoMessage() {} + +func (x *ServiceInfo) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceInfo.ProtoReflect.Descriptor instead. 
+func (*ServiceInfo) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ServiceInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ServiceInfo) GetMethods() []string { + if x != nil { + return x.Methods + } + return nil +} + +var File_supernode_service_proto protoreflect.FileDescriptor + +var file_supernode_service_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x16, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x60, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x22, 0x3b, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 
0x64, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, + 0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, + 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, + 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, + 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_supernode_service_proto_rawDescOnce sync.Once + file_supernode_service_proto_rawDescData = file_supernode_service_proto_rawDesc +) + +func 
file_supernode_service_proto_rawDescGZIP() []byte { + file_supernode_service_proto_rawDescOnce.Do(func() { + file_supernode_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_service_proto_rawDescData) + }) + return file_supernode_service_proto_rawDescData +} + +var file_supernode_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_supernode_service_proto_goTypes = []any{ + (*ListServicesRequest)(nil), // 0: supernode.ListServicesRequest + (*ListServicesResponse)(nil), // 1: supernode.ListServicesResponse + (*ServiceInfo)(nil), // 2: supernode.ServiceInfo + (*StatusRequest)(nil), // 3: supernode.StatusRequest + (*StatusResponse)(nil), // 4: supernode.StatusResponse +} +var file_supernode_service_proto_depIdxs = []int32{ + 2, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo + 3, // 1: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest + 0, // 2: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest + 4, // 3: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse + 1, // 4: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse + 3, // [3:5] is the sub-list for method output_type + 1, // [1:3] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_supernode_service_proto_init() } +func file_supernode_service_proto_init() { + if File_supernode_service_proto != nil { + return + } + file_supernode_status_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_supernode_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_supernode_service_proto_goTypes, + 
DependencyIndexes: file_supernode_service_proto_depIdxs, + MessageInfos: file_supernode_service_proto_msgTypes, + }.Build() + File_supernode_service_proto = out.File + file_supernode_service_proto_rawDesc = nil + file_supernode_service_proto_goTypes = nil + file_supernode_service_proto_depIdxs = nil +} diff --git a/gen/supernode/supernode.pb.gw.go b/gen/supernode/service.pb.gw.go similarity index 99% rename from gen/supernode/supernode.pb.gw.go rename to gen/supernode/service.pb.gw.go index 0976b8b7..326bccf3 100644 --- a/gen/supernode/supernode.pb.gw.go +++ b/gen/supernode/service.pb.gw.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: supernode/supernode.proto +// source: supernode/service.proto /* Package supernode is a reverse proxy. diff --git a/gen/supernode/supernode.swagger.json b/gen/supernode/service.swagger.json similarity index 81% rename from gen/supernode/supernode.swagger.json rename to gen/supernode/service.swagger.json index 00a47bb8..08140033 100644 --- a/gen/supernode/supernode.swagger.json +++ b/gen/supernode/service.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": "supernode/supernode.proto", + "title": "supernode/service.proto", "version": "version not set" }, "tags": [ @@ -249,92 +249,6 @@ }, "title": "Per-handler counters from network layer" }, - "P2PMetricsRecentBatchRetrieveEntry": { - "type": "object", - "properties": { - "timeUnix": { - "type": "string", - "format": "int64" - }, - "senderId": { - "type": "string" - }, - "senderIp": { - "type": "string" - }, - "requested": { - "type": "integer", - "format": "int32" - }, - "found": { - "type": "integer", - "format": "int32" - }, - "durationMs": { - "type": "string", - "format": "int64" - }, - "error": { - "type": "string" - } - }, - "title": "Last handled BatchGetValues requests (most recent first)" - }, - "P2PMetricsRecentBatchRetrieveList": { - "type": "object", - "properties": { - "entries": { - "type": "array", - "items": { - 
"type": "object", - "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveEntry" - } - } - } - }, - "P2PMetricsRecentBatchStoreEntry": { - "type": "object", - "properties": { - "timeUnix": { - "type": "string", - "format": "int64" - }, - "senderId": { - "type": "string" - }, - "senderIp": { - "type": "string" - }, - "keys": { - "type": "integer", - "format": "int32" - }, - "durationMs": { - "type": "string", - "format": "int64" - }, - "ok": { - "type": "boolean" - }, - "error": { - "type": "string" - } - }, - "title": "Last handled BatchStoreData requests (most recent first)" - }, - "P2PMetricsRecentBatchStoreList": { - "type": "object", - "properties": { - "entries": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/P2PMetricsRecentBatchStoreEntry" - } - } - }, - "title": "Per-IP buckets: last 10 per sender IP" - }, "ResourcesCPU": { "type": "object", "properties": { @@ -450,32 +364,6 @@ }, "disk": { "$ref": "#/definitions/P2PMetricsDiskStatus" - }, - "recentBatchStore": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/P2PMetricsRecentBatchStoreEntry" - } - }, - "recentBatchRetrieve": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveEntry" - } - }, - "recentBatchStoreByIp": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/P2PMetricsRecentBatchStoreList" - } - }, - "recentBatchRetrieveByIp": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveList" - } } }, "title": "P2P metrics and diagnostics (additive field)" diff --git a/gen/supernode/supernode_grpc.pb.go b/gen/supernode/service_grpc.pb.go similarity index 98% rename from gen/supernode/supernode_grpc.pb.go rename to gen/supernode/service_grpc.pb.go index 97eb3a0a..acb2e4c9 100644 --- a/gen/supernode/supernode_grpc.pb.go +++ b/gen/supernode/service_grpc.pb.go @@ -2,7 +2,7 @@ // versions: // - protoc-gen-go-grpc v1.5.1 // 
- protoc v3.21.12 -// source: supernode/supernode.proto +// source: supernode/service.proto package supernode @@ -159,5 +159,5 @@ var SupernodeService_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "supernode/supernode.proto", + Metadata: "supernode/service.proto", } diff --git a/gen/supernode/status.pb.go b/gen/supernode/status.pb.go new file mode 100644 index 00000000..52045726 --- /dev/null +++ b/gen/supernode/status.pb.go @@ -0,0 +1,1444 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.2 +// protoc v3.21.12 +// source: supernode/status.proto + +package supernode + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// StatusRequest controls optional metrics in the status response +type StatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional: include detailed P2P metrics in the response + // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true + IncludeP2PMetrics bool `protobuf:"varint,1,opt,name=include_p2p_metrics,json=includeP2pMetrics,proto3" json:"include_p2p_metrics,omitempty"` +} + +func (x *StatusRequest) Reset() { + *x = StatusRequest{} + mi := &file_supernode_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusRequest) ProtoMessage() {} + +func (x *StatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead. 
+func (*StatusRequest) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{0} +} + +func (x *StatusRequest) GetIncludeP2PMetrics() bool { + if x != nil { + return x.IncludeP2PMetrics + } + return false +} + +// The StatusResponse represents system status with clear organization +type StatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version + UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds + Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + RunningTasks []*StatusResponse_ServiceTasks `protobuf:"bytes,4,rep,name=running_tasks,json=runningTasks,proto3" json:"running_tasks,omitempty"` // Services with currently running tasks + RegisteredServices []string `protobuf:"bytes,5,rep,name=registered_services,json=registeredServices,proto3" json:"registered_services,omitempty"` // All registered/available services + Network *StatusResponse_Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` // P2P network information + Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) + IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") + P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` +} + +func (x *StatusResponse) Reset() { + *x = StatusResponse{} + mi := &file_supernode_status_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse) String() 
string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse) ProtoMessage() {} + +func (x *StatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. +func (*StatusResponse) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1} +} + +func (x *StatusResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *StatusResponse) GetUptimeSeconds() uint64 { + if x != nil { + return x.UptimeSeconds + } + return 0 +} + +func (x *StatusResponse) GetResources() *StatusResponse_Resources { + if x != nil { + return x.Resources + } + return nil +} + +func (x *StatusResponse) GetRunningTasks() []*StatusResponse_ServiceTasks { + if x != nil { + return x.RunningTasks + } + return nil +} + +func (x *StatusResponse) GetRegisteredServices() []string { + if x != nil { + return x.RegisteredServices + } + return nil +} + +func (x *StatusResponse) GetNetwork() *StatusResponse_Network { + if x != nil { + return x.Network + } + return nil +} + +func (x *StatusResponse) GetRank() int32 { + if x != nil { + return x.Rank + } + return 0 +} + +func (x *StatusResponse) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *StatusResponse) GetP2PMetrics() *StatusResponse_P2PMetrics { + if x != nil { + return x.P2PMetrics + } + return nil +} + +// System resource information +type StatusResponse_Resources struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cpu *StatusResponse_Resources_CPU `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *StatusResponse_Resources_Memory 
`protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` + StorageVolumes []*StatusResponse_Resources_Storage `protobuf:"bytes,3,rep,name=storage_volumes,json=storageVolumes,proto3" json:"storage_volumes,omitempty"` + HardwareSummary string `protobuf:"bytes,4,opt,name=hardware_summary,json=hardwareSummary,proto3" json:"hardware_summary,omitempty"` // Formatted hardware summary (e.g., "8 cores / 32GB RAM") +} + +func (x *StatusResponse_Resources) Reset() { + *x = StatusResponse_Resources{} + mi := &file_supernode_status_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_Resources) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_Resources) ProtoMessage() {} + +func (x *StatusResponse_Resources) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_Resources.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_Resources) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *StatusResponse_Resources) GetCpu() *StatusResponse_Resources_CPU { + if x != nil { + return x.Cpu + } + return nil +} + +func (x *StatusResponse_Resources) GetMemory() *StatusResponse_Resources_Memory { + if x != nil { + return x.Memory + } + return nil +} + +func (x *StatusResponse_Resources) GetStorageVolumes() []*StatusResponse_Resources_Storage { + if x != nil { + return x.StorageVolumes + } + return nil +} + +func (x *StatusResponse_Resources) GetHardwareSummary() string { + if x != nil { + return x.HardwareSummary + } + return "" +} + +// ServiceTasks contains task information for a specific service +type StatusResponse_ServiceTasks struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"` + TaskCount int32 `protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` +} + +func (x *StatusResponse_ServiceTasks) Reset() { + *x = StatusResponse_ServiceTasks{} + mi := &file_supernode_status_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_ServiceTasks) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_ServiceTasks) ProtoMessage() {} + +func (x *StatusResponse_ServiceTasks) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
StatusResponse_ServiceTasks.ProtoReflect.Descriptor instead. +func (*StatusResponse_ServiceTasks) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *StatusResponse_ServiceTasks) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *StatusResponse_ServiceTasks) GetTaskIds() []string { + if x != nil { + return x.TaskIds + } + return nil +} + +func (x *StatusResponse_ServiceTasks) GetTaskCount() int32 { + if x != nil { + return x.TaskCount + } + return 0 +} + +// Network information +type StatusResponse_Network struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" json:"peers_count,omitempty"` // Number of connected peers in P2P network + PeerAddresses []string `protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy) +} + +func (x *StatusResponse_Network) Reset() { + *x = StatusResponse_Network{} + mi := &file_supernode_status_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_Network) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_Network) ProtoMessage() {} + +func (x *StatusResponse_Network) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_Network.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_Network) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *StatusResponse_Network) GetPeersCount() int32 { + if x != nil { + return x.PeersCount + } + return 0 +} + +func (x *StatusResponse_Network) GetPeerAddresses() []string { + if x != nil { + return x.PeerAddresses + } + return nil +} + +// P2P metrics and diagnostics (additive field) +type StatusResponse_P2PMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DhtMetrics *StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"` + NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"` + Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` + Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"` +} + +func (x *StatusResponse_P2PMetrics) Reset() { + *x = StatusResponse_P2PMetrics{} + mi := &file_supernode_status_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics) ProtoMessage() {} + +func (x 
*StatusResponse_P2PMetrics) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *StatusResponse_P2PMetrics) GetDhtMetrics() *StatusResponse_P2PMetrics_DhtMetrics { + if x != nil { + return x.DhtMetrics + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetNetworkHandleMetrics() map[string]*StatusResponse_P2PMetrics_HandleCounters { + if x != nil { + return x.NetworkHandleMetrics + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetConnPoolMetrics() map[string]int64 { + if x != nil { + return x.ConnPoolMetrics + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetBanList() []*StatusResponse_P2PMetrics_BanEntry { + if x != nil { + return x.BanList + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetDatabase() *StatusResponse_P2PMetrics_DatabaseStats { + if x != nil { + return x.Database + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetDisk() *StatusResponse_P2PMetrics_DiskStatus { + if x != nil { + return x.Disk + } + return nil +} + +type StatusResponse_Resources_CPU struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UsagePercent float64 `protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // CPU usage percentage (0-100) + Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores +} + +func (x *StatusResponse_Resources_CPU) Reset() { + *x = StatusResponse_Resources_CPU{} + mi := &file_supernode_status_proto_msgTypes[6] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_Resources_CPU) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_Resources_CPU) ProtoMessage() {} + +func (x *StatusResponse_Resources_CPU) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_Resources_CPU.ProtoReflect.Descriptor instead. +func (*StatusResponse_Resources_CPU) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 0, 0} +} + +func (x *StatusResponse_Resources_CPU) GetUsagePercent() float64 { + if x != nil { + return x.UsagePercent + } + return 0 +} + +func (x *StatusResponse_Resources_CPU) GetCores() int32 { + if x != nil { + return x.Cores + } + return 0 +} + +type StatusResponse_Resources_Memory struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB + UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB + AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" json:"available_gb,omitempty"` // Available memory in GB + UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100) +} + +func (x *StatusResponse_Resources_Memory) Reset() { + *x = StatusResponse_Resources_Memory{} + mi := &file_supernode_status_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x 
*StatusResponse_Resources_Memory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_Resources_Memory) ProtoMessage() {} + +func (x *StatusResponse_Resources_Memory) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_Resources_Memory.ProtoReflect.Descriptor instead. +func (*StatusResponse_Resources_Memory) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 0, 1} +} + +func (x *StatusResponse_Resources_Memory) GetTotalGb() float64 { + if x != nil { + return x.TotalGb + } + return 0 +} + +func (x *StatusResponse_Resources_Memory) GetUsedGb() float64 { + if x != nil { + return x.UsedGb + } + return 0 +} + +func (x *StatusResponse_Resources_Memory) GetAvailableGb() float64 { + if x != nil { + return x.AvailableGb + } + return 0 +} + +func (x *StatusResponse_Resources_Memory) GetUsagePercent() float64 { + if x != nil { + return x.UsagePercent + } + return 0 +} + +type StatusResponse_Resources_Storage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored + TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"` + UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` + AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` + UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100) +} + +func (x 
*StatusResponse_Resources_Storage) Reset() { + *x = StatusResponse_Resources_Storage{} + mi := &file_supernode_status_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_Resources_Storage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_Resources_Storage) ProtoMessage() {} + +func (x *StatusResponse_Resources_Storage) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_Resources_Storage.ProtoReflect.Descriptor instead. +func (*StatusResponse_Resources_Storage) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 0, 2} +} + +func (x *StatusResponse_Resources_Storage) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *StatusResponse_Resources_Storage) GetTotalBytes() uint64 { + if x != nil { + return x.TotalBytes + } + return 0 +} + +func (x *StatusResponse_Resources_Storage) GetUsedBytes() uint64 { + if x != nil { + return x.UsedBytes + } + return 0 +} + +func (x *StatusResponse_Resources_Storage) GetAvailableBytes() uint64 { + if x != nil { + return x.AvailableBytes + } + return 0 +} + +func (x *StatusResponse_Resources_Storage) GetUsagePercent() float64 { + if x != nil { + return x.UsagePercent + } + return 0 +} + +// Rolling DHT metrics snapshot +type StatusResponse_P2PMetrics_DhtMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StoreSuccessRecent []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint `protobuf:"bytes,1,rep,name=store_success_recent,json=storeSuccessRecent,proto3" json:"store_success_recent,omitempty"` + BatchRetrieveRecent 
[]*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint `protobuf:"bytes,2,rep,name=batch_retrieve_recent,json=batchRetrieveRecent,proto3" json:"batch_retrieve_recent,omitempty"` + HotPathBannedSkips int64 `protobuf:"varint,3,opt,name=hot_path_banned_skips,json=hotPathBannedSkips,proto3" json:"hot_path_banned_skips,omitempty"` // counter + HotPathBanIncrements int64 `protobuf:"varint,4,opt,name=hot_path_ban_increments,json=hotPathBanIncrements,proto3" json:"hot_path_ban_increments,omitempty"` // counter +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) Reset() { + *x = StatusResponse_P2PMetrics_DhtMetrics{} + mi := &file_supernode_status_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DhtMetrics) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_P2PMetrics_DhtMetrics) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0} +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetStoreSuccessRecent() []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint { + if x != nil { + return x.StoreSuccessRecent + } + return nil +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetBatchRetrieveRecent() []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint { + if x != nil { + return x.BatchRetrieveRecent + } + return nil +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBannedSkips() int64 { + if x != nil { + return x.HotPathBannedSkips + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBanIncrements() int64 { + if x != nil { + return x.HotPathBanIncrements + } + return 0 +} + +// Per-handler counters from network layer +type StatusResponse_P2PMetrics_HandleCounters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Success int64 `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"` + Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() { + *x = StatusResponse_P2PMetrics_HandleCounters{} + mi := &file_supernode_status_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_HandleCounters) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[10] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_HandleCounters.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_HandleCounters) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 1} +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetSuccess() int64 { + if x != nil { + return x.Success + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetFailure() int64 { + if x != nil { + return x.Failure + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetTimeout() int64 { + if x != nil { + return x.Timeout + } + return 0 +} + +// Ban list entry +type StatusResponse_P2PMetrics_BanEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID + Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP + Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count + CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds) + AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds +} + +func (x *StatusResponse_P2PMetrics_BanEntry) Reset() { + *x = StatusResponse_P2PMetrics_BanEntry{} + mi := &file_supernode_status_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + 
+func (x *StatusResponse_P2PMetrics_BanEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_BanEntry) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_BanEntry.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_BanEntry) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 2} +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetIp() string { + if x != nil { + return x.Ip + } + return "" +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetPort() uint32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetCreatedAtUnix() int64 { + if x != nil { + return x.CreatedAtUnix + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetAgeSeconds() int64 { + if x != nil { + return x.AgeSeconds + } + return 0 +} + +// DB stats +type StatusResponse_P2PMetrics_DatabaseStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"` + P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"` +} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() { + *x = 
StatusResponse_P2PMetrics_DatabaseStats{} + mi := &file_supernode_status_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DatabaseStats) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DatabaseStats.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_DatabaseStats) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 3} +} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbSizeMb() float64 { + if x != nil { + return x.P2PDbSizeMb + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbRecordsCount() int64 { + if x != nil { + return x.P2PDbRecordsCount + } + return 0 +} + +// Disk status +type StatusResponse_P2PMetrics_DiskStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"` + UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"` + FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"` +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() { + *x = StatusResponse_P2PMetrics_DiskStatus{} + mi := &file_supernode_status_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) String() string { 
+ return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DiskStatus) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DiskStatus.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_DiskStatus) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 4} +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) GetAllMb() float64 { + if x != nil { + return x.AllMb + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) GetUsedMb() float64 { + if x != nil { + return x.UsedMb + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) GetFreeMb() float64 { + if x != nil { + return x.FreeMb + } + return 0 +} + +type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted + Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs + SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100) +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() { + *x = StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{} + mi := &file_supernode_status_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0, 0} +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetTimeUnix() int64 { + if x != nil { + return x.TimeUnix + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetRequests() int32 { + if x != nil { + return x.Requests + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessful() int32 { + if x != nil { + return x.Successful + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessRate() float64 { + if x != nil { + return x.SuccessRate + } + return 0 +} + +type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested + Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // required count + FoundLocal int32 
`protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally + FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network + DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() { + *x = StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{} + mi := &file_supernode_status_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0, 1} +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetTimeUnix() int64 { + if x != nil { + return x.TimeUnix + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetKeys() int32 { + if x != nil { + return x.Keys + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetRequired() int32 { + if x != nil { + return x.Required + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundLocal() int32 { + if x != nil { + return x.FoundLocal + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundNetwork() int32 { + if x != nil { + return x.FoundNetwork + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetDurationMs() int64 { + if x != nil { + return x.DurationMs + } + return 0 +} + +var File_supernode_status_proto protoreflect.FileDescriptor + +var file_supernode_status_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x22, 0x3f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x32, 0x70, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x22, 0x84, 0x19, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x75, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x72, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x0c, 0x72, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x75, 0x70, + 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x07, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 
0x69, 0x70, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x70, 0x32, 0x70, + 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x70, 0x32, 0x70, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, + 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x42, 0x0a, 0x06, 0x6d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, 0x75, 0x70, 0x65, + 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x4d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x54, 0x0a, + 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x73, 0x12, 
0x29, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x5f, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x68, + 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x1a, 0x40, + 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, + 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, + 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, + 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x67, + 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x47, 0x62, 0x12, + 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x67, 0x62, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, + 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, + 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0xab, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, + 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x04, 0x52, 0x09, 0x75, 0x73, + 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x76, 0x61, 0x69, 0x6c, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x6b, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x61, 0x73, 0x6b, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x73, 0x6b, + 0x49, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, + 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, + 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xf3, 0x0e, 0x0a, 0x0a, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x68, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, + 
0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, + 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x64, 0x68, 0x74, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x74, 0x0a, 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x65, 0x0a, 0x11, + 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6f, + 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x12, 0x48, 0x0a, 0x08, 0x62, 0x61, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 
0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x6e, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x61, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4e, 0x0a, + 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x32, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x43, 0x0a, + 0x04, 0x64, 0x69, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, + 0x73, 0x6b, 0x1a, 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x12, 0x73, 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, + 0x6e, 0x74, 0x52, 0x12, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, 0x15, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, + 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x63, 
0x65, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, + 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, + 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, + 0x0a, 0x15, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, + 0x64, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, + 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, + 0x73, 0x12, 0x35, 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, + 0x6e, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x14, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, + 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x8f, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, + 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x66, 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 
0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, + 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, + 0x79, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f, + 0x0a, 0x0b, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, + 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x1a, 0x74, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, + 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 
0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, + 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x61, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, + 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, + 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, 0x44, 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d, + 0x62, 0x12, 0x2f, 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x11, 0x70, 0x32, 0x70, 0x44, 0x62, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x1a, 0x55, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x15, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 
0x64, 0x5f, + 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, + 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x06, 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, 0x1a, 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, + 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_supernode_status_proto_rawDescOnce sync.Once + file_supernode_status_proto_rawDescData = file_supernode_status_proto_rawDesc +) + +func 
file_supernode_status_proto_rawDescGZIP() []byte { + file_supernode_status_proto_rawDescOnce.Do(func() { + file_supernode_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_status_proto_rawDescData) + }) + return file_supernode_status_proto_rawDescData +} + +var file_supernode_status_proto_msgTypes = make([]protoimpl.MessageInfo, 18) +var file_supernode_status_proto_goTypes = []any{ + (*StatusRequest)(nil), // 0: supernode.StatusRequest + (*StatusResponse)(nil), // 1: supernode.StatusResponse + (*StatusResponse_Resources)(nil), // 2: supernode.StatusResponse.Resources + (*StatusResponse_ServiceTasks)(nil), // 3: supernode.StatusResponse.ServiceTasks + (*StatusResponse_Network)(nil), // 4: supernode.StatusResponse.Network + (*StatusResponse_P2PMetrics)(nil), // 5: supernode.StatusResponse.P2PMetrics + (*StatusResponse_Resources_CPU)(nil), // 6: supernode.StatusResponse.Resources.CPU + (*StatusResponse_Resources_Memory)(nil), // 7: supernode.StatusResponse.Resources.Memory + (*StatusResponse_Resources_Storage)(nil), // 8: supernode.StatusResponse.Resources.Storage + (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 9: supernode.StatusResponse.P2PMetrics.DhtMetrics + (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 10: supernode.StatusResponse.P2PMetrics.HandleCounters + (*StatusResponse_P2PMetrics_BanEntry)(nil), // 11: supernode.StatusResponse.P2PMetrics.BanEntry + (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 12: supernode.StatusResponse.P2PMetrics.DatabaseStats + (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 13: supernode.StatusResponse.P2PMetrics.DiskStatus + nil, // 14: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + nil, // 15: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 16: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 17: 
supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint +} +var file_supernode_status_proto_depIdxs = []int32{ + 2, // 0: supernode.StatusResponse.resources:type_name -> supernode.StatusResponse.Resources + 3, // 1: supernode.StatusResponse.running_tasks:type_name -> supernode.StatusResponse.ServiceTasks + 4, // 2: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network + 5, // 3: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics + 6, // 4: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU + 7, // 5: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory + 8, // 6: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage + 9, // 7: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics + 14, // 8: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + 15, // 9: supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + 11, // 10: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry + 12, // 11: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats + 13, // 12: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus + 16, // 13: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + 17, // 14: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + 10, // 15: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> 
supernode.StatusResponse.P2PMetrics.HandleCounters + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_supernode_status_proto_init() } +func file_supernode_status_proto_init() { + if File_supernode_status_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_supernode_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 18, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_supernode_status_proto_goTypes, + DependencyIndexes: file_supernode_status_proto_depIdxs, + MessageInfos: file_supernode_status_proto_msgTypes, + }.Build() + File_supernode_status_proto = out.File + file_supernode_status_proto_rawDesc = nil + file_supernode_status_proto_goTypes = nil + file_supernode_status_proto_depIdxs = nil +} diff --git a/gen/supernode/status.swagger.json b/gen/supernode/status.swagger.json new file mode 100644 index 00000000..5b014db1 --- /dev/null +++ b/gen/supernode/status.swagger.json @@ -0,0 +1,44 @@ +{ + "swagger": "2.0", + "info": { + "title": "supernode/status.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string" + } + }, + "additionalProperties": {} + }, + "rpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + } + } + } + } + } +} diff --git a/gen/supernode/supernode.pb.go 
b/gen/supernode/supernode.pb.go deleted file mode 100644 index 431bc8b5..00000000 --- a/gen/supernode/supernode.pb.go +++ /dev/null @@ -1,2034 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.35.1 -// protoc v3.21.12 -// source: supernode/supernode.proto - -package supernode - -import ( - reflect "reflect" - sync "sync" - - _ "google.golang.org/genproto/googleapis/api/annotations" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type StatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional: include detailed P2P metrics in the response - // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true - IncludeP2PMetrics bool `protobuf:"varint,1,opt,name=include_p2p_metrics,json=includeP2pMetrics,proto3" json:"include_p2p_metrics,omitempty"` -} - -func (x *StatusRequest) Reset() { - *x = StatusRequest{} - mi := &file_supernode_supernode_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusRequest) ProtoMessage() {} - -func (x *StatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead. 
-func (*StatusRequest) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{0} -} - -func (x *StatusRequest) GetIncludeP2PMetrics() bool { - if x != nil { - return x.IncludeP2PMetrics - } - return false -} - -type ListServicesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ListServicesRequest) Reset() { - *x = ListServicesRequest{} - mi := &file_supernode_supernode_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListServicesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServicesRequest) ProtoMessage() {} - -func (x *ListServicesRequest) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead. 
-func (*ListServicesRequest) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{1} -} - -type ListServicesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` - Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` -} - -func (x *ListServicesResponse) Reset() { - *x = ListServicesResponse{} - mi := &file_supernode_supernode_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListServicesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServicesResponse) ProtoMessage() {} - -func (x *ListServicesResponse) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead. 
-func (*ListServicesResponse) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{2} -} - -func (x *ListServicesResponse) GetServices() []*ServiceInfo { - if x != nil { - return x.Services - } - return nil -} - -func (x *ListServicesResponse) GetCount() int32 { - if x != nil { - return x.Count - } - return 0 -} - -type ServiceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` -} - -func (x *ServiceInfo) Reset() { - *x = ServiceInfo{} - mi := &file_supernode_supernode_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ServiceInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceInfo) ProtoMessage() {} - -func (x *ServiceInfo) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceInfo.ProtoReflect.Descriptor instead. 
-func (*ServiceInfo) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{3} -} - -func (x *ServiceInfo) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ServiceInfo) GetMethods() []string { - if x != nil { - return x.Methods - } - return nil -} - -// The StatusResponse represents system status with clear organization -type StatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version - UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds - Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` - RunningTasks []*StatusResponse_ServiceTasks `protobuf:"bytes,4,rep,name=running_tasks,json=runningTasks,proto3" json:"running_tasks,omitempty"` // Services with currently running tasks - RegisteredServices []string `protobuf:"bytes,5,rep,name=registered_services,json=registeredServices,proto3" json:"registered_services,omitempty"` // All registered/available services - Network *StatusResponse_Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` // P2P network information - Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) - IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") - P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` -} - -func (x *StatusResponse) Reset() { - *x = StatusResponse{} - mi := &file_supernode_supernode_proto_msgTypes[4] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse) ProtoMessage() {} - -func (x *StatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. -func (*StatusResponse) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4} -} - -func (x *StatusResponse) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *StatusResponse) GetUptimeSeconds() uint64 { - if x != nil { - return x.UptimeSeconds - } - return 0 -} - -func (x *StatusResponse) GetResources() *StatusResponse_Resources { - if x != nil { - return x.Resources - } - return nil -} - -func (x *StatusResponse) GetRunningTasks() []*StatusResponse_ServiceTasks { - if x != nil { - return x.RunningTasks - } - return nil -} - -func (x *StatusResponse) GetRegisteredServices() []string { - if x != nil { - return x.RegisteredServices - } - return nil -} - -func (x *StatusResponse) GetNetwork() *StatusResponse_Network { - if x != nil { - return x.Network - } - return nil -} - -func (x *StatusResponse) GetRank() int32 { - if x != nil { - return x.Rank - } - return 0 -} - -func (x *StatusResponse) GetIpAddress() string { - if x != nil { - return x.IpAddress - } - return "" -} - -func (x *StatusResponse) GetP2PMetrics() *StatusResponse_P2PMetrics { - if x != nil { - return x.P2PMetrics - } - return nil -} - -// System resource information -type StatusResponse_Resources struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Cpu *StatusResponse_Resources_CPU 
`protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` - Memory *StatusResponse_Resources_Memory `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` - StorageVolumes []*StatusResponse_Resources_Storage `protobuf:"bytes,3,rep,name=storage_volumes,json=storageVolumes,proto3" json:"storage_volumes,omitempty"` - HardwareSummary string `protobuf:"bytes,4,opt,name=hardware_summary,json=hardwareSummary,proto3" json:"hardware_summary,omitempty"` // Formatted hardware summary (e.g., "8 cores / 32GB RAM") -} - -func (x *StatusResponse_Resources) Reset() { - *x = StatusResponse_Resources{} - mi := &file_supernode_supernode_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_Resources) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_Resources) ProtoMessage() {} - -func (x *StatusResponse_Resources) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_Resources.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_Resources) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *StatusResponse_Resources) GetCpu() *StatusResponse_Resources_CPU { - if x != nil { - return x.Cpu - } - return nil -} - -func (x *StatusResponse_Resources) GetMemory() *StatusResponse_Resources_Memory { - if x != nil { - return x.Memory - } - return nil -} - -func (x *StatusResponse_Resources) GetStorageVolumes() []*StatusResponse_Resources_Storage { - if x != nil { - return x.StorageVolumes - } - return nil -} - -func (x *StatusResponse_Resources) GetHardwareSummary() string { - if x != nil { - return x.HardwareSummary - } - return "" -} - -// ServiceTasks contains task information for a specific service -type StatusResponse_ServiceTasks struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"` - TaskCount int32 `protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` -} - -func (x *StatusResponse_ServiceTasks) Reset() { - *x = StatusResponse_ServiceTasks{} - mi := &file_supernode_supernode_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_ServiceTasks) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_ServiceTasks) ProtoMessage() {} - -func (x *StatusResponse_ServiceTasks) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use 
StatusResponse_ServiceTasks.ProtoReflect.Descriptor instead. -func (*StatusResponse_ServiceTasks) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 1} -} - -func (x *StatusResponse_ServiceTasks) GetServiceName() string { - if x != nil { - return x.ServiceName - } - return "" -} - -func (x *StatusResponse_ServiceTasks) GetTaskIds() []string { - if x != nil { - return x.TaskIds - } - return nil -} - -func (x *StatusResponse_ServiceTasks) GetTaskCount() int32 { - if x != nil { - return x.TaskCount - } - return 0 -} - -// Network information -type StatusResponse_Network struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" json:"peers_count,omitempty"` // Number of connected peers in P2P network - PeerAddresses []string `protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy) -} - -func (x *StatusResponse_Network) Reset() { - *x = StatusResponse_Network{} - mi := &file_supernode_supernode_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_Network) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_Network) ProtoMessage() {} - -func (x *StatusResponse_Network) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_Network.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_Network) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 2} -} - -func (x *StatusResponse_Network) GetPeersCount() int32 { - if x != nil { - return x.PeersCount - } - return 0 -} - -func (x *StatusResponse_Network) GetPeerAddresses() []string { - if x != nil { - return x.PeerAddresses - } - return nil -} - -// P2P metrics and diagnostics (additive field) -type StatusResponse_P2PMetrics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - DhtMetrics *StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"` - NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"` - Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` - Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"` - RecentBatchStore []*StatusResponse_P2PMetrics_RecentBatchStoreEntry `protobuf:"bytes,7,rep,name=recent_batch_store,json=recentBatchStore,proto3" json:"recent_batch_store,omitempty"` - RecentBatchRetrieve []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry `protobuf:"bytes,8,rep,name=recent_batch_retrieve,json=recentBatchRetrieve,proto3" json:"recent_batch_retrieve,omitempty"` - 
RecentBatchStoreByIp map[string]*StatusResponse_P2PMetrics_RecentBatchStoreList `protobuf:"bytes,9,rep,name=recent_batch_store_by_ip,json=recentBatchStoreByIp,proto3" json:"recent_batch_store_by_ip,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - RecentBatchRetrieveByIp map[string]*StatusResponse_P2PMetrics_RecentBatchRetrieveList `protobuf:"bytes,10,rep,name=recent_batch_retrieve_by_ip,json=recentBatchRetrieveByIp,proto3" json:"recent_batch_retrieve_by_ip,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *StatusResponse_P2PMetrics) Reset() { - *x = StatusResponse_P2PMetrics{} - mi := &file_supernode_supernode_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_P2PMetrics) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3} -} - -func (x *StatusResponse_P2PMetrics) GetDhtMetrics() *StatusResponse_P2PMetrics_DhtMetrics { - if x != nil { - return x.DhtMetrics - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetNetworkHandleMetrics() map[string]*StatusResponse_P2PMetrics_HandleCounters { - if x != nil { - return x.NetworkHandleMetrics - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetConnPoolMetrics() map[string]int64 { - if x != nil { - return x.ConnPoolMetrics - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetBanList() []*StatusResponse_P2PMetrics_BanEntry { - if x != nil { - return x.BanList - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetDatabase() *StatusResponse_P2PMetrics_DatabaseStats { - if x != nil { - return x.Database - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetDisk() *StatusResponse_P2PMetrics_DiskStatus { - if x != nil { - return x.Disk - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetRecentBatchStore() []*StatusResponse_P2PMetrics_RecentBatchStoreEntry { - if x != nil { - return x.RecentBatchStore - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetRecentBatchRetrieve() []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry { - if x != nil { - return x.RecentBatchRetrieve - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetRecentBatchStoreByIp() map[string]*StatusResponse_P2PMetrics_RecentBatchStoreList { - if x != nil { - return x.RecentBatchStoreByIp - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetRecentBatchRetrieveByIp() map[string]*StatusResponse_P2PMetrics_RecentBatchRetrieveList { - if x != nil { - return x.RecentBatchRetrieveByIp - } - return nil -} - -type StatusResponse_Resources_CPU struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - UsagePercent float64 
`protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // CPU usage percentage (0-100) - Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores -} - -func (x *StatusResponse_Resources_CPU) Reset() { - *x = StatusResponse_Resources_CPU{} - mi := &file_supernode_supernode_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_Resources_CPU) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_Resources_CPU) ProtoMessage() {} - -func (x *StatusResponse_Resources_CPU) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_Resources_CPU.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_Resources_CPU) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0, 0} -} - -func (x *StatusResponse_Resources_CPU) GetUsagePercent() float64 { - if x != nil { - return x.UsagePercent - } - return 0 -} - -func (x *StatusResponse_Resources_CPU) GetCores() int32 { - if x != nil { - return x.Cores - } - return 0 -} - -type StatusResponse_Resources_Memory struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB - UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB - AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" json:"available_gb,omitempty"` // Available memory in GB - UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100) -} - -func (x *StatusResponse_Resources_Memory) Reset() { - *x = StatusResponse_Resources_Memory{} - mi := &file_supernode_supernode_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_Resources_Memory) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_Resources_Memory) ProtoMessage() {} - -func (x *StatusResponse_Resources_Memory) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_Resources_Memory.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_Resources_Memory) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0, 1} -} - -func (x *StatusResponse_Resources_Memory) GetTotalGb() float64 { - if x != nil { - return x.TotalGb - } - return 0 -} - -func (x *StatusResponse_Resources_Memory) GetUsedGb() float64 { - if x != nil { - return x.UsedGb - } - return 0 -} - -func (x *StatusResponse_Resources_Memory) GetAvailableGb() float64 { - if x != nil { - return x.AvailableGb - } - return 0 -} - -func (x *StatusResponse_Resources_Memory) GetUsagePercent() float64 { - if x != nil { - return x.UsagePercent - } - return 0 -} - -type StatusResponse_Resources_Storage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored - TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"` - UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` - AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` - UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100) -} - -func (x *StatusResponse_Resources_Storage) Reset() { - *x = StatusResponse_Resources_Storage{} - mi := &file_supernode_supernode_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_Resources_Storage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_Resources_Storage) ProtoMessage() {} - -func (x *StatusResponse_Resources_Storage) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[11] - if x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_Resources_Storage.ProtoReflect.Descriptor instead. -func (*StatusResponse_Resources_Storage) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0, 2} -} - -func (x *StatusResponse_Resources_Storage) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *StatusResponse_Resources_Storage) GetTotalBytes() uint64 { - if x != nil { - return x.TotalBytes - } - return 0 -} - -func (x *StatusResponse_Resources_Storage) GetUsedBytes() uint64 { - if x != nil { - return x.UsedBytes - } - return 0 -} - -func (x *StatusResponse_Resources_Storage) GetAvailableBytes() uint64 { - if x != nil { - return x.AvailableBytes - } - return 0 -} - -func (x *StatusResponse_Resources_Storage) GetUsagePercent() float64 { - if x != nil { - return x.UsagePercent - } - return 0 -} - -// Rolling DHT metrics snapshot -type StatusResponse_P2PMetrics_DhtMetrics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - StoreSuccessRecent []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint `protobuf:"bytes,1,rep,name=store_success_recent,json=storeSuccessRecent,proto3" json:"store_success_recent,omitempty"` - BatchRetrieveRecent []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint `protobuf:"bytes,2,rep,name=batch_retrieve_recent,json=batchRetrieveRecent,proto3" json:"batch_retrieve_recent,omitempty"` - HotPathBannedSkips int64 `protobuf:"varint,3,opt,name=hot_path_banned_skips,json=hotPathBannedSkips,proto3" json:"hot_path_banned_skips,omitempty"` // counter - HotPathBanIncrements int64 `protobuf:"varint,4,opt,name=hot_path_ban_increments,json=hotPathBanIncrements,proto3" json:"hot_path_ban_increments,omitempty"` // counter -} - -func (x 
*StatusResponse_P2PMetrics_DhtMetrics) Reset() { - *x = StatusResponse_P2PMetrics_DhtMetrics{} - mi := &file_supernode_supernode_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_DhtMetrics) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[12] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_DhtMetrics) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0} -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) GetStoreSuccessRecent() []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint { - if x != nil { - return x.StoreSuccessRecent - } - return nil -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) GetBatchRetrieveRecent() []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint { - if x != nil { - return x.BatchRetrieveRecent - } - return nil -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBannedSkips() int64 { - if x != nil { - return x.HotPathBannedSkips - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBanIncrements() int64 { - if x != nil { - return x.HotPathBanIncrements - } - return 0 -} - -// Per-handler counters from network layer -type StatusResponse_P2PMetrics_HandleCounters struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Success int64 
`protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` - Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"` - Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() { - *x = StatusResponse_P2PMetrics_HandleCounters{} - mi := &file_supernode_supernode_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_HandleCounters) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_HandleCounters.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_P2PMetrics_HandleCounters) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 1} -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) GetTotal() int64 { - if x != nil { - return x.Total - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) GetSuccess() int64 { - if x != nil { - return x.Success - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) GetFailure() int64 { - if x != nil { - return x.Failure - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) GetTimeout() int64 { - if x != nil { - return x.Timeout - } - return 0 -} - -// Ban list entry -type StatusResponse_P2PMetrics_BanEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID - Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP - Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port - Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count - CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds) - AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds -} - -func (x *StatusResponse_P2PMetrics_BanEntry) Reset() { - *x = StatusResponse_P2PMetrics_BanEntry{} - mi := &file_supernode_supernode_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_BanEntry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_BanEntry) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() 
protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_BanEntry.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_BanEntry) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 2} -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetPort() uint32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetCount() int32 { - if x != nil { - return x.Count - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetCreatedAtUnix() int64 { - if x != nil { - return x.CreatedAtUnix - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetAgeSeconds() int64 { - if x != nil { - return x.AgeSeconds - } - return 0 -} - -// DB stats -type StatusResponse_P2PMetrics_DatabaseStats struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"` - P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() { - *x = StatusResponse_P2PMetrics_DatabaseStats{} - mi := &file_supernode_supernode_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_DatabaseStats) String() string { - 
return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_DatabaseStats) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_DatabaseStats.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_DatabaseStats) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 3} -} - -func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbSizeMb() float64 { - if x != nil { - return x.P2PDbSizeMb - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbRecordsCount() int64 { - if x != nil { - return x.P2PDbRecordsCount - } - return 0 -} - -// Disk status -type StatusResponse_P2PMetrics_DiskStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"` - UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"` - FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() { - *x = StatusResponse_P2PMetrics_DiskStatus{} - mi := &file_supernode_supernode_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_DiskStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_DiskStatus) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Message { - mi := 
&file_supernode_supernode_proto_msgTypes[16] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_DiskStatus.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_DiskStatus) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 4} -} - -func (x *StatusResponse_P2PMetrics_DiskStatus) GetAllMb() float64 { - if x != nil { - return x.AllMb - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DiskStatus) GetUsedMb() float64 { - if x != nil { - return x.UsedMb - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DiskStatus) GetFreeMb() float64 { - if x != nil { - return x.FreeMb - } - return 0 -} - -// Last handled BatchStoreData requests (most recent first) -type StatusResponse_P2PMetrics_RecentBatchStoreEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` - SenderId string `protobuf:"bytes,2,opt,name=sender_id,json=senderId,proto3" json:"sender_id,omitempty"` - SenderIp string `protobuf:"bytes,3,opt,name=sender_ip,json=senderIp,proto3" json:"sender_ip,omitempty"` - Keys int32 `protobuf:"varint,4,opt,name=keys,proto3" json:"keys,omitempty"` - DurationMs int64 `protobuf:"varint,5,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` - Ok bool `protobuf:"varint,6,opt,name=ok,proto3" json:"ok,omitempty"` - Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) Reset() { - *x = StatusResponse_P2PMetrics_RecentBatchStoreEntry{} - mi := &file_supernode_supernode_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) 
-} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_RecentBatchStoreEntry) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[19] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchStoreEntry.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_RecentBatchStoreEntry) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 7} -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetTimeUnix() int64 { - if x != nil { - return x.TimeUnix - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetSenderId() string { - if x != nil { - return x.SenderId - } - return "" -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetSenderIp() string { - if x != nil { - return x.SenderIp - } - return "" -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetKeys() int32 { - if x != nil { - return x.Keys - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetDurationMs() int64 { - if x != nil { - return x.DurationMs - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetOk() bool { - if x != nil { - return x.Ok - } - return false -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -// Last handled BatchGetValues requests (most recent first) -type StatusResponse_P2PMetrics_RecentBatchRetrieveEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TimeUnix int64 
`protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` - SenderId string `protobuf:"bytes,2,opt,name=sender_id,json=senderId,proto3" json:"sender_id,omitempty"` - SenderIp string `protobuf:"bytes,3,opt,name=sender_ip,json=senderIp,proto3" json:"sender_ip,omitempty"` - Requested int32 `protobuf:"varint,4,opt,name=requested,proto3" json:"requested,omitempty"` - Found int32 `protobuf:"varint,5,opt,name=found,proto3" json:"found,omitempty"` - DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` - Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) Reset() { - *x = StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{} - mi := &file_supernode_supernode_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[20] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchRetrieveEntry.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 8} -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetTimeUnix() int64 { - if x != nil { - return x.TimeUnix - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetSenderId() string { - if x != nil { - return x.SenderId - } - return "" -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetSenderIp() string { - if x != nil { - return x.SenderIp - } - return "" -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetRequested() int32 { - if x != nil { - return x.Requested - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetFound() int32 { - if x != nil { - return x.Found - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetDurationMs() int64 { - if x != nil { - return x.DurationMs - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -// Per-IP buckets: last 10 per sender IP -type StatusResponse_P2PMetrics_RecentBatchStoreList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Entries []*StatusResponse_P2PMetrics_RecentBatchStoreEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) Reset() { - *x = StatusResponse_P2PMetrics_RecentBatchStoreList{} - mi := &file_supernode_supernode_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_RecentBatchStoreList) ProtoMessage() {} - -func (x 
*StatusResponse_P2PMetrics_RecentBatchStoreList) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[21] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchStoreList.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_RecentBatchStoreList) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 9} -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) GetEntries() []*StatusResponse_P2PMetrics_RecentBatchStoreEntry { - if x != nil { - return x.Entries - } - return nil -} - -type StatusResponse_P2PMetrics_RecentBatchRetrieveList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Entries []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) Reset() { - *x = StatusResponse_P2PMetrics_RecentBatchRetrieveList{} - mi := &file_supernode_supernode_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_RecentBatchRetrieveList) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[22] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchRetrieveList.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_P2PMetrics_RecentBatchRetrieveList) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 10} -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) GetEntries() []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry { - if x != nil { - return x.Entries - } - return nil -} - -type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) - Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted - Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs - SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100) -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() { - *x = StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{} - mi := &file_supernode_supernode_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[25] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0, 0} -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetTimeUnix() int64 { - if x != nil { - return x.TimeUnix - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetRequests() int32 { - if x != nil { - return x.Requests - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessful() int32 { - if x != nil { - return x.Successful - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessRate() float64 { - if x != nil { - return x.SuccessRate - } - return 0 -} - -type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) - Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested - Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // required count - FoundLocal int32 `protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally - FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network - DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() { - *x = StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{} - mi := &file_supernode_supernode_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x 
*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[26] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0, 1} -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetTimeUnix() int64 { - if x != nil { - return x.TimeUnix - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetKeys() int32 { - if x != nil { - return x.Keys - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetRequired() int32 { - if x != nil { - return x.Required - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundLocal() int32 { - if x != nil { - return x.FoundLocal - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundNetwork() int32 { - if x != nil { - return x.FoundNetwork - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetDurationMs() int64 { - if x != nil { - return x.DurationMs - } - return 0 -} - -var File_supernode_supernode_proto protoreflect.FileDescriptor - -var file_supernode_supernode_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x12, 0x09, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x32, 0x70, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x60, 0x0a, 0x14, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x3b, - 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x22, 0xf7, 0x23, 0x0a, 0x0e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x75, 0x70, 0x74, 0x69, - 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0d, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, - 0x41, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, - 0x73, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, - 0x73, 0x52, 0x0c, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, - 0x2f, 0x0a, 0x13, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x12, 0x3b, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, - 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x61, 0x6e, - 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x70, 
0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x12, 0x45, 0x0a, 0x0b, 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x70, 0x32, 0x70, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, - 0x12, 0x42, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, - 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x54, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 
0x10, 0x68, 0x61, - 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x1a, 0x40, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x23, 0x0a, 0x0d, - 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, - 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, 0x62, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, 0x12, 0x17, 0x0a, - 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x67, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, - 0x75, 0x73, 0x65, 0x64, 0x47, 0x62, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x67, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x61, 0x76, - 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, - 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0xab, - 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, - 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, - 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 
0x52, 0x09, 0x75, 0x73, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27, - 0x0a, 0x0f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, - 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, - 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x6b, 0x0a, 0x0c, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x19, 0x0a, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, - 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x70, - 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xe6, 0x19, 0x0a, - 0x0a, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x64, - 0x68, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 
0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x52, 0x0a, 0x64, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x74, 0x0a, - 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, - 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x12, 0x65, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, - 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, - 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x50, - 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x48, 0x0a, 0x08, 0x62, 0x61, - 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 
0x69, - 0x63, 0x73, 0x2e, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x61, 0x6e, - 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x61, 0x74, - 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, - 0x62, 0x61, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, - 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x12, 0x68, 0x0a, 0x12, 0x72, 0x65, 0x63, - 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, - 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, - 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, - 0x6f, 0x72, 0x65, 0x12, 0x71, 0x0a, 0x15, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, - 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, - 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x13, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, - 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x12, 0x76, 0x0a, 0x18, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x79, 0x5f, - 0x69, 0x70, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, - 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, - 0x79, 0x49, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x79, 0x49, 0x70, 0x12, 0x7f, - 0x0a, 0x1b, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, - 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, - 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x42, 0x79, 0x49, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x17, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, - 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x42, 0x79, 0x49, 0x70, 0x1a, - 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 
0x69, 0x63, 0x73, 0x12, 0x73, - 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, - 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x53, 0x74, - 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, - 0x12, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x63, - 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, 0x15, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, - 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, - 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, - 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, - 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x68, - 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, - 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, 0x6f, 0x74, 0x50, - 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, 0x73, 0x12, 0x35, - 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x5f, 0x69, - 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x14, 
0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, 0x63, 0x72, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x8f, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, - 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, - 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, - 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, 0x75, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, - 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6b, - 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, - 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, - 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x23, 0x0a, 0x0d, - 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, 0x6f, 
0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x73, 0x1a, 0x74, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, 0x42, 0x61, 0x6e, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x75, 0x6e, - 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, 
0x61, 0x74, 0x61, - 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x32, 0x70, - 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, 0x44, 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x2f, - 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x70, 0x32, - 0x70, 0x44, 0x62, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, - 0x55, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a, - 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x61, - 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x6d, 0x62, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, 0x12, 0x17, 0x0a, - 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, - 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, 0x1a, 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, - 0x4d, 0x65, 0x74, 
0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xc9, 0x01, 0x0a, 0x15, 0x52, 0x65, 0x63, - 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, - 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, - 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x1f, 0x0a, - 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x0e, - 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x14, - 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x1a, 0xdc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1b, - 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 
0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, - 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0b, - 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x1a, 0x6c, 0x0a, 0x14, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x54, 0x0a, 0x07, 0x65, - 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, - 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, - 0x73, 0x1a, 0x72, 0x0a, 0x17, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, - 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x07, - 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 
0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, - 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, - 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x82, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x79, 0x49, 0x70, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, - 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x88, 0x01, 0x0a, 0x1c, 0x52, - 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, - 0x76, 0x65, 0x42, 0x79, 0x49, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, - 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 
0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, - 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, - 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_supernode_supernode_proto_rawDescOnce sync.Once - file_supernode_supernode_proto_rawDescData = file_supernode_supernode_proto_rawDesc -) - -func file_supernode_supernode_proto_rawDescGZIP() []byte { - file_supernode_supernode_proto_rawDescOnce.Do(func() 
{ - file_supernode_supernode_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_supernode_proto_rawDescData) - }) - return file_supernode_supernode_proto_rawDescData -} - -var file_supernode_supernode_proto_msgTypes = make([]protoimpl.MessageInfo, 27) -var file_supernode_supernode_proto_goTypes = []any{ - (*StatusRequest)(nil), // 0: supernode.StatusRequest - (*ListServicesRequest)(nil), // 1: supernode.ListServicesRequest - (*ListServicesResponse)(nil), // 2: supernode.ListServicesResponse - (*ServiceInfo)(nil), // 3: supernode.ServiceInfo - (*StatusResponse)(nil), // 4: supernode.StatusResponse - (*StatusResponse_Resources)(nil), // 5: supernode.StatusResponse.Resources - (*StatusResponse_ServiceTasks)(nil), // 6: supernode.StatusResponse.ServiceTasks - (*StatusResponse_Network)(nil), // 7: supernode.StatusResponse.Network - (*StatusResponse_P2PMetrics)(nil), // 8: supernode.StatusResponse.P2PMetrics - (*StatusResponse_Resources_CPU)(nil), // 9: supernode.StatusResponse.Resources.CPU - (*StatusResponse_Resources_Memory)(nil), // 10: supernode.StatusResponse.Resources.Memory - (*StatusResponse_Resources_Storage)(nil), // 11: supernode.StatusResponse.Resources.Storage - (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 12: supernode.StatusResponse.P2PMetrics.DhtMetrics - (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 13: supernode.StatusResponse.P2PMetrics.HandleCounters - (*StatusResponse_P2PMetrics_BanEntry)(nil), // 14: supernode.StatusResponse.P2PMetrics.BanEntry - (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 15: supernode.StatusResponse.P2PMetrics.DatabaseStats - (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 16: supernode.StatusResponse.P2PMetrics.DiskStatus - nil, // 17: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry - nil, // 18: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry - (*StatusResponse_P2PMetrics_RecentBatchStoreEntry)(nil), // 19: supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry - 
(*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry)(nil), // 20: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry - (*StatusResponse_P2PMetrics_RecentBatchStoreList)(nil), // 21: supernode.StatusResponse.P2PMetrics.RecentBatchStoreList - (*StatusResponse_P2PMetrics_RecentBatchRetrieveList)(nil), // 22: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList - nil, // 23: supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry - nil, // 24: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry - (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 25: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 26: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint -} -var file_supernode_supernode_proto_depIdxs = []int32{ - 3, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo - 5, // 1: supernode.StatusResponse.resources:type_name -> supernode.StatusResponse.Resources - 6, // 2: supernode.StatusResponse.running_tasks:type_name -> supernode.StatusResponse.ServiceTasks - 7, // 3: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network - 8, // 4: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics - 9, // 5: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU - 10, // 6: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory - 11, // 7: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage - 12, // 8: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics - 17, // 9: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry - 18, // 10: 
supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry - 14, // 11: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry - 15, // 12: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats - 16, // 13: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus - 19, // 14: supernode.StatusResponse.P2PMetrics.recent_batch_store:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry - 20, // 15: supernode.StatusResponse.P2PMetrics.recent_batch_retrieve:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry - 23, // 16: supernode.StatusResponse.P2PMetrics.recent_batch_store_by_ip:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry - 24, // 17: supernode.StatusResponse.P2PMetrics.recent_batch_retrieve_by_ip:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry - 25, // 18: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - 26, // 19: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint - 13, // 20: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters - 19, // 21: supernode.StatusResponse.P2PMetrics.RecentBatchStoreList.entries:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry - 20, // 22: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList.entries:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry - 21, // 23: supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreList - 22, // 24: 
supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList - 0, // 25: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest - 1, // 26: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest - 4, // 27: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse - 2, // 28: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse - 27, // [27:29] is the sub-list for method output_type - 25, // [25:27] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name -} - -func init() { file_supernode_supernode_proto_init() } -func file_supernode_supernode_proto_init() { - if File_supernode_supernode_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_supernode_supernode_proto_rawDesc, - NumEnums: 0, - NumMessages: 27, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_supernode_supernode_proto_goTypes, - DependencyIndexes: file_supernode_supernode_proto_depIdxs, - MessageInfos: file_supernode_supernode_proto_msgTypes, - }.Build() - File_supernode_supernode_proto = out.File - file_supernode_supernode_proto_rawDesc = nil - file_supernode_supernode_proto_goTypes = nil - file_supernode_supernode_proto_depIdxs = nil -} diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 0bca2c45..9d029479 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -23,7 +23,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" - "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" 
"github.com/LumeraProtocol/supernode/v2/pkg/storage" "github.com/LumeraProtocol/supernode/v2/pkg/storage/memory" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" @@ -125,7 +124,7 @@ func (s *DHT) ConnPoolSnapshot() map[string]int64 { // Options contains configuration options for the queries node type Options struct { - ID []byte + ID []byte // The queries IPv4 or IPv6 address IP string @@ -143,8 +142,6 @@ type Options struct { // Keyring for credentials Keyring keyring.Keyring - // MetricsDisabled gates DHT-level metrics emission (p2pmetrics hooks and snapshots) - MetricsDisabled bool } // NewDHT returns a new DHT node @@ -474,17 +471,7 @@ func (s *DHT) Stats(ctx context.Context) (map[string]interface{}, error) { dhtStats["peers_count"] = len(s.ht.nodes()) dhtStats["peers"] = s.ht.nodes() dhtStats["network"] = s.network.HandleMetricsSnapshot() - // Include recent request snapshots for observability - if s.network != nil { - if overall, byIP := s.network.RecentBatchStoreSnapshot(); len(overall) > 0 || len(byIP) > 0 { - dhtStats["recent_batch_store_overall"] = overall - dhtStats["recent_batch_store_by_ip"] = byIP - } - if overall, byIP := s.network.RecentBatchRetrieveSnapshot(); len(overall) > 0 || len(byIP) > 0 { - dhtStats["recent_batch_retrieve_overall"] = overall - dhtStats["recent_batch_retrieve_by_ip"] = byIP - } - } + // Removed: recent per-request snapshots (logs provide visibility) dhtStats["database"] = dbStats return dhtStats, nil @@ -695,8 +682,7 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result } func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, txID string, localOnly ...bool) (result map[string][]byte, err error) { - start := time.Now() - logtrace.Debug(ctx, "DHT BatchRetrieve begin", logtrace.Fields{"txid": txID, "keys": len(keys), "required": required}) + logtrace.Debug(ctx, "DHT BatchRetrieve begin", logtrace.Fields{"txid": txID, "keys": len(keys), "required": required}) 
result = make(map[string][]byte) var resMap sync.Map var foundLocalCount int32 @@ -768,23 +754,23 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, if err != nil { return nil, fmt.Errorf("fetch and add local keys: %v", err) } - // Report how many were found locally, for event metrics - if !s.options.MetricsDisabled { - p2pmetrics.ReportFoundLocal(p2pmetrics.TaskIDFromContext(ctx), int(foundLocalCount)) - } + // Found locally count is logged via summary below; no external metrics if foundLocalCount >= required { return result, nil } - batchSize := batchRetrieveSize - var networkFound int32 - totalBatches := int(math.Ceil(float64(required) / float64(batchSize))) - parallelBatches := int(math.Min(float64(totalBatches), float64(fetchSymbolsBatchConcurrency))) + batchSize := batchRetrieveSize + var networkFound int32 + totalBatches := int(math.Ceil(float64(required) / float64(batchSize))) + parallelBatches := int(math.Min(float64(totalBatches), float64(fetchSymbolsBatchConcurrency))) - semaphore := make(chan struct{}, parallelBatches) - var wg sync.WaitGroup - gctx, cancel := context.WithCancel(ctx) - defer cancel() + semaphore := make(chan struct{}, parallelBatches) + var wg sync.WaitGroup + gctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Measure only the network retrieval phase (after local scan) + netStart := time.Now() for start := 0; start < len(keys); start += batchSize { end := start + batchSize @@ -816,18 +802,15 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, wg.Wait() - netFound := int(atomic.LoadInt32(&networkFound)) -{ - f := logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} + netFound := int(atomic.LoadInt32(&networkFound)) +{ + f := logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, 
"ms": time.Since(netStart).Milliseconds(), logtrace.FieldRole: "client"} if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } logtrace.Info(ctx, "dht: batch retrieve summary", f) } - // Record batch retrieve stats for internal DHT snapshot window - s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(start)) - // Also feed retrieve counts into the per-task collector for stream events - if !s.options.MetricsDisabled { - p2pmetrics.SetRetrieveBatchSummary(p2pmetrics.TaskIDFromContext(ctx), len(keys), int(required), int(foundLocalCount), netFound, time.Since(start).Milliseconds()) - } + // Record batch retrieve stats for internal DHT snapshot window (network phase only) + s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(netStart)) + // No per-task metrics collector updates return result, nil } @@ -959,8 +942,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, defer func() { <-semaphore }() } - callStart := time.Now() - indices := fetchMap[nodeID] + indices := fetchMap[nodeID] requestKeys := make(map[string]KeyValWithClosest) for _, idx := range indices { if idx < len(hexKeys) { @@ -984,19 +966,9 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, firstErr = err } mu.Unlock() - // record failed RPC per-node - if !s.options.MetricsDisabled { - p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ - IP: node.IP, - Address: node.String(), - Keys: 0, - Success: false, - Error: err.Error(), - DurationMS: time.Since(callStart).Milliseconds(), - }) + // per-node metrics removed; logs retained + return } - return - } returned := 0 for k, v := range decompressedData { @@ -1016,19 +988,9 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, } } - // record successful RPC per-node (returned may be 0). Success is true when no error. 
- if !s.options.MetricsDisabled { - p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ - IP: node.IP, - Address: node.String(), - Keys: returned, - Success: true, - Error: "", - DurationMS: time.Since(callStart).Milliseconds(), - }) - } - }(node, nodeID) - } + // per-node metrics removed; logs retained + }(node, nodeID) + } wg.Wait() @@ -1727,14 +1689,11 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i storeResponses := s.batchStoreNetwork(ctx, values, knownNodes, storageMap, typ) for response := range storeResponses { requests++ - var nodeAddr string - var nodeIP string + var nodeAddr string if response.Receiver != nil { - nodeAddr = response.Receiver.String() - nodeIP = response.Receiver.IP + nodeAddr = response.Receiver.String() } else if response.Message != nil && response.Message.Sender != nil { - nodeAddr = response.Message.Sender.String() - nodeIP = response.Message.Sender.IP + nodeAddr = response.Message.Sender.String() } errMsg := "" @@ -1765,17 +1724,7 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i } } - // Emit per-node store RPC call via metrics bridge (no P2P API coupling) - if !s.options.MetricsDisabled { - p2pmetrics.RecordStore(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ - IP: nodeIP, - Address: nodeAddr, - Keys: response.KeysCount, - Success: errMsg == "" && response.Error == nil, - Error: errMsg, - DurationMS: response.DurationMS, - }) - } + // per-node store metrics removed; logs retained } diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index dc0552fc..e4ab76e5 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -69,12 +69,6 @@ type Network struct { metrics sync.Map - // recent request tracking (last 10 entries overall and per IP) - recentMu sync.Mutex - recentStoreOverall []RecentBatchStoreEntry - recentStoreByIP map[string][]RecentBatchStoreEntry - recentRetrieveOverall []RecentBatchRetrieveEntry - 
recentRetrieveByIP map[string][]RecentBatchRetrieveEntry } // NewNetwork returns a network service @@ -956,40 +950,17 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r } func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, reqID string) (res []byte, err error) { - start := time.Now() - appended := false - defer func() { - if response, err := s.handlePanic(ctx, message.Sender, BatchGetValues); response != nil || err != nil { - res = response - if !appended { - s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Requested: 0, - Found: 0, - DurationMS: time.Since(start).Milliseconds(), - Error: "panic/recovered", - }) - } - } - }() + defer func() { + if response, err := s.handlePanic(ctx, message.Sender, BatchGetValues); response != nil || err != nil { + res = response + } + }() - request, ok := message.Data.(*BatchGetValuesRequest) - if !ok { - err := errors.New("invalid BatchGetValuesRequest") - s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Requested: 0, - Found: 0, - DurationMS: time.Since(start).Milliseconds(), - Error: err.Error(), - }) - appended = true - return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) - } + request, ok := message.Data.(*BatchGetValuesRequest) + if !ok { + err := errors.New("invalid BatchGetValuesRequest") + return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) + } logtrace.Debug(ctx, "Batch get values request received", logtrace.Fields{ logtrace.FieldModule: "p2p", @@ -1005,21 +976,11 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, i++ } - values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, false) 
- if err != nil { - err = errors.Errorf("batch find values: %w", err) - s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Requested: len(keys), - Found: count, - DurationMS: time.Since(start).Milliseconds(), - Error: err.Error(), - }) - appended = true - return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) - } + values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, false) + if err != nil { + err = errors.Errorf("batch find values: %w", err) + return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) + } { f := logtrace.Fields{logtrace.FieldModule: "p2p", "requested-keys": len(keys), "found": count, "sender": message.Sender.String(), logtrace.FieldRole: "server"} @@ -1044,19 +1005,9 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, } // new a response message - resMsg := s.dht.newMessage(BatchGetValues, message.Sender, response) - resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) - s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Requested: len(keys), - Found: count, - DurationMS: time.Since(start).Milliseconds(), - Error: "", - }) - appended = true - return s.encodeMesage(resMsg) + resMsg := s.dht.newMessage(BatchGetValues, message.Sender, response) + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) + return s.encodeMesage(resMsg) } func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFindValuesRequest, ip string, reqID string) (isDone bool, compressedData []byte, err error) { @@ -1227,40 +1178,17 @@ func findTopHeaviestKeys(dataMap map[string][]byte, size int) (int, []string) { } func (s *Network) handleBatchStoreData(ctx context.Context, 
message *Message) (res []byte, err error) { - start := time.Now() - appended := false - defer func() { - if response, err := s.handlePanic(ctx, message.Sender, BatchStoreData); response != nil || err != nil { - res = response - if !appended { - s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Keys: 0, - DurationMS: time.Since(start).Milliseconds(), - OK: false, - Error: "panic/recovered", - }) - } - } - }() + defer func() { + if response, err := s.handlePanic(ctx, message.Sender, BatchStoreData); response != nil || err != nil { + res = response + } + }() - request, ok := message.Data.(*BatchStoreDataRequest) - if !ok { - err := errors.New("invalid BatchStoreDataRequest") - s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Keys: 0, - DurationMS: time.Since(start).Milliseconds(), - OK: false, - Error: err.Error(), - }) - appended = true - return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) - } + request, ok := message.Data.(*BatchStoreDataRequest) + if !ok { + err := errors.New("invalid BatchStoreDataRequest") + return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) + } // log.P2P().WithContext(ctx).Info("handle batch store data request received") { @@ -1272,20 +1200,10 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r // add the sender to queries hash table s.dht.addNode(ctx, message.Sender) - if err := s.dht.store.StoreBatch(ctx, request.Data, 1, false); err != nil { - err = errors.Errorf("batch store the data: %w", err) - s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Keys: 
len(request.Data), - DurationMS: time.Since(start).Milliseconds(), - OK: false, - Error: err.Error(), - }) - appended = true - return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) - } + if err := s.dht.store.StoreBatch(ctx, request.Data, 1, false); err != nil { + err = errors.Errorf("batch store the data: %w", err) + return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) + } response := &StoreDataResponse{ Status: ResponseStatus{ @@ -1300,19 +1218,9 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r } // new a response message - resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response) - resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) - s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Keys: len(request.Data), - DurationMS: time.Since(start).Milliseconds(), - OK: true, - Error: "", - }) - appended = true - return s.encodeMesage(resMsg) + resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response) + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) + return s.encodeMesage(resMsg) } func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (res []byte, err error) { diff --git a/p2p/kademlia/recent.go b/p2p/kademlia/recent.go deleted file mode 100644 index 2467cf02..00000000 --- a/p2p/kademlia/recent.go +++ /dev/null @@ -1,90 +0,0 @@ -package kademlia - -import ( - "sync" - "time" -) - -// RecentBatchStoreEntry captures a handled BatchStoreData request outcome -type RecentBatchStoreEntry struct { - TimeUnix int64 `json:"time_unix"` - SenderID string `json:"sender_id"` - SenderIP string `json:"sender_ip"` - Keys int `json:"keys"` - DurationMS int64 `json:"duration_ms"` - OK bool `json:"ok"` - Error string `json:"error,omitempty"` -} - -// RecentBatchRetrieveEntry 
captures a handled BatchGetValues request outcome -type RecentBatchRetrieveEntry struct { - TimeUnix int64 `json:"time_unix"` - SenderID string `json:"sender_id"` - SenderIP string `json:"sender_ip"` - Requested int `json:"requested"` - Found int `json:"found"` - DurationMS int64 `json:"duration_ms"` - Error string `json:"error,omitempty"` -} - -func (s *Network) appendStoreEntry(ip string, e RecentBatchStoreEntry) { - s.recentMu.Lock() - defer s.recentMu.Unlock() - if s.recentStoreByIP == nil { - s.recentStoreByIP = make(map[string][]RecentBatchStoreEntry) - } - s.recentStoreOverall = append([]RecentBatchStoreEntry{e}, s.recentStoreOverall...) - if len(s.recentStoreOverall) > 10 { - s.recentStoreOverall = s.recentStoreOverall[:10] - } - lst := append([]RecentBatchStoreEntry{e}, s.recentStoreByIP[ip]...) - if len(lst) > 10 { - lst = lst[:10] - } - s.recentStoreByIP[ip] = lst -} - -func (s *Network) appendRetrieveEntry(ip string, e RecentBatchRetrieveEntry) { - s.recentMu.Lock() - defer s.recentMu.Unlock() - if s.recentRetrieveByIP == nil { - s.recentRetrieveByIP = make(map[string][]RecentBatchRetrieveEntry) - } - s.recentRetrieveOverall = append([]RecentBatchRetrieveEntry{e}, s.recentRetrieveOverall...) - if len(s.recentRetrieveOverall) > 10 { - s.recentRetrieveOverall = s.recentRetrieveOverall[:10] - } - lst := append([]RecentBatchRetrieveEntry{e}, s.recentRetrieveByIP[ip]...) - if len(lst) > 10 { - lst = lst[:10] - } - s.recentRetrieveByIP[ip] = lst -} - -// RecentBatchStoreSnapshot returns copies of recent store entries (overall and by IP) -func (s *Network) RecentBatchStoreSnapshot() (overall []RecentBatchStoreEntry, byIP map[string][]RecentBatchStoreEntry) { - s.recentMu.Lock() - defer s.recentMu.Unlock() - overall = append([]RecentBatchStoreEntry(nil), s.recentStoreOverall...) - byIP = make(map[string][]RecentBatchStoreEntry, len(s.recentStoreByIP)) - for k, v := range s.recentStoreByIP { - byIP[k] = append([]RecentBatchStoreEntry(nil), v...) 
- } - return -} - -// RecentBatchRetrieveSnapshot returns copies of recent retrieve entries (overall and by IP) -func (s *Network) RecentBatchRetrieveSnapshot() (overall []RecentBatchRetrieveEntry, byIP map[string][]RecentBatchRetrieveEntry) { - s.recentMu.Lock() - defer s.recentMu.Unlock() - overall = append([]RecentBatchRetrieveEntry(nil), s.recentRetrieveOverall...) - byIP = make(map[string][]RecentBatchRetrieveEntry, len(s.recentRetrieveByIP)) - for k, v := range s.recentRetrieveByIP { - byIP[k] = append([]RecentBatchRetrieveEntry(nil), v...) - } - return -} - -// helper to avoid unused import warning if needed -var _ = time.Now -var _ = sync.Mutex{} diff --git a/p2p/p2p.go b/p2p/p2p.go index 2e416111..bb38ac0c 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -47,8 +47,7 @@ type p2p struct { running bool // if the kademlia network is ready lumeraClient lumera.Client keyring keyring.Keyring // Add the keyring field - rqstore rqstore.Store - metricsDisabled bool + rqstore rqstore.Store } // Run the kademlia network @@ -228,13 +227,12 @@ func (s *p2p) NClosestNodesWithIncludingNodeList(ctx context.Context, n int, key func (s *p2p) configure(ctx context.Context) error { // new the queries storage kadOpts := &kademlia.Options{ - LumeraClient: s.lumeraClient, - Keyring: s.keyring, // Pass the keyring - BootstrapNodes: []*kademlia.Node{}, - IP: s.config.ListenAddress, - Port: s.config.Port, - ID: []byte(s.config.ID), - MetricsDisabled: s.metricsDisabled, + LumeraClient: s.lumeraClient, + Keyring: s.keyring, // Pass the keyring + BootstrapNodes: []*kademlia.Node{}, + IP: s.config.ListenAddress, + Port: s.config.Port, + ID: []byte(s.config.ID), } if len(kadOpts.ID) == 0 { @@ -253,7 +251,7 @@ func (s *p2p) configure(ctx context.Context) error { } // New returns a new p2p instance. 
-func New(ctx context.Context, config *Config, lumeraClient lumera.Client, kr keyring.Keyring, rqstore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore, metricsDisabled bool) (P2P, error) { +func New(ctx context.Context, config *Config, lumeraClient lumera.Client, kr keyring.Keyring, rqstore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (P2P, error) { store, err := sqlite.NewStore(ctx, config.DataDir, cloud, mst) if err != nil { return nil, errors.Errorf("new kademlia store: %w", err) @@ -270,9 +268,8 @@ func New(ctx context.Context, config *Config, lumeraClient lumera.Client, kr key config: config, lumeraClient: lumeraClient, keyring: kr, // Store the keyring - rqstore: rqstore, - metricsDisabled: metricsDisabled, - }, nil + rqstore: rqstore, + }, nil } // LocalStore store data into the kademlia network diff --git a/pkg/p2pmetrics/metrics.go b/pkg/p2pmetrics/metrics.go deleted file mode 100644 index 165f0eaa..00000000 --- a/pkg/p2pmetrics/metrics.go +++ /dev/null @@ -1,397 +0,0 @@ -package p2pmetrics - -import ( - "context" - "sync" -) - -// Call represents a single per-node RPC outcome (store or retrieve). -type Call struct { - IP string `json:"ip"` - Address string `json:"address"` - Keys int `json:"keys"` - Success bool `json:"success"` - Error string `json:"error,omitempty"` - DurationMS int64 `json:"duration_ms"` - Noop bool `json:"noop,omitempty"` -} - -// -------- Lightweight hooks ------------------------- - -var ( - storeMu sync.RWMutex - storeHook = make(map[string]func(Call)) - - retrieveMu sync.RWMutex - retrieveHook = make(map[string]func(Call)) - - foundLocalMu sync.RWMutex - foundLocalCb = make(map[string]func(int)) -) - -// RegisterStoreHook registers a callback to receive store RPC calls for a task. 
-func RegisterStoreHook(taskID string, fn func(Call)) { - storeMu.Lock() - defer storeMu.Unlock() - if fn == nil { - delete(storeHook, taskID) - return - } - storeHook[taskID] = fn -} - -// UnregisterStoreHook removes the registered store callback for a task. -func UnregisterStoreHook(taskID string) { RegisterStoreHook(taskID, nil) } - -// RecordStore invokes the registered store callback for the given task, if any. -func RecordStore(taskID string, c Call) { - storeMu.RLock() - fn := storeHook[taskID] - storeMu.RUnlock() - if fn != nil { - fn(c) - } -} - -// RegisterRetrieveHook registers a callback to receive retrieve RPC calls. -func RegisterRetrieveHook(taskID string, fn func(Call)) { - retrieveMu.Lock() - defer retrieveMu.Unlock() - if fn == nil { - delete(retrieveHook, taskID) - return - } - retrieveHook[taskID] = fn -} - -// UnregisterRetrieveHook removes the registered retrieve callback for a task. -func UnregisterRetrieveHook(taskID string) { RegisterRetrieveHook(taskID, nil) } - -// RecordRetrieve invokes the registered retrieve callback for the given task. -func RecordRetrieve(taskID string, c Call) { - retrieveMu.RLock() - fn := retrieveHook[taskID] - retrieveMu.RUnlock() - if fn != nil { - fn(c) - } -} - -// RegisterFoundLocalHook registers a callback to receive found-local counts. -func RegisterFoundLocalHook(taskID string, fn func(int)) { - foundLocalMu.Lock() - defer foundLocalMu.Unlock() - if fn == nil { - delete(foundLocalCb, taskID) - return - } - foundLocalCb[taskID] = fn -} - -// UnregisterFoundLocalHook removes the registered found-local callback. -func UnregisterFoundLocalHook(taskID string) { RegisterFoundLocalHook(taskID, nil) } - -// ReportFoundLocal invokes the registered found-local callback for the task. 
-func ReportFoundLocal(taskID string, count int) { - foundLocalMu.RLock() - fn := foundLocalCb[taskID] - foundLocalMu.RUnlock() - if fn != nil { - fn(count) - } -} - -// -------- Minimal in-process collectors for events -------------------------- - -// Store session -type storeSession struct { - CallsByIP map[string][]Call - SymbolsFirstPass int - SymbolsTotal int - IDFilesCount int - DurationMS int64 -} - -var storeSessions = struct{ m map[string]*storeSession }{m: map[string]*storeSession{}} - -// RegisterStoreBridge hooks store callbacks into the store session collector. -func StartStoreCapture(taskID string) { - RegisterStoreHook(taskID, func(c Call) { - s := storeSessions.m[taskID] - if s == nil { - s = &storeSession{CallsByIP: map[string][]Call{}} - storeSessions.m[taskID] = s - } - key := c.IP - if key == "" { - key = c.Address - } - s.CallsByIP[key] = append(s.CallsByIP[key], c) - }) -} - -func StopStoreCapture(taskID string) { UnregisterStoreHook(taskID) } - -// SetStoreSummary sets store summary fields for the first pass and totals. -// -// - symbolsFirstPass: number of symbols sent during the first pass -// - symbolsTotal: total symbols available in the directory -// - idFilesCount: number of ID/metadata files included in the first combined batch -// - durationMS: elapsed time of the first-pass store phase -func SetStoreSummary(taskID string, symbolsFirstPass, symbolsTotal, idFilesCount int, durationMS int64) { - if taskID == "" { - return - } - s := storeSessions.m[taskID] - if s == nil { - s = &storeSession{CallsByIP: map[string][]Call{}} - storeSessions.m[taskID] = s - } - s.SymbolsFirstPass = symbolsFirstPass - s.SymbolsTotal = symbolsTotal - s.IDFilesCount = idFilesCount - s.DurationMS = durationMS -} - -// BuildStoreEventPayloadFromCollector builds the store event payload (minimal). 
-func BuildStoreEventPayloadFromCollector(taskID string) map[string]any { - s := storeSessions.m[taskID] - if s == nil { - return map[string]any{ - "store": map[string]any{ - "duration_ms": int64(0), - "symbols_first_pass": 0, - "symbols_total": 0, - "id_files_count": 0, - "success_rate_pct": float64(0), - "calls_by_ip": map[string][]Call{}, - }, - } - } - // Compute per-call success rate across first-pass store RPC attempts - totalCalls := 0 - successCalls := 0 - for _, calls := range s.CallsByIP { - for _, c := range calls { - totalCalls++ - if c.Success { - successCalls++ - } - } - } - var successRate float64 - if totalCalls > 0 { - successRate = float64(successCalls) / float64(totalCalls) * 100.0 - } - return map[string]any{ - "store": map[string]any{ - "duration_ms": s.DurationMS, - "symbols_first_pass": s.SymbolsFirstPass, - "symbols_total": s.SymbolsTotal, - "id_files_count": s.IDFilesCount, - "success_rate_pct": successRate, - "calls_by_ip": s.CallsByIP, - }, - } -} - -// Retrieve session -type retrieveSession struct { - mu sync.RWMutex - CallsByIP map[string][]Call - FoundLocal int - FoundNet int - Keys int - Required int - RetrieveMS int64 - DecodeMS int64 -} - -var retrieveSessions = struct{ m map[string]*retrieveSession }{m: map[string]*retrieveSession{}} - -// internal event channel for retrieve metrics (per task) -type retrieveEvent struct { - typ int // 0: per-node call, 1: found-local update - call Call - n int -} - -var retrieveEventChans = struct { - mu sync.Mutex - m map[string]chan retrieveEvent -}{m: map[string]chan retrieveEvent{}} - -// StartRetrieveCapture hooks retrieve callbacks into a buffered channel and a -// single goroutine that serializes updates to avoid concurrent map writes. 
-func StartRetrieveCapture(taskID string) { - // Create or get session upfront - s := retrieveSessions.m[taskID] - if s == nil { - s = &retrieveSession{CallsByIP: map[string][]Call{}} - retrieveSessions.m[taskID] = s - } - - // Per-task buffered channel - ch := make(chan retrieveEvent, 4096) - retrieveEventChans.mu.Lock() - retrieveEventChans.m[taskID] = ch - retrieveEventChans.mu.Unlock() - - // Worker goroutine to serialize writes - go func(taskID string, ch <-chan retrieveEvent) { - for ev := range ch { - sess := retrieveSessions.m[taskID] - if sess == nil { - sess = &retrieveSession{CallsByIP: map[string][]Call{}} - retrieveSessions.m[taskID] = sess - } - switch ev.typ { - case 0: // per-node call - key := ev.call.IP - if key == "" { - key = ev.call.Address - } - sess.mu.Lock() - if sess.CallsByIP == nil { - sess.CallsByIP = map[string][]Call{} - } - sess.CallsByIP[key] = append(sess.CallsByIP[key], ev.call) - sess.mu.Unlock() - case 1: // found-local update - sess.FoundLocal = ev.n - } - } - }(taskID, ch) - - // Register hooks that enqueue events (non-blocking) - RegisterRetrieveHook(taskID, func(c Call) { - retrieveEventChans.mu.Lock() - ch, ok := retrieveEventChans.m[taskID] - retrieveEventChans.mu.Unlock() - if ok { - select { - case ch <- retrieveEvent{typ: 0, call: c}: - default: // drop if buffer is full - } - } - }) - RegisterFoundLocalHook(taskID, func(n int) { - retrieveEventChans.mu.Lock() - ch, ok := retrieveEventChans.m[taskID] - retrieveEventChans.mu.Unlock() - if ok { - select { - case ch <- retrieveEvent{typ: 1, n: n}: - default: - } - } - }) -} - -func StopRetrieveCapture(taskID string) { - UnregisterRetrieveHook(taskID) - UnregisterFoundLocalHook(taskID) - retrieveEventChans.mu.Lock() - if ch, ok := retrieveEventChans.m[taskID]; ok { - delete(retrieveEventChans.m, taskID) - close(ch) - } - retrieveEventChans.mu.Unlock() -} - -// SetRetrieveBatchSummary sets counts for a retrieval attempt. 
-func SetRetrieveBatchSummary(taskID string, keys, required, foundLocal, foundNet int, retrieveMS int64) { - if taskID == "" { - return - } - s := retrieveSessions.m[taskID] - if s == nil { - s = &retrieveSession{CallsByIP: map[string][]Call{}} - retrieveSessions.m[taskID] = s - } - s.Keys = keys - s.Required = required - s.FoundLocal = foundLocal - s.FoundNet = foundNet - s.RetrieveMS = retrieveMS -} - -// SetRetrieveSummary sets timing info for retrieve/decode phases. -func SetRetrieveSummary(taskID string, retrieveMS, decodeMS int64) { - if taskID == "" { - return - } - s := retrieveSessions.m[taskID] - if s == nil { - s = &retrieveSession{CallsByIP: map[string][]Call{}} - retrieveSessions.m[taskID] = s - } - s.RetrieveMS = retrieveMS - s.DecodeMS = decodeMS -} - -// BuildDownloadEventPayloadFromCollector builds the download section payload. -func BuildDownloadEventPayloadFromCollector(taskID string) map[string]any { - s := retrieveSessions.m[taskID] - if s == nil { - return map[string]any{ - "retrieve": map[string]any{ - "keys": 0, - "required": 0, - "found_local": 0, - "found_net": 0, - "retrieve_ms": int64(0), - "decode_ms": int64(0), - "calls_by_ip": map[string][]Call{}, - }, - } - } - // Create a snapshot copy of CallsByIP to avoid concurrent map access - s.mu.RLock() - callsCopy := make(map[string][]Call, len(s.CallsByIP)) - for k, v := range s.CallsByIP { - vv := make([]Call, len(v)) - copy(vv, v) - callsCopy[k] = vv - } - s.mu.RUnlock() - - return map[string]any{ - "retrieve": map[string]any{ - "keys": s.Keys, - "required": s.Required, - "found_local": s.FoundLocal, - "found_net": s.FoundNet, - "retrieve_ms": s.RetrieveMS, - "decode_ms": s.DecodeMS, - "calls_by_ip": callsCopy, - }, - } -} - -// -------- Context helpers (dedicated to metrics tagging) -------------------- - -type ctxKey string - -var taskIDKey ctxKey = "p2pmetrics-task-id" - -// WithTaskID returns a child context with the metrics task ID set. 
-func WithTaskID(ctx context.Context, taskID string) context.Context { - if ctx == nil { - return context.Background() - } - return context.WithValue(ctx, taskIDKey, taskID) -} - -// TaskIDFromContext extracts the metrics task ID from context (or ""). -func TaskIDFromContext(ctx context.Context) string { - if ctx == nil { - return "" - } - if v := ctx.Value(taskIDKey); v != nil { - if s, ok := v.(string); ok { - return s - } - } - return "" -} diff --git a/proto/proto.go b/proto/proto.go deleted file mode 100644 index 34045007..00000000 --- a/proto/proto.go +++ /dev/null @@ -1,6 +0,0 @@ -package proto - -const ( - // MetadataKeySessID is unique numeric for every registration process, encompasses for all connections. - MetadataKeySessID = "sessID" -) diff --git a/proto/supernode/service.proto b/proto/supernode/service.proto new file mode 100644 index 00000000..9725f84a --- /dev/null +++ b/proto/supernode/service.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; +package supernode; +option go_package = "github.com/LumeraProtocol/supernode/v2/gen/supernode"; + +import "supernode/status.proto"; +import "google/api/annotations.proto"; + +// SupernodeService provides status information for all services +service SupernodeService { + rpc GetStatus(StatusRequest) returns (StatusResponse) { + option (google.api.http) = { + get: "/api/v1/status" + }; + } + + rpc ListServices(ListServicesRequest) returns (ListServicesResponse) { + option (google.api.http) = { + get: "/api/v1/services" + }; + } +} + +message ListServicesRequest {} + +message ListServicesResponse { + repeated ServiceInfo services = 1; + int32 count = 2; +} + +message ServiceInfo { + string name = 1; + repeated string methods = 2; +} + diff --git a/proto/supernode/supernode.proto b/proto/supernode/status.proto similarity index 71% rename from proto/supernode/supernode.proto rename to proto/supernode/status.proto index 50597e90..7cafe908 100644 --- a/proto/supernode/supernode.proto +++ b/proto/supernode/status.proto @@ 
-2,41 +2,13 @@ syntax = "proto3"; package supernode; option go_package = "github.com/LumeraProtocol/supernode/v2/gen/supernode"; -import "google/api/annotations.proto"; - -// SupernodeService provides status information for all services -service SupernodeService { - rpc GetStatus(StatusRequest) returns (StatusResponse) { - option (google.api.http) = { - get: "/api/v1/status" - }; - } - - rpc ListServices(ListServicesRequest) returns (ListServicesResponse) { - option (google.api.http) = { - get: "/api/v1/services" - }; - } -} - +// StatusRequest controls optional metrics in the status response message StatusRequest { // Optional: include detailed P2P metrics in the response // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true bool include_p2p_metrics = 1; } -message ListServicesRequest {} - -message ListServicesResponse { - repeated ServiceInfo services = 1; - int32 count = 2; -} - -message ServiceInfo { - string name = 1; - repeated string methods = 2; -} - // The StatusResponse represents system status with clear organization message StatusResponse { string version = 1; // Supernode version @@ -46,7 +18,7 @@ message StatusResponse { message Resources { message CPU { double usage_percent = 1; // CPU usage percentage (0-100) - int32 cores = 2; // Number of CPU cores + int32 cores = 2; // Number of CPU cores } message Memory { @@ -154,39 +126,8 @@ message StatusResponse { repeated BanEntry ban_list = 4; DatabaseStats database = 5; DiskStatus disk = 6; - - // Last handled BatchStoreData requests (most recent first) - message RecentBatchStoreEntry { - int64 time_unix = 1; - string sender_id = 2; - string sender_ip = 3; - int32 keys = 4; - int64 duration_ms = 5; - bool ok = 6; - string error = 7; - } - - // Last handled BatchGetValues requests (most recent first) - message RecentBatchRetrieveEntry { - int64 time_unix = 1; - string sender_id = 2; - string sender_ip = 3; - int32 requested = 4; - int32 found = 5; - int64 duration_ms = 6; - 
string error = 7; - } - - repeated RecentBatchStoreEntry recent_batch_store = 7; - repeated RecentBatchRetrieveEntry recent_batch_retrieve = 8; - - // Per-IP buckets: last 10 per sender IP - message RecentBatchStoreList { repeated RecentBatchStoreEntry entries = 1; } - message RecentBatchRetrieveList { repeated RecentBatchRetrieveEntry entries = 1; } - map recent_batch_store_by_ip = 9; - map recent_batch_retrieve_by_ip = 10; } P2PMetrics p2p_metrics = 9; - } + diff --git a/sdk/README.md b/sdk/README.md index cf2501cf..f8385eef 100644 --- a/sdk/README.md +++ b/sdk/README.md @@ -288,7 +288,7 @@ if err != nil { **Returns:** - `error`: Error if the task doesn't exist or deletion fails -### GetSupernodeStatus +### GetSupernodeStatus (Status API) Retrieves the current status and resource information of a specific supernode. @@ -305,27 +305,12 @@ if err != nil { - `supernodeAddress string`: Cosmos address of the supernode **Returns:** -- `*supernodeservice.SupernodeStatusresponse`: Status information including CPU usage, memory stats, and active services -- `error`: Error if the supernode is unreachable or query fails +- `*supernode.StatusResponse`: Status information including CPU usage, memory stats, active services, and P2P metrics +- `error`: Error if the supernode is unreachable or the query fails -Include detailed P2P metrics (optional): - -By default, peer info and P2P metrics are not returned to keep calls lightweight. 
To include them, set an option in the context: - -```go -import snsvc "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" - -// Opt-in via context -ctxWithMetrics := snsvc.WithIncludeP2PMetrics(ctx) -status, err := client.GetSupernodeStatus(ctxWithMetrics, "lumera1abc...") -if err != nil { - // handle error -} - -// Access optional fields when present -fmt.Println("Peers:", status.Network.PeersCount) -fmt.Println("DHT hot path bans:", status.P2PMetrics.DhtMetrics.HotPathBanIncrements) -``` +Notes: +- The SDK always requests P2P metrics to ensure `Network.PeersCount` is populated for eligibility checks. +- Status response is the generated type; no mapping layer in the SDK. ### SubscribeToEvents diff --git a/sdk/action/client.go b/sdk/action/client.go index fc3c2d7e..db5a932f 100644 --- a/sdk/action/client.go +++ b/sdk/action/client.go @@ -4,8 +4,8 @@ import ( "context" "fmt" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" - "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" "github.com/LumeraProtocol/supernode/v2/sdk/config" "github.com/LumeraProtocol/supernode/v2/sdk/event" "github.com/LumeraProtocol/supernode/v2/sdk/log" @@ -26,7 +26,7 @@ type Client interface { GetTask(ctx context.Context, taskID string) (*task.TaskEntry, bool) SubscribeToEvents(ctx context.Context, eventType event.EventType, handler event.Handler) error SubscribeToAllEvents(ctx context.Context, handler event.Handler) error - GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*supernodeservice.SupernodeStatusresponse, error) + GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*pb.StatusResponse, error) // DownloadCascade downloads cascade to outputDir, filename determined by action ID DownloadCascade(ctx context.Context, actionID, outputDir, signature string) (string, error) } @@ -151,7 +151,7 @@ func (c *ClientImpl) SubscribeToAllEvents(ctx 
context.Context, handler event.Han } // GetSupernodeStatus retrieves the status of a specific supernode by its address -func (c *ClientImpl) GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*supernodeservice.SupernodeStatusresponse, error) { +func (c *ClientImpl) GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*pb.StatusResponse, error) { if supernodeAddress == "" { c.logger.Error(ctx, "Empty supernode address provided") return nil, fmt.Errorf("supernode address cannot be empty") diff --git a/sdk/adapters/supernodeservice/adapter.go b/sdk/adapters/supernodeservice/adapter.go index 0d326a17..9712915c 100644 --- a/sdk/adapters/supernodeservice/adapter.go +++ b/sdk/adapters/supernodeservice/adapter.go @@ -2,7 +2,6 @@ package supernodeservice import ( "context" - "encoding/json" "fmt" "io" "os" @@ -345,30 +344,7 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca event.KeyTaskID: in.TaskId, event.KeyActionID: in.ActionID, } - // For artefacts stored, parse JSON payload with metrics (new minimal shape) - if resp.EventType == cascade.SupernodeEventType_ARTEFACTS_STORED { - var payload map[string]any - if err := json.Unmarshal([]byte(resp.Message), &payload); err == nil { - if store, ok := payload["store"].(map[string]any); ok { - if v, ok := store["duration_ms"].(float64); ok { - edata[event.KeyStoreDurationMS] = int64(v) - } - if v, ok := store["symbols_first_pass"].(float64); ok { - edata[event.KeyStoreSymbolsFirstPass] = int64(v) - } - if v, ok := store["symbols_total"].(float64); ok { - edata[event.KeyStoreSymbolsTotal] = int64(v) - } - if v, ok := store["id_files_count"].(float64); ok { - edata[event.KeyStoreIDFilesCount] = int64(v) - } - if v, ok := store["calls_by_ip"]; ok { - edata[event.KeyStoreCallsByIP] = v - } - } - } - } - in.EventLogger(ctx, toSdkEventWithMessage(resp.EventType, resp.Message), resp.Message, edata) + in.EventLogger(ctx, toSdkEvent(resp.EventType), resp.Message, edata) } // 
Optionally capture the final response @@ -395,18 +371,18 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca }, nil } -func (a *cascadeAdapter) GetSupernodeStatus(ctx context.Context) (SupernodeStatusresponse, error) { - // Gate P2P metrics via context option to keep API backward compatible - req := &supernode.StatusRequest{IncludeP2PMetrics: includeP2PMetrics(ctx)} +func (a *cascadeAdapter) GetSupernodeStatus(ctx context.Context) (*supernode.StatusResponse, error) { + // Always include P2P metrics to populate peers count for eligibility checks + req := &supernode.StatusRequest{IncludeP2PMetrics: true} resp, err := a.statusClient.GetStatus(ctx, req) if err != nil { a.logger.Error(ctx, "Failed to get supernode status", "error", err) - return SupernodeStatusresponse{}, fmt.Errorf("failed to get supernode status: %w", err) + return nil, fmt.Errorf("failed to get supernode status: %w", err) } a.logger.Debug(ctx, "Supernode status retrieved", "status", resp) - return *toSdkSupernodeStatus(resp), nil + return resp, nil } // CascadeSupernodeDownload downloads a file from a supernode gRPC stream @@ -471,45 +447,6 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( event.KeyEventType: x.Event.EventType, event.KeyMessage: x.Event.Message, } - // Parse detailed metrics for downloaded event if JSON payload provided (new minimal shape) - if x.Event.EventType == cascade.SupernodeEventType_ARTEFACTS_DOWNLOADED { - var payload map[string]any - if err := json.Unmarshal([]byte(x.Event.Message), &payload); err == nil { - if retrieve, ok := payload["retrieve"].(map[string]any); ok { - if v, ok := retrieve["found_local"].(float64); ok { - edata[event.KeyRetrieveFoundLocal] = int64(v) - } - if v, ok := retrieve["retrieve_ms"].(float64); ok { - edata[event.KeyRetrieveMS] = int64(v) - } - if v, ok := retrieve["decode_ms"].(float64); ok { - edata[event.KeyDecodeMS] = int64(v) - } - if v, ok := retrieve["calls_by_ip"]; ok { - 
edata[event.KeyRetrieveCallsByIP] = v - } - // Optional additional retrieve fields - if v, ok := retrieve["keys"].(float64); ok { - edata[event.KeyRetrieveKeys] = int64(v) - } - if v, ok := retrieve["required"].(float64); ok { - edata[event.KeyRetrieveRequired] = int64(v) - } - if v, ok := retrieve["found_net"].(float64); ok { - edata[event.KeyRetrieveFoundNet] = int64(v) - } - if v, ok := retrieve["target_required_percent"].(float64); ok { - edata[event.KeyTargetRequiredPercent] = v - } - if v, ok := retrieve["target_required_count"].(float64); ok { - edata[event.KeyTargetRequiredCount] = int64(v) - } - if v, ok := retrieve["total_symbols"].(float64); ok { - edata[event.KeyTotalSymbols] = int64(v) - } - } - } - } // Avoid blocking Recv loop on event handling; dispatch asynchronously evtType := toSdkEvent(x.Event.EventType) go func(ed event.EventData, et event.EventType, msg string) { @@ -614,185 +551,3 @@ func toSdkEvent(e cascade.SupernodeEventType) event.EventType { return event.SupernodeUnknown } } - -// toSdkEventWithMessage extends event mapping using message content for finer granularity -func toSdkEventWithMessage(e cascade.SupernodeEventType, msg string) event.EventType { - // Detect finalize simulation pass piggybacked on RQID_VERIFIED - if e == cascade.SupernodeEventType_RQID_VERIFIED && msg == "finalize action simulation passed" { - return event.SupernodeFinalizeSimulated - } - return toSdkEvent(e) -} - -func toSdkSupernodeStatus(resp *supernode.StatusResponse) *SupernodeStatusresponse { - result := &SupernodeStatusresponse{} - result.Version = resp.Version - result.UptimeSeconds = resp.UptimeSeconds - - // Convert Resources data - if resp.Resources != nil { - // Convert CPU data - if resp.Resources.Cpu != nil { - result.Resources.CPU.UsagePercent = resp.Resources.Cpu.UsagePercent - result.Resources.CPU.Cores = resp.Resources.Cpu.Cores - } - - // Convert Memory data - if resp.Resources.Memory != nil { - result.Resources.Memory.TotalGB = 
resp.Resources.Memory.TotalGb - result.Resources.Memory.UsedGB = resp.Resources.Memory.UsedGb - result.Resources.Memory.AvailableGB = resp.Resources.Memory.AvailableGb - result.Resources.Memory.UsagePercent = resp.Resources.Memory.UsagePercent - } - - // Convert Storage data - result.Resources.Storage = make([]StorageInfo, 0, len(resp.Resources.StorageVolumes)) - for _, storage := range resp.Resources.StorageVolumes { - result.Resources.Storage = append(result.Resources.Storage, StorageInfo{ - Path: storage.Path, - TotalBytes: storage.TotalBytes, - UsedBytes: storage.UsedBytes, - AvailableBytes: storage.AvailableBytes, - UsagePercent: storage.UsagePercent, - }) - } - - // Copy hardware summary - result.Resources.HardwareSummary = resp.Resources.HardwareSummary - } - - // Convert RunningTasks data - result.RunningTasks = make([]ServiceTasks, 0, len(resp.RunningTasks)) - for _, service := range resp.RunningTasks { - result.RunningTasks = append(result.RunningTasks, ServiceTasks{ - ServiceName: service.ServiceName, - TaskIDs: service.TaskIds, - TaskCount: service.TaskCount, - }) - } - - // Convert RegisteredServices data - result.RegisteredServices = make([]string, len(resp.RegisteredServices)) - copy(result.RegisteredServices, resp.RegisteredServices) - - // Convert Network data - if resp.Network != nil { - result.Network.PeersCount = resp.Network.PeersCount - result.Network.PeerAddresses = make([]string, len(resp.Network.PeerAddresses)) - copy(result.Network.PeerAddresses, resp.Network.PeerAddresses) - } - - // Copy rank and IP address - result.Rank = resp.Rank - result.IPAddress = resp.IpAddress - - // Map optional P2P metrics - if resp.P2PMetrics != nil { - // DHT metrics - if resp.P2PMetrics.DhtMetrics != nil { - // Store success recent - for _, p := range resp.P2PMetrics.DhtMetrics.StoreSuccessRecent { - result.P2PMetrics.DhtMetrics.StoreSuccessRecent = append(result.P2PMetrics.DhtMetrics.StoreSuccessRecent, struct { - TimeUnix int64 - Requests int32 - 
Successful int32 - SuccessRate float64 - }{ - TimeUnix: p.TimeUnix, - Requests: p.Requests, - Successful: p.Successful, - SuccessRate: p.SuccessRate, - }) - } - // Batch retrieve recent - for _, p := range resp.P2PMetrics.DhtMetrics.BatchRetrieveRecent { - result.P2PMetrics.DhtMetrics.BatchRetrieveRecent = append(result.P2PMetrics.DhtMetrics.BatchRetrieveRecent, struct { - TimeUnix int64 - Keys int32 - Required int32 - FoundLocal int32 - FoundNetwork int32 - DurationMS int64 - }{ - TimeUnix: p.TimeUnix, - Keys: p.Keys, - Required: p.Required, - FoundLocal: p.FoundLocal, - FoundNetwork: p.FoundNetwork, - DurationMS: p.DurationMs, - }) - } - result.P2PMetrics.DhtMetrics.HotPathBannedSkips = resp.P2PMetrics.DhtMetrics.HotPathBannedSkips - result.P2PMetrics.DhtMetrics.HotPathBanIncrements = resp.P2PMetrics.DhtMetrics.HotPathBanIncrements - } - - // Network handle metrics - if resp.P2PMetrics.NetworkHandleMetrics != nil { - if result.P2PMetrics.NetworkHandleMetrics == nil { - result.P2PMetrics.NetworkHandleMetrics = map[string]struct { - Total int64 - Success int64 - Failure int64 - Timeout int64 - }{} - } - for k, v := range resp.P2PMetrics.NetworkHandleMetrics { - result.P2PMetrics.NetworkHandleMetrics[k] = struct { - Total int64 - Success int64 - Failure int64 - Timeout int64 - }{ - Total: v.Total, - Success: v.Success, - Failure: v.Failure, - Timeout: v.Timeout, - } - } - } - - // Conn pool metrics - if resp.P2PMetrics.ConnPoolMetrics != nil { - if result.P2PMetrics.ConnPoolMetrics == nil { - result.P2PMetrics.ConnPoolMetrics = map[string]int64{} - } - for k, v := range resp.P2PMetrics.ConnPoolMetrics { - result.P2PMetrics.ConnPoolMetrics[k] = v - } - } - - // Ban list - for _, b := range resp.P2PMetrics.BanList { - result.P2PMetrics.BanList = append(result.P2PMetrics.BanList, struct { - ID string - IP string - Port uint32 - Count int32 - CreatedAtUnix int64 - AgeSeconds int64 - }{ - ID: b.Id, - IP: b.Ip, - Port: b.Port, - Count: b.Count, - CreatedAtUnix: 
b.CreatedAtUnix, - AgeSeconds: b.AgeSeconds, - }) - } - - // Database - if resp.P2PMetrics.Database != nil { - result.P2PMetrics.Database.P2PDBSizeMB = resp.P2PMetrics.Database.P2PDbSizeMb - result.P2PMetrics.Database.P2PDBRecordsCount = resp.P2PMetrics.Database.P2PDbRecordsCount - } - - // Disk - if resp.P2PMetrics.Disk != nil { - result.P2PMetrics.Disk.AllMB = resp.P2PMetrics.Disk.AllMb - result.P2PMetrics.Disk.UsedMB = resp.P2PMetrics.Disk.UsedMb - result.P2PMetrics.Disk.FreeMB = resp.P2PMetrics.Disk.FreeMb - } - } - - return result -} diff --git a/sdk/adapters/supernodeservice/options.go b/sdk/adapters/supernodeservice/options.go deleted file mode 100644 index 547a28c9..00000000 --- a/sdk/adapters/supernodeservice/options.go +++ /dev/null @@ -1,29 +0,0 @@ -package supernodeservice - -import "context" - -// internal context key to toggle P2P metrics in status requests -type ctxKey string - -const ctxKeyIncludeP2P ctxKey = "include_p2p_metrics" - -// WithIncludeP2PMetrics returns a child context that requests detailed P2P metrics -// (and peer info) in status responses. -func WithIncludeP2PMetrics(ctx context.Context) context.Context { - return context.WithValue(ctx, ctxKeyIncludeP2P, true) -} - -// WithP2PMetrics allows explicitly setting the include flag. -func WithP2PMetrics(ctx context.Context, include bool) context.Context { - return context.WithValue(ctx, ctxKeyIncludeP2P, include) -} - -// includeP2PMetrics reads the flag from context; defaults to false when unset. 
-func includeP2PMetrics(ctx context.Context) bool { - v := ctx.Value(ctxKeyIncludeP2P) - if b, ok := v.(bool); ok { - return b - } - return false -} - diff --git a/sdk/adapters/supernodeservice/types.go b/sdk/adapters/supernodeservice/types.go index 4dbdd7b6..89e04cae 100644 --- a/sdk/adapters/supernodeservice/types.go +++ b/sdk/adapters/supernodeservice/types.go @@ -1,11 +1,12 @@ package supernodeservice import ( - "context" + "context" - "google.golang.org/grpc" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "google.golang.org/grpc" - "github.com/LumeraProtocol/supernode/v2/sdk/event" + "github.com/LumeraProtocol/supernode/v2/sdk/event" ) type LoggerFunc func( @@ -28,93 +29,7 @@ type CascadeSupernodeRegisterResponse struct { TxHash string } -// ServiceTasks contains task information for a specific service -type ServiceTasks struct { - ServiceName string - TaskIDs []string - TaskCount int32 -} - -// StorageInfo contains storage metrics for a specific path -type StorageInfo struct { - Path string - TotalBytes uint64 - UsedBytes uint64 - AvailableBytes uint64 - UsagePercent float64 -} - -type SupernodeStatusresponse struct { - Version string // Supernode version - UptimeSeconds uint64 // Uptime in seconds - Resources struct { - CPU struct { - UsagePercent float64 - Cores int32 - } - Memory struct { - TotalGB float64 - UsedGB float64 - AvailableGB float64 - UsagePercent float64 - } - Storage []StorageInfo - HardwareSummary string // Formatted hardware summary - } - RunningTasks []ServiceTasks // Services with running tasks - RegisteredServices []string // All available service names - Network struct { - PeersCount int32 // Number of connected peers - PeerAddresses []string // List of peer addresses - } - Rank int32 // Rank in top supernodes list (0 if not in top list) - IPAddress string // Supernode IP address with port - // Optional detailed P2P metrics (present when requested) - P2PMetrics struct { - DhtMetrics struct { - StoreSuccessRecent []struct 
{ - TimeUnix int64 - Requests int32 - Successful int32 - SuccessRate float64 - } - BatchRetrieveRecent []struct { - TimeUnix int64 - Keys int32 - Required int32 - FoundLocal int32 - FoundNetwork int32 - DurationMS int64 - } - HotPathBannedSkips int64 - HotPathBanIncrements int64 - } - NetworkHandleMetrics map[string]struct { - Total int64 - Success int64 - Failure int64 - Timeout int64 - } - ConnPoolMetrics map[string]int64 - BanList []struct { - ID string - IP string - Port uint32 - Count int32 - CreatedAtUnix int64 - AgeSeconds int64 - } - Database struct { - P2PDBSizeMB float64 - P2PDBRecordsCount int64 - } - Disk struct { - AllMB float64 - UsedMB float64 - FreeMB float64 - } - } -} +// Use generated proto types directly for status type CascadeSupernodeDownloadRequest struct { ActionID string TaskID string @@ -131,7 +46,7 @@ type CascadeSupernodeDownloadResponse struct { //go:generate mockery --name=CascadeServiceClient --output=testutil/mocks --outpkg=mocks --filename=cascade_service_mock.go type CascadeServiceClient interface { - CascadeSupernodeRegister(ctx context.Context, in *CascadeSupernodeRegisterRequest, opts ...grpc.CallOption) (*CascadeSupernodeRegisterResponse, error) - GetSupernodeStatus(ctx context.Context) (SupernodeStatusresponse, error) - CascadeSupernodeDownload(ctx context.Context, in *CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*CascadeSupernodeDownloadResponse, error) + CascadeSupernodeRegister(ctx context.Context, in *CascadeSupernodeRegisterRequest, opts ...grpc.CallOption) (*CascadeSupernodeRegisterResponse, error) + GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) + CascadeSupernodeDownload(ctx context.Context, in *CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*CascadeSupernodeDownloadResponse, error) } diff --git a/sdk/event/keys.go b/sdk/event/keys.go index 9d68b818..04e27bd3 100644 --- a/sdk/event/keys.go +++ b/sdk/event/keys.go @@ -30,26 +30,5 @@ const ( KeyTaskID EventDataKey = 
"task_id" KeyActionID EventDataKey = "action_id" - // Removed legacy cascade storage metrics keys (meta/sym timings and nodes) - - // Combined store metrics (metadata + symbols) — new minimal only - KeyStoreDurationMS EventDataKey = "store_duration_ms" - // New minimal store metrics - KeyStoreSymbolsFirstPass EventDataKey = "store_symbols_first_pass" - KeyStoreSymbolsTotal EventDataKey = "store_symbols_total" - KeyStoreIDFilesCount EventDataKey = "store_id_files_count" - KeyStoreCallsByIP EventDataKey = "store_calls_by_ip" - - // Download (retrieve) detailed metrics — new minimal only - KeyRetrieveFoundLocal EventDataKey = "retrieve_found_local" - KeyRetrieveMS EventDataKey = "retrieve_ms" - KeyDecodeMS EventDataKey = "decode_ms" - KeyRetrieveCallsByIP EventDataKey = "retrieve_calls_by_ip" - // Additional retrieve summary fields - KeyRetrieveKeys EventDataKey = "retrieve_keys" - KeyRetrieveRequired EventDataKey = "retrieve_required" - KeyRetrieveFoundNet EventDataKey = "retrieve_found_net" - KeyTargetRequiredPercent EventDataKey = "target_required_percent" - KeyTargetRequiredCount EventDataKey = "target_required_count" - KeyTotalSymbols EventDataKey = "total_symbols" + // Removed legacy cascade storage/retrieve metrics keys ) diff --git a/sdk/net/client.go b/sdk/net/client.go index dc8950df..b88fe75b 100644 --- a/sdk/net/client.go +++ b/sdk/net/client.go @@ -1,11 +1,12 @@ package net import ( - "context" + "context" - "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" ) // SupernodeClient defines the interface for communicating with supernodes @@ -15,7 +16,7 @@ type SupernodeClient interface { // HealthCheck performs a health check on the supernode HealthCheck(ctx 
context.Context) (*grpc_health_v1.HealthCheckResponse, error) - GetSupernodeStatus(ctx context.Context) (*supernodeservice.SupernodeStatusresponse, error) + GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) // Download downloads the cascade action file Download(ctx context.Context, in *supernodeservice.CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*supernodeservice.CascadeSupernodeDownloadResponse, error) diff --git a/sdk/net/impl.go b/sdk/net/impl.go index ab0f7b28..cd6bf10f 100644 --- a/sdk/net/impl.go +++ b/sdk/net/impl.go @@ -1,20 +1,21 @@ package net import ( - "context" - "fmt" - - "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" - ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" - "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials/alts/conn" - "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/client" - "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" - "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" - "github.com/LumeraProtocol/supernode/v2/sdk/log" - - "github.com/cosmos/cosmos-sdk/crypto/keyring" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" + "context" + "fmt" + + "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" + ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" + "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials/alts/conn" + "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/client" + "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" + "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" + "github.com/LumeraProtocol/supernode/v2/sdk/log" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" ) // supernodeClient implements the SupernodeClient interface @@ -128,14 +129,14 @@ func (c *supernodeClient) HealthCheck(ctx context.Context) 
(*grpc_health_v1.Heal return resp, nil } -func (c *supernodeClient) GetSupernodeStatus(ctx context.Context) (*supernodeservice.SupernodeStatusresponse, error) { - resp, err := c.cascadeClient.GetSupernodeStatus(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get supernode status: %w", err) - } +func (c *supernodeClient) GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) { + resp, err := c.cascadeClient.GetSupernodeStatus(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get supernode status: %w", err) + } - c.logger.Debug(ctx, "Supernode status retrieved successfully") - return &resp, nil + c.logger.Debug(ctx, "Supernode status retrieved successfully") + return resp, nil } // Download downloads the cascade action file diff --git a/sdk/task/task.go b/sdk/task/task.go index 2a87b201..bb402975 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -11,7 +11,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" txmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" - snsvc "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" "github.com/LumeraProtocol/supernode/v2/sdk/config" "github.com/LumeraProtocol/supernode/v2/sdk/event" "github.com/LumeraProtocol/supernode/v2/sdk/log" @@ -136,8 +135,8 @@ func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { return false } - // Then check P2P peers count via status (include P2P metrics) - status, err := client.GetSupernodeStatus(snsvc.WithIncludeP2PMetrics(ctx)) + // Then check P2P peers count via status + status, err := client.GetSupernodeStatus(ctx) if err != nil { return false } diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 4529aec2..8c754e47 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -92,11 +92,8 @@ The supernode will connect to the Lumera network and begin participating in the logtrace.Fatal(ctx, "Failed to 
initialize RaptorQ store", logtrace.Fields{"error": err.Error()}) } - // Manually set the disable flag at the highest level - disableMetrics := true - - // Initialize P2P service with explicit disable flag - p2pService, err := initP2PService(ctx, appConfig, lumeraClient, kr, rqStore, nil, nil, disableMetrics) + // Initialize P2P service + p2pService, err := initP2PService(ctx, appConfig, lumeraClient, kr, rqStore, nil, nil) if err != nil { logtrace.Fatal(ctx, "Failed to initialize P2P service", logtrace.Fields{"error": err.Error()}) } @@ -108,19 +105,18 @@ The supernode will connect to the Lumera network and begin participating in the } // Configure cascade service - cService := cascadeService.NewCascadeService( - &cascadeService.Config{ - Config: common.Config{ - SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, - }, - RqFilesDir: appConfig.GetRaptorQFilesDir(), - MetricsDisabled: disableMetrics, - }, - lumeraClient, - *p2pService, - codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), - rqStore, - ) + cService := cascadeService.NewCascadeService( + &cascadeService.Config{ + Config: common.Config{ + SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, + }, + RqFilesDir: appConfig.GetRaptorQFilesDir(), + }, + lumeraClient, + *p2pService, + codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), + rqStore, + ) // Create cascade action server cascadeActionServer := cascade.NewCascadeActionServer(cService) @@ -202,7 +198,7 @@ func init() { } // initP2PService initializes the P2P service -func initP2PService(ctx context.Context, config *config.Config, lumeraClient lumera.Client, kr cKeyring.Keyring, rqStore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore, metricsDisabled bool) (*p2p.P2P, error) { +func initP2PService(ctx context.Context, config *config.Config, lumeraClient lumera.Client, kr cKeyring.Keyring, rqStore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (*p2p.P2P, error) { // Get the supernode address 
from the keyring keyInfo, err := kr.Key(config.SupernodeConfig.KeyName) if err != nil { @@ -218,7 +214,7 @@ func initP2PService(ctx context.Context, config *config.Config, lumeraClient lum logtrace.Debug(ctx, "Initializing P2P service", logtrace.Fields{"address": p2pConfig.ListenAddress, "port": p2pConfig.Port, "data_dir": p2pConfig.DataDir, "supernode_id": address.String()}) - p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst, metricsDisabled) + p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst) if err != nil { return nil, fmt.Errorf("failed to initialize p2p service: %w", err) } diff --git a/supernode/node/supernode/server/status_server.go b/supernode/node/supernode/server/status_server.go index d90b1e3e..8b061a3b 100644 --- a/supernode/node/supernode/server/status_server.go +++ b/supernode/node/supernode/server/status_server.go @@ -174,68 +174,7 @@ func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) pbpm.Disk.UsedMb = pm.Disk.UsedMB pbpm.Disk.FreeMb = pm.Disk.FreeMB - // Recent batch store - for _, e := range pm.RecentBatchStore { - pbpm.RecentBatchStore = append(pbpm.RecentBatchStore, &pb.StatusResponse_P2PMetrics_RecentBatchStoreEntry{ - TimeUnix: e.TimeUnix, - SenderId: e.SenderID, - SenderIp: e.SenderIP, - Keys: int32(e.Keys), - DurationMs: e.DurationMS, - Ok: e.OK, - Error: e.Error, - }) - } - // Recent batch retrieve - for _, e := range pm.RecentBatchRetrieve { - pbpm.RecentBatchRetrieve = append(pbpm.RecentBatchRetrieve, &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{ - TimeUnix: e.TimeUnix, - SenderId: e.SenderID, - SenderIp: e.SenderIP, - Requested: int32(e.Requested), - Found: int32(e.Found), - DurationMs: e.DurationMS, - Error: e.Error, - }) - } - - // Per-IP buckets - if pm.RecentBatchStoreByIP != nil { - pbpm.RecentBatchStoreByIp = map[string]*pb.StatusResponse_P2PMetrics_RecentBatchStoreList{} - for ip, list := range pm.RecentBatchStoreByIP { - pbList 
:= &pb.StatusResponse_P2PMetrics_RecentBatchStoreList{} - for _, e := range list { - pbList.Entries = append(pbList.Entries, &pb.StatusResponse_P2PMetrics_RecentBatchStoreEntry{ - TimeUnix: e.TimeUnix, - SenderId: e.SenderID, - SenderIp: e.SenderIP, - Keys: int32(e.Keys), - DurationMs: e.DurationMS, - Ok: e.OK, - Error: e.Error, - }) - } - pbpm.RecentBatchStoreByIp[ip] = pbList - } - } - if pm.RecentBatchRetrieveByIP != nil { - pbpm.RecentBatchRetrieveByIp = map[string]*pb.StatusResponse_P2PMetrics_RecentBatchRetrieveList{} - for ip, list := range pm.RecentBatchRetrieveByIP { - pbList := &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveList{} - for _, e := range list { - pbList.Entries = append(pbList.Entries, &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{ - TimeUnix: e.TimeUnix, - SenderId: e.SenderID, - SenderIp: e.SenderIP, - Requested: int32(e.Requested), - Found: int32(e.Found), - DurationMs: e.DurationMS, - Error: e.Error, - }) - } - pbpm.RecentBatchRetrieveByIp[ip] = pbList - } - } + // Detailed recent per-request lists removed from API response.P2PMetrics = pbpm } diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go index 7b5a51e4..d1fd6ab9 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/services/cascade/adaptors/p2p.go @@ -13,7 +13,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/p2p" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/LumeraProtocol/supernode/v2/supernode/services/common/storage" @@ -41,12 +40,11 @@ type P2PService interface { type p2pImpl struct { p2p p2p.Client rqStore rqstore.Store - metricsDisabled bool } // NewP2PService returns a concrete implementation of P2PService. 
-func NewP2PService(client p2p.Client, store rqstore.Store, metricsDisabled bool) P2PService { - return &p2pImpl{p2p: client, rqStore: store, metricsDisabled: metricsDisabled} +func NewP2PService(client p2p.Client, store rqstore.Store) P2PService { + return &p2pImpl{p2p: client, rqStore: store} } type StoreArtefactsRequest struct { @@ -59,12 +57,6 @@ type StoreArtefactsRequest struct { func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { logtrace.Info(ctx, "store: p2p start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) - // Optionally enable per-node store RPC capture for this task - if !p.metricsDisabled { - cm.StartStoreCapture(req.TaskID) - defer cm.StopStoreCapture(req.TaskID) - } - start := time.Now() firstPassSymbols, totalSymbols, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, req.ActionID, req.SymbolsDir, req.IDFiles) if err != nil { @@ -82,8 +74,7 @@ func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, if remaining == 0 { logtrace.Info(ctx, "store: dir empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) } - // Record store summary for later event emission - cm.SetStoreSummary(req.TaskID, firstPassSymbols, totalSymbols, len(req.IDFiles), dur) + // Metrics collection removed; logs retained return nil } @@ -166,7 +157,6 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action // Send as the same data type you use for symbols logtrace.Info(ctx, "store: batch send (first)", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) - bctx = cm.WithTaskID(bctx, taskID) err = p.p2p.StoreBatch(bctx, payload, storage.P2PDataRaptorQSymbol, taskID) cancel() if err != nil { @@ -264,7 +254,6 @@ 
func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fi } symCtx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) - symCtx = cm.WithTaskID(symCtx, taskID) defer cancel() logtrace.Info(ctx, "store: batch send (symbols)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) diff --git a/supernode/services/cascade/config.go b/supernode/services/cascade/config.go index 01401d41..df6abd1f 100644 --- a/supernode/services/cascade/config.go +++ b/supernode/services/cascade/config.go @@ -6,10 +6,8 @@ import ( // Config contains settings for the cascade service type Config struct { - common.Config `mapstructure:",squash" json:"-"` + common.Config `mapstructure:",squash" json:"-"` RaptorQServiceAddress string `mapstructure:"-" json:"-"` RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` - // MetricsDisabled toggles upload/download metrics for cascade service - MetricsDisabled bool `mapstructure:"-" json:"-"` } diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index 3b30d7e4..6ad40aab 100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -14,7 +14,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/crypto" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" "github.com/LumeraProtocol/supernode/v2/supernode/services/common" @@ -221,15 +220,9 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( } logtrace.Info(ctx, "download: plan symbols", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount}) - if !task.config.MetricsDisabled { - cm.StartRetrieveCapture(actionID) - 
defer cm.StopRetrieveCapture(actionID) - } - // Measure symbols batch retrieve duration retrieveStart := time.Now() - // Tag context with metrics task ID (actionID) - ctxRetrieve := cm.WithTaskID(ctx, actionID) + // Use context as-is; metrics task tagging removed // Retrieve only a fraction of symbols (targetRequiredCount) based on redundancy // The DHT will short-circuit once it finds the required number across the provided keys reqCount := targetRequiredCount @@ -238,7 +231,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( } rStart := time.Now() logtrace.Info(ctx, "download: batch retrieve start", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols}) - symbols, err := task.P2PClient.BatchRetrieve(ctxRetrieve, allSymbols, reqCount, actionID) + symbols, err := task.P2PClient.BatchRetrieve(ctx, allSymbols, reqCount, actionID) if err != nil { fields[logtrace.FieldError] = err.Error() logtrace.Error(ctx, "batch retrieve failed", fields) @@ -264,21 +257,18 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( decodeMS := time.Since(decodeStart).Milliseconds() logtrace.Info(ctx, "download: decode ok", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) - // Set minimal retrieve summary and emit event strictly from internal collector - if !task.config.MetricsDisabled { - cm.SetRetrieveSummary(actionID, retrieveMS, decodeMS) - payload := cm.BuildDownloadEventPayloadFromCollector(actionID) - if retrieve, ok := payload["retrieve"].(map[string]any); ok { - retrieve["target_required_percent"] = targetRequiredPercent - retrieve["target_required_count"] = targetRequiredCount - retrieve["total_symbols"] = totalSymbols - } - if b, err := json.MarshalIndent(payload, "", " "); err == nil { - task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", send) - } - } else { - // Send minimal 
hardcoded event when metrics disabled - task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, "Download completed (metrics disabled)", "", "", send) + // Emit minimal JSON payload (metrics system removed) + minPayload := map[string]any{ + "retrieve": map[string]any{ + "retrieve_ms": retrieveMS, + "decode_ms": decodeMS, + "target_required_percent": targetRequiredPercent, + "target_required_count": targetRequiredCount, + "total_symbols": totalSymbols, + }, + } + if b, err := json.MarshalIndent(minPayload, "", " "); err == nil { + task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", send) } fileHash, err := crypto.HashFileIncrementally(decodeInfo.FilePath, 0) diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go index c9e91106..99d3985a 100644 --- a/supernode/services/cascade/helper.go +++ b/supernode/services/cascade/helper.go @@ -14,7 +14,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" @@ -228,15 +227,10 @@ func (task *CascadeRegistrationTask) emitArtefactsStored( fields = logtrace.Fields{} } - // Build payload strictly from internal collector (no P2P snapshots) - payload := cm.BuildStoreEventPayloadFromCollector(task.ID()) - - b, _ := json.MarshalIndent(payload, "", " ") - msg := string(b) - fields["metrics_json"] = msg + // Emit a minimal event message (metrics system removed) + msg := "Artefacts stored" logtrace.Debug(ctx, "artefacts have been stored", fields) task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) - // No central state to clear; adaptor returns calls inline } // extractSignatureAndFirstPart extracts the signature and first part from the 
encoded data diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go index 5fc5cdbb..1e8659f3 100644 --- a/supernode/services/cascade/register.go +++ b/supernode/services/cascade/register.go @@ -171,10 +171,8 @@ func (task *CascadeRegistrationTask) Register( if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { return err } - // Emit compact analytics payload from centralized metrics collector (optional) - if !task.config.MetricsDisabled { - task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) - } + // Emit artefacts stored event (metrics payload removed; logs preserved) + task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) if err != nil { diff --git a/supernode/services/cascade/service.go b/supernode/services/cascade/service.go index b5b2870a..f88c284b 100644 --- a/supernode/services/cascade/service.go +++ b/supernode/services/cascade/service.go @@ -60,7 +60,7 @@ func NewCascadeService(config *Config, lumera lumera.Client, p2pClient p2p.Clien config: config, SuperNodeService: base.NewSuperNodeService(p2pClient), LumeraClient: adaptors.NewLumeraClient(lumera), - P2P: adaptors.NewP2PService(p2pClient, rqstore, config.MetricsDisabled), + P2P: adaptors.NewP2PService(p2pClient, rqstore), RQ: adaptors.NewCodecService(codec), } } diff --git a/supernode/services/common/storage/handler.go b/supernode/services/common/storage/handler.go index 3967fe2d..9e570d03 100644 --- a/supernode/services/common/storage/handler.go +++ b/supernode/services/common/storage/handler.go @@ -14,7 +14,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/p2p" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" 
"github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/v2/pkg/utils" @@ -75,8 +74,6 @@ func (h *StorageHandler) StoreBatch(ctx context.Context, list [][]byte, typ int) } logtrace.Debug(ctx, "task_id in storeList", logtrace.Fields{logtrace.FieldTaskID: taskID}) - // Add taskID to context for metrics - ctx = p2pmetrics.WithTaskID(ctx, taskID) return h.P2PClient.StoreBatch(ctx, list, typ, taskID) } @@ -167,8 +164,6 @@ func (h *StorageHandler) storeSymbolsInP2P(ctx context.Context, taskID, root str return fmt.Errorf("load symbols: %w", err) } - // Add taskID to context for metrics - ctx = p2pmetrics.WithTaskID(ctx, taskID) if err := h.P2PClient.StoreBatch(ctx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { return fmt.Errorf("p2p store batch: %w", err) } diff --git a/supernode/services/common/supernode/service.go b/supernode/services/common/supernode/service.go index 81bac456..1d0b9dd0 100644 --- a/supernode/services/common/supernode/service.go +++ b/supernode/services/common/supernode/service.go @@ -218,104 +218,7 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric } } - // Recent batch store/retrieve (overall lists) - if rbs, ok := dhtStats["recent_batch_store_overall"].([]kademlia.RecentBatchStoreEntry); ok { - for _, e := range rbs { - metrics.RecentBatchStore = append(metrics.RecentBatchStore, RecentBatchStoreEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Keys: e.Keys, - DurationMS: e.DurationMS, - OK: e.OK, - Error: e.Error, - }) - } - } else if anyList, ok := dhtStats["recent_batch_store_overall"].([]interface{}); ok { - for _, vi := range anyList { - if e, ok := vi.(kademlia.RecentBatchStoreEntry); ok { - metrics.RecentBatchStore = append(metrics.RecentBatchStore, RecentBatchStoreEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Keys: e.Keys, - DurationMS: e.DurationMS, - OK: e.OK, - Error: e.Error, - }) - } - } - } - 
if rbr, ok := dhtStats["recent_batch_retrieve_overall"].([]kademlia.RecentBatchRetrieveEntry); ok { - for _, e := range rbr { - metrics.RecentBatchRetrieve = append(metrics.RecentBatchRetrieve, RecentBatchRetrieveEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Requested: e.Requested, - Found: e.Found, - DurationMS: e.DurationMS, - Error: e.Error, - }) - } - } else if anyList, ok := dhtStats["recent_batch_retrieve_overall"].([]interface{}); ok { - for _, vi := range anyList { - if e, ok := vi.(kademlia.RecentBatchRetrieveEntry); ok { - metrics.RecentBatchRetrieve = append(metrics.RecentBatchRetrieve, RecentBatchRetrieveEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Requested: e.Requested, - Found: e.Found, - DurationMS: e.DurationMS, - Error: e.Error, - }) - } - } - } - - // Per-IP buckets - if byip, ok := dhtStats["recent_batch_store_by_ip"].(map[string][]kademlia.RecentBatchStoreEntry); ok { - for ip, list := range byip { - bucket := make([]RecentBatchStoreEntry, 0, len(list)) - for _, e := range list { - bucket = append(bucket, RecentBatchStoreEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Keys: e.Keys, - DurationMS: e.DurationMS, - OK: e.OK, - Error: e.Error, - }) - } - // initialize map if needed - if metrics.RecentBatchStoreByIP == nil { - metrics.RecentBatchStoreByIP = map[string][]RecentBatchStoreEntry{} - } - metrics.RecentBatchStoreByIP[ip] = bucket - } - } - if byip, ok := dhtStats["recent_batch_retrieve_by_ip"].(map[string][]kademlia.RecentBatchRetrieveEntry); ok { - for ip, list := range byip { - bucket := make([]RecentBatchRetrieveEntry, 0, len(list)) - for _, e := range list { - bucket = append(bucket, RecentBatchRetrieveEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Requested: e.Requested, - Found: e.Found, - DurationMS: e.DurationMS, - Error: e.Error, - }) - } - if metrics.RecentBatchRetrieveByIP == nil { - 
metrics.RecentBatchRetrieveByIP = map[string][]RecentBatchRetrieveEntry{} - } - metrics.RecentBatchRetrieveByIP[ip] = bucket - } - } + // Detailed recent per-request lists removed from API mapping } // DHT rolling metrics snapshot is attached at top-level under dht_metrics diff --git a/supernode/services/common/supernode/types.go b/supernode/services/common/supernode/types.go index 9a6f0953..e84b954a 100644 --- a/supernode/services/common/supernode/types.go +++ b/supernode/services/common/supernode/types.go @@ -60,16 +60,12 @@ type NetworkInfo struct { // P2PMetrics mirrors the proto P2P metrics for status API type P2PMetrics struct { - DhtMetrics DhtMetrics - NetworkHandleMetrics map[string]HandleCounters - ConnPoolMetrics map[string]int64 - BanList []BanEntry - Database DatabaseStats - Disk DiskStatus - RecentBatchStore []RecentBatchStoreEntry - RecentBatchRetrieve []RecentBatchRetrieveEntry - RecentBatchStoreByIP map[string][]RecentBatchStoreEntry - RecentBatchRetrieveByIP map[string][]RecentBatchRetrieveEntry + DhtMetrics DhtMetrics + NetworkHandleMetrics map[string]HandleCounters + ConnPoolMetrics map[string]int64 + BanList []BanEntry + Database DatabaseStats + Disk DiskStatus } type StoreSuccessPoint struct { @@ -122,25 +118,7 @@ type DiskStatus struct { FreeMB float64 } -type RecentBatchStoreEntry struct { - TimeUnix int64 - SenderID string - SenderIP string - Keys int - DurationMS int64 - OK bool - Error string -} - -type RecentBatchRetrieveEntry struct { - TimeUnix int64 - SenderID string - SenderIP string - Requested int - Found int - DurationMS int64 - Error string -} +// Removed: recent per-request lists from public API // TaskProvider interface defines the contract for services to provide // their running task information to the status service diff --git a/tests/integration/p2p/p2p_integration_test.go b/tests/integration/p2p/p2p_integration_test.go index d5df6dc2..478711d2 100644 --- a/tests/integration/p2p/p2p_integration_test.go +++ 
b/tests/integration/p2p/p2p_integration_test.go @@ -204,7 +204,7 @@ func SetupTestP2PNodes(t *testing.T, ctx context.Context) ([]p2p.Client, []*rqst rqStores = append(rqStores, rqStore) // Disable metrics in integration tests by default - service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil, true) + service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil) require.NoError(t, err, "failed to create p2p service for node %d: %v", i, err) // Start P2P service From 57c92f5381db07252ddc5ff950738b90fac33816 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Fri, 3 Oct 2025 19:31:16 +0500 Subject: [PATCH 23/27] Increase raptorq memory --- pkg/codec/raptorq.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/codec/raptorq.go b/pkg/codec/raptorq.go index 7c28d0c5..541aac58 100644 --- a/pkg/codec/raptorq.go +++ b/pkg/codec/raptorq.go @@ -15,7 +15,7 @@ const ( rqSymbolSize uint16 = 65535 rqRedundancyFactor uint8 = 6 // Limit RaptorQ processor memory usage to ~2 GiB - rqMaxMemoryMB uint64 = 2 * 1024 // MB + rqMaxMemoryMB uint64 = 4 * 1024 // MB // Concurrency tuned for 2 GiB limit and typical 8+ core CPUs rqConcurrency uint64 = 1 // Target single-block output for up to 1 GiB files with padding headroom (~1.25 GiB) From 6d3f03e5e3aacdb072c0286772e289c6d12dd728 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Fri, 3 Oct 2025 23:17:02 +0500 Subject: [PATCH 24/27] Add cascadekit; refactor services, e2e, docs --- docs/cascade-performance.md | 191 ---------------- docs/cascade-store-artifacts.md | 152 ------------- docs/p2p-metrics-capture.md | 23 -- pkg/cascade/signature.go | 134 ------------ pkg/cascadekit/doc.go | 17 ++ pkg/cascadekit/hash.go | 28 +++ pkg/cascadekit/highlevel.go | 30 +++ pkg/cascadekit/ids.go | 101 +++++++++ pkg/cascadekit/index.go | 63 ++++++ pkg/cascadekit/index_parse.go | 23 ++ pkg/cascadekit/metadata.go | 18 ++ pkg/cascadekit/metadata_helpers.go | 27 +++ pkg/cascadekit/parsers.go | 40 ++++ 
pkg/cascadekit/rqid.go | 63 ++++++ pkg/cascadekit/signatures.go | 66 ++++++ pkg/cascadekit/verify.go | 23 ++ sdk/README.md | 56 +++-- sdk/action/client.go | 138 ++++++++++++ sdk/adapters/lumera/adapter.go | 28 ++- supernode/cmd/start.go | 32 +-- supernode/services/cascade/download.go | 60 +++--- supernode/services/cascade/helper.go | 228 ++++++-------------- supernode/services/cascade/helper_test.go | 117 ---------- supernode/services/cascade/metadata.go | 127 ----------- supernode/services/cascade/metadata_test.go | 95 -------- supernode/services/cascade/register.go | 14 +- tests/system/e2e_cascade_test.go | 128 +++-------- tests/system/go.mod | 5 +- tests/system/go.sum | 7 +- tests/system/signature_utils.go | 17 -- 30 files changed, 850 insertions(+), 1201 deletions(-) delete mode 100644 docs/cascade-performance.md delete mode 100644 docs/cascade-store-artifacts.md delete mode 100644 docs/p2p-metrics-capture.md delete mode 100644 pkg/cascade/signature.go create mode 100644 pkg/cascadekit/doc.go create mode 100644 pkg/cascadekit/hash.go create mode 100644 pkg/cascadekit/highlevel.go create mode 100644 pkg/cascadekit/ids.go create mode 100644 pkg/cascadekit/index.go create mode 100644 pkg/cascadekit/index_parse.go create mode 100644 pkg/cascadekit/metadata.go create mode 100644 pkg/cascadekit/metadata_helpers.go create mode 100644 pkg/cascadekit/parsers.go create mode 100644 pkg/cascadekit/rqid.go create mode 100644 pkg/cascadekit/signatures.go create mode 100644 pkg/cascadekit/verify.go delete mode 100644 supernode/services/cascade/helper_test.go delete mode 100644 supernode/services/cascade/metadata.go delete mode 100644 supernode/services/cascade/metadata_test.go delete mode 100644 tests/system/signature_utils.go diff --git a/docs/cascade-performance.md b/docs/cascade-performance.md deleted file mode 100644 index 1cecf566..00000000 --- a/docs/cascade-performance.md +++ /dev/null @@ -1,191 +0,0 @@ -# Cascade Downloads & Performance: Concepts, Limits, and Tuning - 
-This document explains how Cascade encoding/decoding works, the performance and memory factors involved, and practical configuration guidance. It consolidates the “blocks and symbols” primer and expands it with deeper operational tuning, error references, and code pointers — in a concise, professional format. - -## Overview - -- Cascade uses RaptorQ forward error correction to split a file into blocks and symbols that can be stored/fetched from a P2P network. -- Decoding requires enough symbols to reconstruct each block; integrity is verified with hashes recorded in the layout. -- Performance and reliability are driven by four main levers: block size, redundancy, concurrency, and memory headroom. Batching and ordering in the store path, and supernode selection in the download path, also matter. - -## Current Defaults (Implementation) - -- RaptorQ (codec) - - Block cap: 256 MB (encode‑time upper bound per block) - - Decode concurrency: 1 - - Memory headroom: 20% of detected RAM - - Symbol size: ~65,535 bytes - - Redundancy: 5 - -- Store path (foreground adaptor) - - Batch size: 2,500 files per batch (≈156 MiB typical at default symbol size) - - Downsampling: if total files > 2,500, take 10% sorted prefix for initial store - - Per‑batch P2P store timeout: 5 minutes - -- Store path (background worker) - - Batch size: 1,000 files per batch (≈62.5 MiB typical) - -- Download path - - SDK per‑supernode download deadline: 10 minutes - - Supernode ranking: status probe ~2 seconds per node; sorted by available memory (desc) - - P2P exec timeouts (per RPC): - - FindValue: 5s - - BatchFindValues: 60s - - BatchGetValues: 75s - - StoreData: 10s - - BatchStoreData: 75s - - Replicate: 90s - -- Upload constraints - - Max file size: 1 GB (enforced in SDK and server) - - Adaptive upload chunk size: ~64 KB → 4 MB based on file size - -## Core Concepts - -- Block: A contiguous segment of the original file. Think of it as a “chapter”. 
-- Symbol: A small piece produced by RaptorQ for a block. You only need “enough” symbols to reconstruct the block. -- Layout: Metadata that lists all blocks (block_id, size, original offset, per‑block hash) and the symbol IDs belonging to each block. - -Encode (upload): -- Choose a block size; RaptorQ creates symbols per block; symbols + layout are stored. - -Decode (download): -- Fetch symbols from the network; reconstruct each block independently; write each block back at its original offset; verify hashes; stream the file. - -Key facts: -- Symbols never mix across blocks. -- Peak memory during decode scales roughly with the chosen block size (plus overhead). - -## File Size Limits & Upload Chunking - -- Maximum file size: 1 GB (enforced both in SDK and server handlers). -- Adaptive upload chunk size: ~64 KB → 4 MB depending on total file size for throughput vs memory stability. - -## Encoding/Decoding Workflow (high level) - -1) SDK uploads file to a supernode (gRPC stream). Server writes to a temporary file, validates size and integrity. -2) Server encodes with RaptorQ: produces a symbols directory and a layout JSON. -3) Server stores artefacts: layout/ID files and symbols into P2P in batches. -4) Later, SDK requests download; supernode fetches symbols progressively and decodes to reconstruct the file; integrity is verified. - -## Contexts & Timeouts (download path) - -- SDK: wraps the download RPC with a 10‑minute deadline. -- Server: uses that context; P2P layer applies per‑RPC timeouts (e.g., 5s for single key FindValue, ~75s for BatchGetValues), with internal early cancellation once enough symbols are found. -- RaptorQ: uses the same context for logging; no additional deadline inside decode. - -## Memory Model - -- Decoder memory is primarily a function of block size and concurrency. -- Headroom percentage reduces the usable memory budget to leave safety buffer for the OS and other processes. -- Example formula: usable_memory ≈ TotalRAM × (1 − headroom%). 
- -## Configuration Levers - -The implementation uses simple fixed constants for safety and predictability. You can adjust them and rebuild. - -1) Block Size Cap (`targetBlockMB`, encode‑time) -- What: Upper bound on block size. Actual used size = min(recommended_by_codec, cap). -- Effect: Smaller cap lowers peak decode memory (more blocks, more symbols/keys). Larger cap reduces block count (faster on big machines) but raises peak memory. -- Current default: 256 MB (good balance on well-provisioned machines). Only affects newly encoded artefacts. - -2) Redundancy (`defaultRedundancy`, encode‑time) -- What: Extra protection (more symbols) to tolerate missing data. -- Effect: Higher redundancy improves recoverability but costs more storage and network I/O. Does not materially change peak memory. -- Current default: 5 (good real‑world trade‑off). - -3) Concurrency (`fixedConcurrency`, decode‑time) -- What: Number of RaptorQ decode workers. -- Effect: Higher is faster but multiplies memory; lower is safer and predictable. -- Current default: 1 (safe default for wide environments). - -4) Headroom (`headroomPct`, decode‑time) -- What: Percentage of detected RAM left unused by the RaptorQ processor. -- Effect: More headroom = safer under load; less headroom = more memory available to decode. -- Current default: 20% (conservative and robust for shared hosts). - -## Batching Strategy (store path) - -Why batching matters: -- Store batches are loaded wholly into memory before sending to P2P. -- A fixed “files‑per‑batch” limit gives variable memory usage because symbol files can differ slightly in size. - -Current defaults: -- Foreground adaptor: `loadSymbolsBatchSize = 2500` → ≈ 2,500 × 65,535 B ≈ 156 MiB per batch (typical). -- Background worker: `loadSymbolsBatchSize = 1000` → ≈ 62.5 MiB per batch. - -Byte‑budget alternative (conceptual, not implemented): -- Cap the total bytes per batch (e.g., 128–256 MiB), with a secondary cap on file count. 
-- Benefits: predictable peak memory; better throughput on small symbols; avoids spikes on larger ones. - -## Ordering for Throughput (store path) - -- We sort relative file paths before batching (e.g., `block_0/...`, `block_1/...`) to improve filesystem locality and reduce disk seeks. This favors speed. -- Trade‑off: If a process stops mid‑way, earlier blocks (lexicographically smaller) are more likely stored than later ones. For fairness across blocks at partial completion, interleaving could be used at some CPU cost. - -## Supernode Selection (download path) - -- The SDK ranks supernodes by available memory (fast 2s status probe per node) and attempts downloads in that order. -- This increases the chances of successful decode for large files. - -## Defaults & Suggested Settings - -1 GB files (general) -- Block cap: 256 MB (≈4 blocks) -- Concurrency: 1 -- Headroom: 20% -- Redundancy: 5 - -Large‑memory machines (performance‑leaning) -- Block cap: 256 MB (or 512 MB) to reduce block count and increase throughput. -- Concurrency: 1–2. -- Headroom: 15–20% depending on other workloads. -- Redundancy: 5 (or 6 in sparse networks). - -Small‑memory machines -- Block cap: 64–128 MB -- Concurrency: 1 -- Headroom: 20% -- Redundancy: 5 - -## Error Reference - -- memory limit exceeded - - The decoder exceeded its memory budget. Reduce block size or concurrency, increase RAM, or lower headroom. - -- hash mismatch for block X - - Data reconstructed for the block did not match the expected hash. Often indicates wrong/corrupt symbols; can also occur when decoding fails mid‑way under memory pressure. Re‑fetching or re‑encoding may be required. - -- insufficient symbols - - Not enough valid symbols were available; the retriever will fetch more. - -- gRPC Internal on download stream - - The supernode returned an error during decode (e.g., memory failure). The SDK will try the next supernode. 
- -## Code Pointers - -- Block cap, headroom, concurrency (RaptorQ): `pkg/codec/raptorq.go` -- Store batching (foreground path): `supernode/services/cascade/adaptors/p2p.go` -- Store batching (background worker): `p2p/kademlia/rq_symbols.go` -- Batch symbol loading / deletion: `pkg/utils/utils.go` (LoadSymbols, DeleteSymbols) -- Supernode ranking by memory (download): `sdk/task/download.go` -- File size cap & adaptive upload chunking: SDK and server sides (`sdk/adapters/supernodeservice/adapter.go`, `supernode/node/action/server/cascade/cascade_action_server.go`) - -## Notes & Scope - -- Changing block size only affects new encodes; existing artefacts keep their original layout. -- Tuning should reflect your fleet: prefer safety defaults for heterogeneous environments; be aggressive only on known large‑RAM hosts. - -## FAQ - -- Why might a smaller file decode but a larger file fail? - - Peak memory grows with data size and chosen block size. A smaller file may fit within the decoder’s memory budget on a given machine, while a larger one may exceed it. Smaller blocks and/or more RAM resolve this. - -- Does changing block size affect old files? - - No. It only affects newly encoded content. Existing artefacts retain their original layout. - -- Will smaller blocks slow things down? - - Slightly, due to more pieces and network lookups. For constrained machines, the reliability gain outweighs the small performance cost. - -- What’s the best block size? - - There’s no single best value. 128 MB is a solid default. Use 64 MB for smaller machines and 256–512 MB for large servers when maximizing throughput. 
diff --git a/docs/cascade-store-artifacts.md b/docs/cascade-store-artifacts.md deleted file mode 100644 index c2cf4892..00000000 --- a/docs/cascade-store-artifacts.md +++ /dev/null @@ -1,152 +0,0 @@ -# Cascade Artefacts Storage Flow - -This document explains how Cascade artefacts (ID files + RaptorQ symbols) are persisted to the P2P network, the control flow from the API to the P2P layer, and which background workers continue the process after the API call returns. - -## Scope & Terminology - -- Artefacts: The data produced for a Cascade action that must be stored on the network. - - ID files (a.k.a. redundant metadata files): compact metadata payloads derived from the layout/index. - - Symbols: RaptorQ-encoded chunks of the input file. -- Request IDs and files are generated during the registration flow; storing starts after validation and simulation succeed. - -## High‑Level Sequence - -1) Client calls `Register` with input file and action metadata. -2) The service verifies the action, fee, eligibility, signature and layout consistency, then encodes the input into RaptorQ symbols. -3) Finalize simulation is performed on chain to ensure the action can finalize. -4) If simulation passes, artefacts are persisted: - - ID files are stored first as a single batch. - - Symbols are stored in batches; a first pass may downsample for large directories. - - A background worker continues storing the remainder (no sampling) after the call returns. -5) Action is finalized on chain and control returns to the caller. - -Code reference: -- `supernode/services/cascade/register.go` (Register flow, steps 1–11) -- `supernode/services/cascade/helper.go` (wrappers and helpers) -- `supernode/services/cascade/adaptors/p2p.go` (P2P adaptor for storage) -- `p2p/p2p.go`, `p2p/kademlia/dht.go`, `p2p/kademlia/rq_symbols.go` (P2P and Kademlia implementation) - -## Register Flow Up To Storage - -Register performs the following (simplified): - -- Fetches and validates the on‑chain action. 
-- Verifies fee and that this node is in the top supernodes for the block height. -- Decodes cascade metadata and verifies that the uploaded data hash matches the ticket. -- Encodes the input using RaptorQ; produces `SymbolsDir` and `Metadata` (layout). -- Verifies layout signature (creator), generates RQ‑ID files and validates IDs. -- Simulates finalize (chain dry‑run). If simulation fails, the call returns with an error (no storage). -- Calls `storeArtefacts(...)` to persist artefacts to P2P. - -Events are streamed throughout via `send(*RegisterResponse)`, including when artefacts are stored and when the action is finalized. - -## The storeArtefacts Wrapper - -Function: `supernode/services/cascade/helper.go::storeArtefacts` - -- Thin pass‑through that packages a `StoreArtefactsRequest` and forwards to the P2P adaptor (`task.P2P.StoreArtefacts`). -- Parameters: - - `IDFiles [][]byte`: the redundant metadata files to store. - - `SymbolsDir string`: filesystem directory where symbols were written. - - `TaskID string` and `ActionID string`: identifiers for logging and DB association. - -Does not return metrics; logs provide visibility. - -## P2P Adaptor: StoreArtefacts - -Implementation: `supernode/services/cascade/adaptors/p2p.go` - -1) Store metadata (ID files) using `p2p.Client.StoreBatch(...)`. - -2) Store symbols using `storeCascadeSymbols(...)`: - - Records the symbol directory in a small SQLite store: `rqStore.StoreSymbolDirectory(taskID, symbolsDir)`. - - Walks `symbolsDir` to list symbol files. If there are more than 2,500 symbols, downsamples to 10% for this first pass (random sample, sorted deterministically afterward). - - Streams symbols in fixed‑size batches of 2,500 files: - - Each batch loads files, calls `p2p.Client.StoreBatch(...)` with a 5‑minute timeout, and deletes successfully uploaded files. - - Marks “first batch stored” for this action: `rqStore.UpdateIsFirstBatchStored(actionID)`. - - Logs counts and timings; no metrics are returned. 
- -3) Return: - - No metrics aggregation; return indicates success/failure only. - -Notes: -- This adaptor only performs a first pass of symbol storage. For large directories it may downsample; the background worker completes the remaining symbols later (see Background Worker section). - -## P2P Client and DHT: StoreBatch - -`p2p.Client.StoreBatch` proxies to `DHT.StoreBatch`: - -- Local persist first: `store.StoreBatch(ctx, values, typ, true)` ensures local DB/storage contains the items. -- Network store: `DHT.IterateBatchStore(ctx, values, typ, taskID)`: - - For each value, compute its Blake3 hash; compute the top‑K closest nodes from the routing table. - - Build a node→items map and invoke `batchStoreNetwork(...)` with bounded concurrency (a goroutine per node, limited via a semaphore; all joined before returning). - - If the measured success rate is below an internal threshold, DHT returns an error. - -Important distinctions: -- `requests` is the number of per‑node RPCs attempted; it is not the number of items in the batch. -- Success rate is based on successful node acknowledgements divided by `requests`. - -## Metrics & Events - -`Register` logs and emits an informational event (Artefacts stored), then proceeds to finalize the action on chain. - -## Background Worker (Symbols Continuation) - -Started in DHT `run()` when P2P service starts: - -- Function: `p2p/kademlia/rq_symbols.go::startStoreSymbolsWorker` -- Every 30 seconds: - - Queries `rq_symbols_dir` for rows where `is_first_batch_stored = TRUE` and `is_completed = FALSE`. - - For each directory, scans and stores ALL remaining symbols (no sampling) in 1,000‑file batches using the same `StoreBatch` API. - - Deletes files after successful upload. - - Marks the directory as completed: `rqstore.SetIsCompleted(txid)`. - -Effectively, the API call performs a first pass, and the background worker ensures eventual completion. 
- -## Storage Bookkeeping (SQLite) - -Table: `rq_symbols_dir` - -- Columns: - - `txid TEXT PRIMARY KEY` — action/task identifier. - - `dir TEXT NOT NULL` — filesystem path to the symbols directory. - - `is_first_batch_stored BOOLEAN NOT NULL DEFAULT FALSE` — set true after first pass completes. - - `is_completed BOOLEAN NOT NULL DEFAULT FALSE` — set true after the background worker completes. - - `created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP`. - -APIs: -- `StoreSymbolDirectory(txid, dir)` — insert entry when first pass starts. -- `UpdateIsFirstBatchStored(txid)` — mark first pass completion. -- `GetToDoStoreSymbolDirs()` — list txids/dirs awaiting background completion. -- `SetIsCompleted(txid)` — mark directory as fully processed. - -## Timeouts, Limits, and Knobs - -- First‑pass symbol batches: 2,500 items; per‑batch timeout: 5 minutes. -- Sampling threshold: if symbol count > 2,500, downsample to 10% for first pass. -- DHT minimum success rate: 75% — batch returns error if not met. -- Background worker batch size: 1,000; runs every 30 seconds; no sampling. - -These values can be tuned in: -- `supernode/services/cascade/adaptors/p2p.go` (batching, sampling for first pass). -- `p2p/kademlia/rq_symbols.go` (background worker interval and batch size). -- `p2p/kademlia/dht.go` (minimum success rate, internal concurrencies). - -## Error Handling & Return Semantics - -- If finalize simulation fails: Register returns an error before any storage. -- If metadata store fails: `StoreArtefacts` returns error; Register wraps and returns. -- If symbol first pass fails: same; background worker does not start because `is_first_batch_stored` is not set. -- If the network success rate is below the threshold: DHT returns an error; adaptor propagates it. -- File I/O errors (load/delete) abort the corresponding batch with a wrapped error. 
- -## Concurrency Model - -- Within `StoreArtefacts` → `DHT.StoreBatch`, network calls are concurrent (goroutines per node) but **joined before return**. There is no detached goroutine in the first pass. -- The only long‑running background activity is the P2P‑level worker (`startStoreSymbolsWorker`) launched when the P2P service starts, not by the API call itself. - -## Cleanup Behavior - -- First pass deletes uploaded symbol files per batch (`utils.DeleteSymbols`) after a successful store batch. -- Background worker also deletes files after each batch store. -- The uploaded raw input file is removed by `Register` in a `defer` block regardless of outcome. diff --git a/docs/p2p-metrics-capture.md b/docs/p2p-metrics-capture.md deleted file mode 100644 index b13bc393..00000000 --- a/docs/p2p-metrics-capture.md +++ /dev/null @@ -1,23 +0,0 @@ -# P2P Metrics — Current Behavior - -We removed the custom per‑RPC metrics capture and the `pkg/p2pmetrics` package. Logs are the source of truth for store/retrieve visibility, and the Status API provides a rolling DHT snapshot for high‑level metrics. - -What remains -- Status API metrics: DHT rolling windows (store success, batch retrieve), network handle counters, ban list, DB/disk stats, and connection pool metrics. -- Logs: detailed send/ok/fail lines for RPCs at both client and server. - -What was removed -- Per‑RPC metrics capture and grouping by IP for events. -- Metrics collectors and context tagging helpers. -- Recent per‑request lists from the Status API. - -Events -- The supernode emits minimal events (e.g., artefacts stored, downloaded). These events no longer include metrics payloads. Use logs for detailed troubleshooting. - -Status API -- To include P2P metrics and peer info, clients set `include_p2p_metrics=true` on `StatusRequest`. -- The SDK adapter already includes this flag by default to populate peer count for eligibility checks. 
- -References -- Status proto: `proto/supernode/status.proto` -- Service proto: `proto/supernode/service.proto` diff --git a/pkg/cascade/signature.go b/pkg/cascade/signature.go deleted file mode 100644 index 4cb83cf1..00000000 --- a/pkg/cascade/signature.go +++ /dev/null @@ -1,134 +0,0 @@ -package cascade - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/keyring" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/cosmos/btcutil/base58" - cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" - "lukechampine.com/blake3" -) - -// CreateLayoutSignature creates the cascade signature format for a given layout file. -// It returns the signature format and index file IDs needed for CASCADE action. -func CreateLayoutSignature(metadataFile codec.Layout, kr cosmoskeyring.Keyring, userKeyName string, ic uint32, maxFiles uint32) (signatureFormat string, indexFileIDs []string, err error) { - // Step 1: Convert metadata to JSON then base64 - me, err := json.Marshal(metadataFile) - if err != nil { - return "", nil, fmt.Errorf("failed to marshal metadata: %w", err) - } - layoutBase64 := base64.StdEncoding.EncodeToString(me) - - // Step 2: Sign the layout data - layoutSignature, err := keyring.SignBytes(kr, userKeyName, []byte(layoutBase64)) - if err != nil { - return "", nil, fmt.Errorf("failed to sign layout: %w", err) - } - layoutSignatureB64 := base64.StdEncoding.EncodeToString(layoutSignature) - - // Step 3: Generate redundant layout file IDs - layoutIDs := GenerateLayoutIDsBatch(layoutBase64, layoutSignatureB64, ic, maxFiles) - - // Step 4: Create index file containing layout references - indexFile := map[string]interface{}{ - "layout_ids": layoutIDs, - "layout_signature": layoutSignatureB64, - } - - // Step 5: Sign the index file - indexFileJSON, err := json.Marshal(indexFile) - if err != nil { - return "", nil, fmt.Errorf("failed 
to marshal index file: %w", err) - } - indexFileBase64 := base64.StdEncoding.EncodeToString(indexFileJSON) - - creatorSignature, err := keyring.SignBytes(kr, userKeyName, []byte(indexFileBase64)) - if err != nil { - return "", nil, fmt.Errorf("failed to sign index file: %w", err) - } - creatorSignatureB64 := base64.StdEncoding.EncodeToString(creatorSignature) - - // Step 6: Create final signature format - signatureFormat = fmt.Sprintf("%s.%s", indexFileBase64, creatorSignatureB64) - - // Step 7: Generate final index file IDs for submission - indexFileIDs = GenerateIndexIDsBatch(signatureFormat, ic, maxFiles) - - return signatureFormat, indexFileIDs, nil -} - -// GenerateLayoutIDsBatch generates layout IDs using the process: -// combine data -> add counter -> compress -> hash -> Base58 encode -func GenerateLayoutIDsBatch(layoutBase64, layoutSignatureB64 string, ic, maxFiles uint32) []string { - layoutWithSig := fmt.Sprintf("%s.%s", layoutBase64, layoutSignatureB64) - layoutIDs := make([]string, maxFiles) - - var buffer bytes.Buffer - buffer.Grow(len(layoutWithSig) + 10) - - for i := uint32(0); i < maxFiles; i++ { - // Build unique content with counter - buffer.Reset() - buffer.WriteString(layoutWithSig) - buffer.WriteByte('.') - buffer.WriteString(fmt.Sprintf("%d", ic+i)) - - // Compress for efficiency - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - continue - } - - // Hash for uniqueness - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - continue - } - - // Base58 encode for readable ID - layoutIDs[i] = base58.Encode(hash) - } - - return layoutIDs -} - -// GenerateIndexIDsBatch generates index file IDs using same process as layout IDs -func GenerateIndexIDsBatch(signatureFormat string, ic, maxFiles uint32) []string { - indexFileIDs := make([]string, maxFiles) - - var buffer bytes.Buffer - buffer.Grow(len(signatureFormat) + 10) - - for i := uint32(0); i < maxFiles; i++ { - buffer.Reset() - 
buffer.WriteString(signatureFormat) - buffer.WriteByte('.') - buffer.WriteString(fmt.Sprintf("%d", ic+i)) - - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - continue - } - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - continue - } - indexFileIDs[i] = base58.Encode(hash) - } - return indexFileIDs -} - -// ComputeBlake3Hash computes Blake3 hash of the given message -func ComputeBlake3Hash(msg []byte) ([]byte, error) { - hasher := blake3.New(32, nil) - if _, err := io.Copy(hasher, bytes.NewReader(msg)); err != nil { - return nil, err - } - return hasher.Sum(nil), nil -} \ No newline at end of file diff --git a/pkg/cascadekit/doc.go b/pkg/cascadekit/doc.go new file mode 100644 index 00000000..ab8ce081 --- /dev/null +++ b/pkg/cascadekit/doc.go @@ -0,0 +1,17 @@ +// Package cascadekit provides small, pure utilities for generating, +// parsing, signing and validating Cascade artefacts used by the supernode +// register/download flows. +// +// Scope: +// - Build and sign layout metadata (RaptorQ layout) and index files +// - Generate redundant metadata files and index files + their IDs +// - Extract and decode index payloads from the on-chain signatures string +// - Compute data hashes for request metadata +// - Verify single-block layout consistency (explicit error if more than 1 block) +// +// Non-goals: +// - No network or chain dependencies (verification is left to callers) +// - No logging; keep functions small and deterministic +// - No orchestration helpers; this package exposes building blocks only +package cascadekit + diff --git a/pkg/cascadekit/hash.go b/pkg/cascadekit/hash.go new file mode 100644 index 00000000..55288123 --- /dev/null +++ b/pkg/cascadekit/hash.go @@ -0,0 +1,28 @@ +package cascadekit + +import ( + "bytes" + "encoding/base64" + "io" + + "lukechampine.com/blake3" +) + +// ComputeBlake3Hash computes a 32-byte Blake3 hash of the given data. 
+func ComputeBlake3Hash(msg []byte) ([]byte, error) { + hasher := blake3.New(32, nil) + if _, err := io.Copy(hasher, bytes.NewReader(msg)); err != nil { + return nil, err + } + return hasher.Sum(nil), nil +} + +// ComputeBlake3DataHashB64 computes a Blake3 hash of the input and +// returns it as a base64-encoded string. +func ComputeBlake3DataHashB64(data []byte) (string, error) { + h, err := ComputeBlake3Hash(data) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(h), nil +} diff --git a/pkg/cascadekit/highlevel.go b/pkg/cascadekit/highlevel.go new file mode 100644 index 00000000..16c0072d --- /dev/null +++ b/pkg/cascadekit/highlevel.go @@ -0,0 +1,30 @@ +package cascadekit + +import ( + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" + cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" +) + +// CreateSignaturesWithKeyring signs layout and index using a Cosmos keyring. +func CreateSignaturesWithKeyring(layout codec.Layout, kr cosmoskeyring.Keyring, keyName string, ic, max uint32) (string, []string, error) { + signer := func(msg []byte) ([]byte, error) { return keyringpkg.SignBytes(kr, keyName, msg) } + return CreateSignatures(layout, signer, ic, max) +} + +// BuildCascadeRequest builds a Cascade request metadata from layout and file bytes. +// It computes blake3(data) base64, creates the signatures string and index IDs, +// and returns a CascadeMetadata ready for RequestAction. 
+func BuildCascadeRequest(layout codec.Layout, fileBytes []byte, fileName string, kr cosmoskeyring.Keyring, keyName string, ic, max uint32, public bool) (actiontypes.CascadeMetadata, []string, error) { + dataHashB64, err := ComputeBlake3DataHashB64(fileBytes) + if err != nil { + return actiontypes.CascadeMetadata{}, nil, err + } + signatures, indexIDs, err := CreateSignaturesWithKeyring(layout, kr, keyName, ic, max) + if err != nil { + return actiontypes.CascadeMetadata{}, nil, err + } + meta := NewCascadeMetadata(dataHashB64, fileName, uint64(ic), signatures, public) + return meta, indexIDs, nil +} diff --git a/pkg/cascadekit/ids.go b/pkg/cascadekit/ids.go new file mode 100644 index 00000000..5c2b404d --- /dev/null +++ b/pkg/cascadekit/ids.go @@ -0,0 +1,101 @@ +package cascadekit + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/cosmos/btcutil/base58" +) + +// GenerateLayoutIDs computes IDs for redundant layout files (not the final index IDs). +// The ID is base58(blake3(zstd(layout_b64.layout_sig_b64.counter))). +func GenerateLayoutIDs(layoutB64, layoutSigB64 string, ic, max uint32) []string { + layoutWithSig := fmt.Sprintf("%s.%s", layoutB64, layoutSigB64) + layoutIDs := make([]string, max) + + var buffer bytes.Buffer + buffer.Grow(len(layoutWithSig) + 10) + + for i := uint32(0); i < max; i++ { + buffer.Reset() + buffer.WriteString(layoutWithSig) + buffer.WriteByte('.') + buffer.WriteString(fmt.Sprintf("%d", ic+i)) + + compressedData, err := utils.ZstdCompress(buffer.Bytes()) + if err != nil { + continue + } + + hash, err := utils.Blake3Hash(compressedData) + if err != nil { + continue + } + + layoutIDs[i] = base58.Encode(hash) + } + + return layoutIDs +} + +// GenerateIndexIDs computes IDs for index files from the full signatures string. 
+func GenerateIndexIDs(signatures string, ic, max uint32) []string { + indexFileIDs := make([]string, max) + + var buffer bytes.Buffer + buffer.Grow(len(signatures) + 10) + + for i := uint32(0); i < max; i++ { + buffer.Reset() + buffer.WriteString(signatures) + buffer.WriteByte('.') + buffer.WriteString(fmt.Sprintf("%d", ic+i)) + + compressedData, err := utils.ZstdCompress(buffer.Bytes()) + if err != nil { + continue + } + hash, err := utils.Blake3Hash(compressedData) + if err != nil { + continue + } + indexFileIDs[i] = base58.Encode(hash) + } + return indexFileIDs +} + +// getIDFiles generates ID files by appending a '.' and counter, compressing, +// and returning both IDs and compressed payloads. +func getIDFiles(file []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { + idFiles := make([][]byte, 0, max) + ids = make([]string, 0, max) + var buffer bytes.Buffer + + for i := uint32(0); i < max; i++ { + buffer.Reset() + counter := ic + i + + buffer.Write(file) + buffer.WriteByte(SeparatorByte) + buffer.WriteString(strconv.Itoa(int(counter))) + + compressedData, err := utils.ZstdCompress(buffer.Bytes()) + if err != nil { + return ids, idFiles, errors.Errorf("compress identifiers file: %w", err) + } + + idFiles = append(idFiles, compressedData) + + hash, err := utils.Blake3Hash(compressedData) + if err != nil { + return ids, idFiles, errors.Errorf("sha3-256-hash error getting an id file: %w", err) + } + + ids = append(ids, base58.Encode(hash)) + } + + return ids, idFiles, nil +} diff --git a/pkg/cascadekit/index.go b/pkg/cascadekit/index.go new file mode 100644 index 00000000..bd9c040d --- /dev/null +++ b/pkg/cascadekit/index.go @@ -0,0 +1,63 @@ +package cascadekit + +import ( + "encoding/base64" + "encoding/json" + "strings" + + "github.com/LumeraProtocol/supernode/v2/pkg/errors" +) + +// SeparatorByte is the '.' separator used when composing payloads with counters. 
+const SeparatorByte byte = 46 + +// IndexFile represents the structure of the index file referenced on-chain. +// The JSON fields must match the existing format. +type IndexFile struct { + Version int `json:"version,omitempty"` + LayoutIDs []string `json:"layout_ids"` + LayoutSignature string `json:"layout_signature"` +} + +// BuildIndex creates an IndexFile from layout IDs and the layout signature. +func BuildIndex(layoutIDs []string, layoutSigB64 string) IndexFile { + return IndexFile{LayoutIDs: layoutIDs, LayoutSignature: layoutSigB64} +} + +// EncodeIndexB64 marshals an index file and returns both the raw JSON and base64. +func EncodeIndexB64(idx IndexFile) (b64 string, raw []byte, err error) { + raw, err = json.Marshal(idx) + if err != nil { + return "", nil, errors.Errorf("marshal index file: %w", err) + } + return base64.StdEncoding.EncodeToString(raw), raw, nil +} + +// DecodeIndexB64 decodes base64(JSON(IndexFile)). +func DecodeIndexB64(data string) (IndexFile, error) { + var indexFile IndexFile + decodedData, err := base64.StdEncoding.DecodeString(data) + if err != nil { + return indexFile, errors.Errorf("failed to decode index file: %w", err) + } + if err := json.Unmarshal(decodedData, &indexFile); err != nil { + return indexFile, errors.Errorf("failed to unmarshal index file: %w", err) + } + return indexFile, nil +} + +// ExtractIndexAndCreatorSig splits a signatures string formatted as: +// Base64(index_json).Base64(creator_signature) +func ExtractIndexAndCreatorSig(signatures string) (indexB64 string, creatorSigB64 string, err error) { + parts := strings.Split(signatures, ".") + if len(parts) < 2 { + return "", "", errors.New("invalid signatures format") + } + return parts[0], parts[1], nil +} + +// MakeSignatureFormat composes the final signatures string. +func MakeSignatureFormat(indexB64, creatorSigB64 string) string { + return indexB64 + "." 
+ creatorSigB64 +} + diff --git a/pkg/cascadekit/index_parse.go b/pkg/cascadekit/index_parse.go new file mode 100644 index 00000000..9629398e --- /dev/null +++ b/pkg/cascadekit/index_parse.go @@ -0,0 +1,23 @@ +package cascadekit + +import ( + "bytes" + + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" +) + +// ParseCompressedIndexFile parses a compressed index file into an IndexFile. +// The compressed format is: base64(IndexJSON).creator_signature.counter +func ParseCompressedIndexFile(data []byte) (IndexFile, error) { + decompressed, err := utils.ZstdDecompress(data) + if err != nil { + return IndexFile{}, errors.Errorf("decompress index file: %w", err) + } + parts := bytes.Split(decompressed, []byte{SeparatorByte}) + if len(parts) < 2 { + return IndexFile{}, errors.New("invalid index file format") + } + return DecodeIndexB64(string(parts[0])) +} + diff --git a/pkg/cascadekit/metadata.go b/pkg/cascadekit/metadata.go new file mode 100644 index 00000000..79969280 --- /dev/null +++ b/pkg/cascadekit/metadata.go @@ -0,0 +1,18 @@ +package cascadekit + +import ( + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" +) + +// NewCascadeMetadata creates a types.CascadeMetadata for RequestAction. +// The keeper will populate rq_ids_max; rq_ids_ids is for FinalizeAction only. 
+func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, signatures string, public bool) actiontypes.CascadeMetadata { + return actiontypes.CascadeMetadata{ + DataHash: dataHashB64, + FileName: fileName, + RqIdsIc: rqIdsIc, + Signatures: signatures, + Public: public, + } +} + diff --git a/pkg/cascadekit/metadata_helpers.go b/pkg/cascadekit/metadata_helpers.go new file mode 100644 index 00000000..c98aaa83 --- /dev/null +++ b/pkg/cascadekit/metadata_helpers.go @@ -0,0 +1,27 @@ +package cascadekit + +import ( + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/golang/protobuf/proto" +) + +// UnmarshalCascadeMetadata decodes action metadata bytes into CascadeMetadata. +func UnmarshalCascadeMetadata(raw []byte) (actiontypes.CascadeMetadata, error) { + var meta actiontypes.CascadeMetadata + if err := proto.Unmarshal(raw, &meta); err != nil { + return meta, errors.Errorf("failed to unmarshal cascade metadata: %w", err) + } + return meta, nil +} + +// VerifyB64DataHash compares a raw hash with an expected base64 string. +func VerifyB64DataHash(raw []byte, expectedB64 string) error { + b64 := utils.B64Encode(raw) + if string(b64) != expectedB64 { + return errors.New("data hash doesn't match") + } + return nil +} + diff --git a/pkg/cascadekit/parsers.go b/pkg/cascadekit/parsers.go new file mode 100644 index 00000000..ed8e270b --- /dev/null +++ b/pkg/cascadekit/parsers.go @@ -0,0 +1,40 @@ +package cascadekit + +import ( + "bytes" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + json "github.com/json-iterator/go" +) + +// ParseRQMetadataFile parses a compressed rq metadata file into layout, signature and counter. 
+// File format: base64(JSON(layout)).signature.counter (all parts separated by '.') +func ParseRQMetadataFile(data []byte) (layout codec.Layout, signature string, counter string, err error) { + decompressed, err := utils.ZstdDecompress(data) + if err != nil { + return layout, "", "", errors.Errorf("decompress rq metadata file: %w", err) + } + + // base64EncodeMetadata.Signature.Counter + parts := bytes.Split(decompressed, []byte{SeparatorByte}) + if len(parts) != 3 { + return layout, "", "", errors.New("invalid rq metadata format: expecting 3 parts (layout, signature, counter)") + } + + layoutJson, err := utils.B64Decode(parts[0]) + if err != nil { + return layout, "", "", errors.Errorf("base64 decode failed: %w", err) + } + + if err := json.Unmarshal(layoutJson, &layout); err != nil { + return layout, "", "", errors.Errorf("unmarshal layout: %w", err) + } + + signature = string(parts[1]) + counter = string(parts[2]) + + return layout, signature, counter, nil +} + diff --git a/pkg/cascadekit/rqid.go b/pkg/cascadekit/rqid.go new file mode 100644 index 00000000..97066b11 --- /dev/null +++ b/pkg/cascadekit/rqid.go @@ -0,0 +1,63 @@ +package cascadekit + +import ( + "context" + "encoding/json" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" +) + +// GenRQIdentifiersFilesResponse groups the generated files and their IDs. +type GenRQIdentifiersFilesResponse struct { + // IDs of the Redundant Metadata Files -- len(RQIDs) == len(RedundantMetadataFiles) + RQIDs []string + // RedundantMetadataFiles is a list of redundant files generated from the Metadata file + RedundantMetadataFiles [][]byte +} + +// GenerateLayoutFiles builds redundant metadata files from layout and signature. 
+// The content is: base64(JSON(layout)).layout_signature +func GenerateLayoutFiles(ctx context.Context, layout codec.Layout, layoutSigB64 string, ic uint32, max uint32) (GenRQIdentifiersFilesResponse, error) { + // Validate single-block to match package invariant + if len(layout.Blocks) != 1 { + return GenRQIdentifiersFilesResponse{}, errors.New("layout must contain exactly one block") + } + + metadataFile, err := jsonMarshal(layout) + if err != nil { + return GenRQIdentifiersFilesResponse{}, errors.Errorf("marshal layout: %w", err) + } + b64Encoded := utils.B64Encode(metadataFile) + + // Compose: base64(JSON(layout)).layout_signature + enc := make([]byte, 0, len(b64Encoded)+1+len(layoutSigB64)) + enc = append(enc, b64Encoded...) + enc = append(enc, SeparatorByte) + enc = append(enc, []byte(layoutSigB64)...) + + ids, files, err := getIDFiles(enc, ic, max) + if err != nil { + return GenRQIdentifiersFilesResponse{}, errors.Errorf("get ID Files: %w", err) + } + + return GenRQIdentifiersFilesResponse{ + RedundantMetadataFiles: files, + RQIDs: ids, + }, nil +} + +// GenerateIndexFiles generates index files and their IDs from the full signatures format. +func GenerateIndexFiles(ctx context.Context, signaturesFormat string, ic uint32, max uint32) (indexIDs []string, indexFiles [][]byte, err error) { + // Use the full signatures format that matches what was sent during RequestAction + // The chain expects this exact format for ID generation + indexIDs, indexFiles, err = getIDFiles([]byte(signaturesFormat), ic, max) + if err != nil { + return nil, nil, errors.Errorf("get index ID files: %w", err) + } + return indexIDs, indexFiles, nil +} + +// jsonMarshal marshals a value to JSON. 
+func jsonMarshal(v interface{}) ([]byte, error) { return json.Marshal(v) } diff --git a/pkg/cascadekit/signatures.go b/pkg/cascadekit/signatures.go new file mode 100644 index 00000000..6653c5bc --- /dev/null +++ b/pkg/cascadekit/signatures.go @@ -0,0 +1,66 @@ +package cascadekit + +import ( + "encoding/base64" + "encoding/json" + "fmt" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" +) + +// Signer is a function that signs the provided message and returns the raw signature bytes. +type Signer func(msg []byte) ([]byte, error) + +// SignLayoutB64 validates single-block layout, marshals to JSON, base64-encodes it, +// and signs the base64 payload, returning both the layout base64 and signature base64. +func SignLayoutB64(layout codec.Layout, signer Signer) (layoutB64 string, layoutSigB64 string, err error) { + if len(layout.Blocks) != 1 { + return "", "", errors.New("layout must contain exactly one block") + } + + me, err := json.Marshal(layout) + if err != nil { + return "", "", errors.Errorf("marshal layout: %w", err) + } + layoutB64 = base64.StdEncoding.EncodeToString(me) + + sig, err := signer([]byte(layoutB64)) + if err != nil { + return "", "", errors.Errorf("sign layout: %w", err) + } + layoutSigB64 = base64.StdEncoding.EncodeToString(sig) + return layoutB64, layoutSigB64, nil +} + +// CreateSignatures reproduces the cascade signature format and index IDs: +// Base64(index_json).Base64(creator_signature) +// It validates the layout has exactly one block. 
+func CreateSignatures(layout codec.Layout, signer Signer, ic, max uint32) (signatures string, indexIDs []string, err error) { + layoutB64, layoutSigB64, err := SignLayoutB64(layout, signer) + if err != nil { + return "", nil, err + } + + // Generate layout IDs (not returned; used to populate the index file) + layoutIDs := GenerateLayoutIDs(layoutB64, layoutSigB64, ic, max) + + // Build and sign the index file + idx := BuildIndex(layoutIDs, layoutSigB64) + indexB64, _, err := EncodeIndexB64(idx) + if err != nil { + return "", nil, err + } + + creatorSig, err := signer([]byte(indexB64)) + if err != nil { + return "", nil, errors.Errorf("sign index: %w", err) + } + creatorSigB64 := base64.StdEncoding.EncodeToString(creatorSig) + signatures = fmt.Sprintf("%s.%s", indexB64, creatorSigB64) + + // Generate the index IDs (these are the RQIDs sent to chain) + indexIDs = GenerateIndexIDs(signatures, ic, max) + return signatures, indexIDs, nil +} + diff --git a/pkg/cascadekit/verify.go b/pkg/cascadekit/verify.go new file mode 100644 index 00000000..4e7217be --- /dev/null +++ b/pkg/cascadekit/verify.go @@ -0,0 +1,23 @@ +package cascadekit + +import ( + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" +) + +// VerifySingleBlockIDs enforces single-block layouts and verifies that the +// symbols and block hash of ticket and local layouts match for block 0. 
+func VerifySingleBlockIDs(ticket, local codec.Layout) error { + if len(ticket.Blocks) != 1 || len(local.Blocks) != 1 { + return errors.New("layout must contain exactly one block") + } + if err := utils.EqualStrList(ticket.Blocks[0].Symbols, local.Blocks[0].Symbols); err != nil { + return errors.Errorf("symbol identifiers don't match: %w", err) + } + if ticket.Blocks[0].Hash != local.Blocks[0].Hash { + return errors.New("block hashes don't match") + } + return nil +} + diff --git a/sdk/README.md b/sdk/README.md index f8385eef..cea41654 100644 --- a/sdk/README.md +++ b/sdk/README.md @@ -2,8 +2,49 @@ The Lumera Supernode SDK is a comprehensive toolkit for interacting with the Lumera Protocol's supernode network to perform cascade operations +## Cascade End-to-End + +This walks through building Cascade metadata, submitting the on‑chain action, starting Cascade, and downloading the result using the SDK (sdk/action), low‑level helpers (pkg/cascadekit), and the Lumera client (pkg/lumera). + +1) Build metadata (+ price, expiration) +``` +meta, price, expiration, err := client.BuildCascadeMetadataFromFile(ctx, filePath, /*public=*/false) +if err != nil { /* handle */ } +``` +Under the hood: encodes file to a single‑block layout, signs layout/index (creator key), computes blake3(data), picks a random ic (1..100), derives max from chain params, computes price from file size + fee params, and expiration from chain duration (+1h buffer). 
+ +2) Submit RequestAction (via pkg/lumera) +``` +b, _ := json.Marshal(meta) +resp, err := lumeraClient.ActionMsg().RequestAction(ctx, "CASCADE", string(b), price, expiration) +if err != nil { /* handle */ } +// Extract actionID from tx events or query later +``` + +3) Start Cascade +``` +sig, _ := client.GenerateStartCascadeSignatureFromFile(ctx, filePath) +taskID, err := client.StartCascade(ctx, filePath, actionID, sig) +``` + +4) Download Cascade +``` +// Public (meta.Public == true): empty signature +taskID, _ := client.DownloadCascade(ctx, actionID, outDir, "") + +// Private: sign only the actionID with the creator's key (helper shown) +dlSig, _ := client.GenerateDownloadSignature(ctx, actionID, creatorAddr) +taskID, _ = client.DownloadCascade(ctx, actionID, outDir, dlSig) +``` + +Notes +- Public downloads require no signature. +- The SDK derives ic/max/price/expiration internally; you don’t need to fetch params yourself. + ## Table of Contents +- [Cascade End-to-End](#cascade-end-to-end) + - [Configuration](#configuration) - [Client Initialization](#client-initialization) - [Action Client Methods](#action-client-methods) @@ -230,20 +271,11 @@ Note: If the action's cascade metadata sets `public: true`, the signature may be - `signature string`: Base64-encoded signature for download authorization (leave empty for public cascades) **Signature Creation for Download:** -The download signature is created by combining the action ID with the creator's address, signing it, and base64 encoding the result. +For private cascades, sign only the action ID with the creator's key and base64‑encode the result. 
```go -// Create signature data: actionID.creatorAddress -signatureData := fmt.Sprintf("%s.%s", actionID, creatorAddress) - -// Sign the signature data -signedSignature, err := keyring.SignBytes(keyring, keyName, []byte(signatureData)) -if err != nil { - // Handle error -} - -// Base64 encode the signature -signature := base64.StdEncoding.EncodeToString(signedSignature) +sig, err := client.GenerateDownloadSignature(ctx, actionID, creatorAddress) +// Pass `sig` to DownloadCascade ``` **Returns:** diff --git a/sdk/action/client.go b/sdk/action/client.go index db5a932f..02f2bafc 100644 --- a/sdk/action/client.go +++ b/sdk/action/client.go @@ -2,7 +2,14 @@ package action import ( "context" + crand "crypto/rand" + "encoding/base64" "fmt" + "math/big" + "os" + "path/filepath" + "strconv" + "time" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" @@ -12,6 +19,10 @@ import ( "github.com/LumeraProtocol/supernode/v2/sdk/net" "github.com/LumeraProtocol/supernode/v2/sdk/task" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/cosmos/cosmos-sdk/crypto/keyring" ) @@ -29,6 +40,16 @@ type Client interface { GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*pb.StatusResponse, error) // DownloadCascade downloads cascade to outputDir, filename determined by action ID DownloadCascade(ctx context.Context, actionID, outputDir, signature string) (string, error) + // BuildCascadeMetadataFromFile encodes the file to produce a single-block layout, + // generates the cascade signatures, computes the blake3 data hash (base64), + // and returns CascadeMetadata (with signatures) along with price and expiration time. 
+	// Internally derives ic (random in [1..100]), max (from chain params), price (GetActionFee),
+	// and expiration (params duration + 1h buffer).
+	BuildCascadeMetadataFromFile(ctx context.Context, filePath string, public bool) (actiontypes.CascadeMetadata, string, string, error)
+	// GenerateStartCascadeSignatureFromFile computes blake3(file) and signs it with the configured key; returns base64 signature.
+	GenerateStartCascadeSignatureFromFile(ctx context.Context, filePath string) (string, error)
+	// GenerateDownloadSignature signs only the actionID (creatorAddr is validated but not part of the signed payload); returns base64 signature.
+	GenerateDownloadSignature(ctx context.Context, actionID, creatorAddr string) (string, error)
 }
 
 // ClientImpl implements the Client interface
@@ -216,3 +237,120 @@ func (c *ClientImpl) DownloadCascade(ctx context.Context, actionID, outputDir, s
 
 	return taskID, nil
 }
+
+// BuildCascadeMetadataFromFile produces Cascade metadata (including signatures) from a local file path.
+// It uses a temporary RaptorQ workspace, enforces single-block layout via the codec, and cleans up after.
+// It returns the metadata together with the derived price and expiration time.
+func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath string, public bool) (actiontypes.CascadeMetadata, string, string, error) { + if filePath == "" { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("file path is empty") + } + fi, err := os.Stat(filePath) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("stat file: %w", err) + } + data, err := os.ReadFile(filePath) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("read file: %w", err) + } + + // Create temp workspace for codec symbols; remove after + baseDir, err := os.MkdirTemp("", "rq_files_*") + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("create temp dir: %w", err) + } + defer os.RemoveAll(baseDir) + + rq := codec.NewRaptorQCodec(baseDir) + // Use a simple task ID with epoch to avoid collisions + taskID := fmt.Sprintf("sdk-%d", time.Now().UnixNano()) + enc, err := rq.Encode(ctx, codec.EncodeRequest{TaskID: taskID, Path: filePath, DataSize: int(fi.Size())}) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq encode: %w", err) + } + + // Derive `max` from chain params, then create signatures and index IDs + paramsResp, err := c.lumeraClient.GetActionParams(ctx) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("get action params: %w", err) + } + // Use MaxDdAndFingerprints as the count for rq_ids generation (chain maps this to rq_ids_max for Cascade) + var max uint32 + if paramsResp != nil && paramsResp.Params.MaxDdAndFingerprints > 0 { + max = uint32(paramsResp.Params.MaxDdAndFingerprints) + } else { + // Fallback to a sane default if params missing + max = 50 + } + // Pick a random initial counter in [1,100] + rnd, _ := crand.Int(crand.Reader, big.NewInt(100)) + ic := uint32(rnd.Int64() + 1) // 1..100 + signatures, _, err := cascadekit.CreateSignaturesWithKeyring(enc.Metadata, c.keyring, c.config.Account.KeyName, ic, max) + if 
err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("create signatures: %w", err) + } + + // Compute data hash (blake3) as base64 + dataHashB64, err := cascadekit.ComputeBlake3DataHashB64(data) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("hash data: %w", err) + } + + // Derive file name from path + fileName := filepath.Base(filePath) + + // Build metadata proto + meta := cascadekit.NewCascadeMetadata(dataHashB64, fileName, uint64(ic), signatures, public) + + // Fetch params (already fetched) to get denom and expiration duration + denom := paramsResp.Params.BaseActionFee.Denom + exp := paramsResp.Params.ExpirationDuration + + // Compute data size in KB for fee + kb := int(fi.Size()) / 1024 + feeResp, err := c.lumeraClient.GetActionFee(ctx, strconv.Itoa(kb)) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("get action fee: %w", err) + } + price := feeResp.Amount + denom + + // Expiration: now + chain duration + 1h buffer (to avoid off-by-margin rejections) + expirationUnix := time.Now().Add(exp).Add(1 * time.Hour).Unix() + expirationTime := fmt.Sprintf("%d", expirationUnix) + + return meta, price, expirationTime, nil +} + +// GenerateStartCascadeSignatureFromFile computes blake3(file) and signs it with the configured key. +// Returns base64-encoded signature suitable for StartCascade. 
+func (c *ClientImpl) GenerateStartCascadeSignatureFromFile(ctx context.Context, filePath string) (string, error) { + data, err := os.ReadFile(filePath) + if err != nil { + return "", fmt.Errorf("read file: %w", err) + } + hash, err := cascadekit.ComputeBlake3Hash(data) + if err != nil { + return "", fmt.Errorf("blake3: %w", err) + } + sig, err := keyringpkg.SignBytes(c.keyring, c.config.Account.KeyName, hash) + if err != nil { + return "", fmt.Errorf("sign hash: %w", err) + } + return base64.StdEncoding.EncodeToString(sig), nil +} + +// GenerateDownloadSignature signs the payload "actionID" and returns base64 signature. +func (c *ClientImpl) GenerateDownloadSignature(ctx context.Context, actionID, creatorAddr string) (string, error) { + if actionID == "" { + return "", fmt.Errorf("actionID is empty") + } + if creatorAddr == "" { + return "", fmt.Errorf("creator address is empty") + } + // Sign only the actionID; creatorAddr is provided but not included in payload. + sig, err := keyringpkg.SignBytes(c.keyring, c.config.Account.KeyName, []byte(actionID)) + if err != nil { + return "", fmt.Errorf("sign download payload: %w", err) + } + return base64.StdEncoding.EncodeToString(sig), nil +} diff --git a/sdk/adapters/lumera/adapter.go b/sdk/adapters/lumera/adapter.go index 1c20acdd..bacf8cd2 100644 --- a/sdk/adapters/lumera/adapter.go +++ b/sdk/adapters/lumera/adapter.go @@ -26,8 +26,12 @@ type Client interface { GetSupernodeWithLatestAddress(ctx context.Context, address string) (*SuperNodeInfo, error) DecodeCascadeMetadata(ctx context.Context, action Action) (actiontypes.CascadeMetadata, error) VerifySignature(ctx context.Context, accountAddr string, data []byte, signature []byte) error - // GetBalance returns the bank balance for the given address and denom. - GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) + // GetBalance returns the bank balance for the given address and denom. 
+ GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) + // GetActionParams returns the action module parameters. + GetActionParams(ctx context.Context) (*actiontypes.QueryParamsResponse, error) + // GetActionFee returns the fee amount for a given data size (in KB) for RequestAction. + GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) } // SuperNodeInfo contains supernode information with latest address @@ -216,6 +220,26 @@ func (a *Adapter) VerifySignature(ctx context.Context, accountAddr string, data, return nil } +// RequestAction intentionally not exposed via this adapter; use pkg/lumera directly if needed. + +// GetActionParams fetches the action module parameters via the underlying lumera client. +func (a *Adapter) GetActionParams(ctx context.Context) (*actiontypes.QueryParamsResponse, error) { + resp, err := a.client.Action().GetParams(ctx) + if err != nil { + return nil, fmt.Errorf("get action params: %w", err) + } + return resp, nil +} + +// GetActionFee fetches the action fee for a given data size (in KB). +func (a *Adapter) GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) { + resp, err := a.client.Action().GetActionFee(ctx, dataSizeKB) + if err != nil { + return nil, fmt.Errorf("get action fee: %w", err) + } + return resp, nil +} + // GetBalance fetches the balance for a given address and denom via the underlying lumera client. 
func (a *Adapter) GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) { a.logger.Debug(ctx, "Querying bank balance", "address", address, "denom", denom) diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 8c754e47..3d04f7a0 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -92,8 +92,8 @@ The supernode will connect to the Lumera network and begin participating in the logtrace.Fatal(ctx, "Failed to initialize RaptorQ store", logtrace.Fields{"error": err.Error()}) } - // Initialize P2P service - p2pService, err := initP2PService(ctx, appConfig, lumeraClient, kr, rqStore, nil, nil) + // Initialize P2P service + p2pService, err := initP2PService(ctx, appConfig, lumeraClient, kr, rqStore, nil, nil) if err != nil { logtrace.Fatal(ctx, "Failed to initialize P2P service", logtrace.Fields{"error": err.Error()}) } @@ -105,18 +105,18 @@ The supernode will connect to the Lumera network and begin participating in the } // Configure cascade service - cService := cascadeService.NewCascadeService( - &cascadeService.Config{ - Config: common.Config{ - SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, - }, - RqFilesDir: appConfig.GetRaptorQFilesDir(), - }, - lumeraClient, - *p2pService, - codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), - rqStore, - ) + cService := cascadeService.NewCascadeService( + &cascadeService.Config{ + Config: common.Config{ + SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, + }, + RqFilesDir: appConfig.GetRaptorQFilesDir(), + }, + lumeraClient, + *p2pService, + codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), + rqStore, + ) // Create cascade action server cascadeActionServer := cascade.NewCascadeActionServer(cService) @@ -153,7 +153,7 @@ The supernode will connect to the Lumera network and begin participating in the // Start profiling server on testnet only isTestnet := strings.Contains(strings.ToLower(appConfig.LumeraClientConfig.ChainID), 
"testnet") - if isTestnet { + if isTestnet && os.Getenv("INTEGRATION_TEST") != "true" { profilingAddr := "0.0.0.0:8082" logtrace.Debug(ctx, "Starting profiling server", logtrace.Fields{ @@ -214,7 +214,7 @@ func initP2PService(ctx context.Context, config *config.Config, lumeraClient lum logtrace.Debug(ctx, "Initializing P2P service", logtrace.Fields{"address": p2pConfig.ListenAddress, "port": p2pConfig.Port, "data_dir": p2pConfig.DataDir, "supernode_id": address.String()}) - p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst) + p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst) if err != nil { return nil, fmt.Errorf("failed to initialize p2p service: %w", err) } diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index 6ad40aab..0c5c9ed7 100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -1,15 +1,15 @@ package cascade import ( - "bytes" - "context" - "encoding/json" - "fmt" - "os" - "sort" - "time" + "context" + "encoding/json" + "fmt" + "os" + "sort" + "time" actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/crypto" "github.com/LumeraProtocol/supernode/v2/pkg/errors" @@ -82,7 +82,7 @@ func (task *CascadeRegistrationTask) Download( } logtrace.Info(ctx, "download: action state ok", fields) - metadata, err := task.decodeCascadeMetadata(ctx, actionDetails.GetAction().Metadata, fields) +metadata, err := cascadekit.UnmarshalCascadeMetadata(actionDetails.GetAction().Metadata) if err != nil { fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) @@ -150,8 +150,8 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti } logtrace.Debug(ctx, "Retrieve index file completed", 
logtrace.Fields{"index_id": indexID, "bytes": len(indexFile), "ms": time.Since(iStart).Milliseconds()}) - // Parse index file to get layout IDs - indexData, err := task.parseIndexFile(indexFile) + // Parse index file to get layout IDs + indexData, err := cascadekit.ParseCompressedIndexFile(indexFile) if err != nil { logtrace.Warn(ctx, "failed to parse index file", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error()}) continue @@ -260,8 +260,8 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( // Emit minimal JSON payload (metrics system removed) minPayload := map[string]any{ "retrieve": map[string]any{ - "retrieve_ms": retrieveMS, - "decode_ms": decodeMS, + "retrieve_ms": retrieveMS, + "decode_ms": decodeMS, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount, "total_symbols": totalSymbols, @@ -283,13 +283,15 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( return "", "", errors.New("file hash is nil") } - err = task.verifyDataHash(ctx, fileHash, dataHash, fields) - if err != nil { - logtrace.Error(ctx, "failed to verify hash", fields) - fields[logtrace.FieldError] = err.Error() - return "", decodeInfo.DecodeTmpDir, err - } - // Log the state of the temporary decode directory + err = cascadekit.VerifyB64DataHash(fileHash, dataHash) + if err != nil { + logtrace.Error(ctx, "failed to verify hash", fields) + fields[logtrace.FieldError] = err.Error() + return "", decodeInfo.DecodeTmpDir, err + } + // Preserve original debug log for successful hash match + logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) + // Log the state of the temporary decode directory if decodeInfo.DecodeTmpDir != "" { if set, derr := utils.ReadDirFilenames(decodeInfo.DecodeTmpDir); derr == nil { if left := len(set); left > 0 { @@ -314,24 +316,10 @@ func (task *CascadeRegistrationTask) streamDownloadEvent(eventType SupernodeEven } // parseIndexFile parses compressed 
index file to extract IndexFile structure -func (task *CascadeRegistrationTask) parseIndexFile(data []byte) (IndexFile, error) { - decompressed, err := utils.ZstdDecompress(data) - if err != nil { - return IndexFile{}, errors.Errorf("decompress index file: %w", err) - } - - // Parse decompressed data: base64IndexFile.signature.counter - parts := bytes.Split(decompressed, []byte{SeparatorByte}) - if len(parts) < 2 { - return IndexFile{}, errors.New("invalid index file format") - } - - // Decode the base64 index file - return decodeIndexFile(string(parts[0])) -} +// parseIndexFile moved to cascadekit.ParseCompressedIndexFile // retrieveLayoutFromIndex retrieves layout file using layout IDs from index file -func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context, indexData IndexFile, fields logtrace.Fields) (codec.Layout, int64, int64, int, error) { +func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context, indexData cascadekit.IndexFile, fields logtrace.Fields) (codec.Layout, int64, int64, int, error) { // Try to retrieve layout files using layout IDs from index file var ( totalFetchMS int64 @@ -351,7 +339,7 @@ func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context } t1 := time.Now() - layout, _, _, err := parseRQMetadataFile(layoutFile) + layout, _, _, err := cascadekit.ParseRQMetadataFile(layoutFile) decMS := time.Since(t1).Milliseconds() totalDecodeMS += decMS if err != nil { diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go index 99d3985a..5a36b644 100644 --- a/supernode/services/cascade/helper.go +++ b/supernode/services/cascade/helper.go @@ -1,27 +1,25 @@ package cascade import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "strconv" - "strings" - - "cosmossdk.io/math" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - 
"github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/golang/protobuf/proto" - json "github.com/json-iterator/go" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" + "context" + "encoding/base64" + "fmt" + "strconv" + + "cosmossdk.io/math" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" + + sdk "github.com/cosmos/cosmos-sdk/types" + json "github.com/json-iterator/go" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // layout stats helpers removed to keep download metrics minimal. 
@@ -64,23 +62,8 @@ func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, b return nil } -func (task *CascadeRegistrationTask) decodeCascadeMetadata(ctx context.Context, raw []byte, f logtrace.Fields) (actiontypes.CascadeMetadata, error) { - var meta actiontypes.CascadeMetadata - if err := proto.Unmarshal(raw, &meta); err != nil { - return meta, task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, f) - } - return meta, nil -} - -func (task *CascadeRegistrationTask) verifyDataHash(ctx context.Context, dh []byte, expected string, f logtrace.Fields) error { - b64 := utils.B64Encode(dh) - if string(b64) != expected { - return task.wrapErr(ctx, "data hash doesn't match", errors.New(""), f) - } - logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", f) - - return nil -} +// decodeCascadeMetadata moved to cascadekit.UnmarshalCascadeMetadata +// verifyDataHash moved to cascadekit.VerifyB64DataHash func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, path string, dataSize int, f logtrace.Fields) (*adaptors.EncodeResult, error) { resp, err := task.RQ.EncodeInput(ctx, actionID, path, dataSize) @@ -91,11 +74,11 @@ func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID s } func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context.Context, encoded string, creator string, - encodedMeta codec.Layout, f logtrace.Fields) (codec.Layout, string, error) { + encodedMeta codec.Layout, f logtrace.Fields) (codec.Layout, string, error) { - // Extract index file and creator signature from encoded data - // The signatures field contains: Base64(index_file).creators_signature - indexFileB64, creatorSig, err := extractIndexFileAndSignature(encoded) + // Extract index file and creator signature from encoded data + // The signatures field contains: Base64(index_file).creators_signature + indexFileB64, creatorSig, err := 
cascadekit.ExtractIndexAndCreatorSig(encoded) if err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to extract index file and creator signature", err, f) } @@ -111,8 +94,8 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. } logtrace.Debug(ctx, "creator signature successfully verified", f) - // Decode index file to get the layout signature - indexFile, err := decodeIndexFile(indexFileB64) + // Decode index file to get the layout signature + indexFile, err := cascadekit.DecodeIndexB64(indexFileB64) if err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode index file", err, f) } @@ -133,42 +116,36 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. } logtrace.Debug(ctx, "layout signature successfully verified", f) - return encodedMeta, indexFile.LayoutSignature, nil + return encodedMeta, indexFile.LayoutSignature, nil } func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, - sig, creator string, encodedMeta codec.Layout, f logtrace.Fields) (GenRQIdentifiersFilesResponse, error) { - // The signatures field contains: Base64(index_file).creators_signature - // This full format will be used for ID generation to match chain expectations - - // Generate layout files - layoutRes, err := GenRQIdentifiersFiles(ctx, GenRQIdentifiersFilesRequest{ - Metadata: encodedMeta, - CreatorSNAddress: creator, - RqMax: uint32(meta.RqIdsMax), - Signature: sig, - IC: uint32(meta.RqIdsIc), - }) - if err != nil { - return GenRQIdentifiersFilesResponse{}, - task.wrapErr(ctx, "failed to generate layout files", err, f) - } - - // Generate index files using full signatures format for ID generation (matches chain expectation) - indexIDs, indexFiles, err := GenIndexFiles(ctx, layoutRes.RedundantMetadataFiles, sig, meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) - if err != nil { - return GenRQIdentifiersFilesResponse{}, - 
task.wrapErr(ctx, "failed to generate index files", err, f) - } - - // Store layout files and index files separately in P2P - allFiles := append(layoutRes.RedundantMetadataFiles, indexFiles...) - - // Return index IDs (sent to chain) and all files (stored in P2P) - return GenRQIdentifiersFilesResponse{ - RQIDs: indexIDs, - RedundantMetadataFiles: allFiles, - }, nil + sig, creator string, encodedMeta codec.Layout, f logtrace.Fields) (cascadekit.GenRQIdentifiersFilesResponse, error) { + // The signatures field contains: Base64(index_file).creators_signature + // This full format will be used for ID generation to match chain expectations + + // Generate layout files (redundant metadata files) + layoutRes, err := cascadekit.GenerateLayoutFiles(ctx, encodedMeta, sig, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return cascadekit.GenRQIdentifiersFilesResponse{}, + task.wrapErr(ctx, "failed to generate layout files", err, f) + } + + // Generate index files using full signatures format for ID generation (matches chain expectation) + indexIDs, indexFiles, err := cascadekit.GenerateIndexFiles(ctx, meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return cascadekit.GenRQIdentifiersFilesResponse{}, + task.wrapErr(ctx, "failed to generate index files", err, f) + } + + // Store layout files and index files separately in P2P + allFiles := append(layoutRes.RedundantMetadataFiles, indexFiles...) 
+ + // Return index IDs (sent to chain) and all files (stored in P2P) + return cascadekit.GenRQIdentifiersFilesResponse{ + RQIDs: indexIDs, + RedundantMetadataFiles: allFiles, + }, nil } // storeArtefacts persists cascade artefacts (ID files + RaptorQ symbols) via the @@ -233,46 +210,9 @@ func (task *CascadeRegistrationTask) emitArtefactsStored( task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) } -// extractSignatureAndFirstPart extracts the signature and first part from the encoded data -// data is expected to be in format: b64(JSON(Layout)).Signature -func extractSignatureAndFirstPart(data string) (encodedMetadata string, signature string, err error) { - parts := strings.Split(data, ".") - if len(parts) < 2 { - return "", "", errors.New("invalid data format") - } - - // The first part is the base64 encoded data - return parts[0], parts[1], nil -} - -func decodeMetadataFile(data string) (layout codec.Layout, err error) { - // Decode the base64 encoded data - decodedData, err := utils.B64Decode([]byte(data)) - if err != nil { - return layout, errors.Errorf("failed to decode data: %w", err) - } - - // Unmarshal the decoded data into a layout - if err := json.Unmarshal(decodedData, &layout); err != nil { - return layout, errors.Errorf("failed to unmarshal data: %w", err) - } +// Removed legacy helpers; functionality is centralized in cascadekit. 
- return layout, nil -} - -func verifyIDs(ticketMetadata, metadata codec.Layout) error { - // Verify that the symbol identifiers match between versions - if err := utils.EqualStrList(ticketMetadata.Blocks[0].Symbols, metadata.Blocks[0].Symbols); err != nil { - return errors.Errorf("symbol identifiers don't match: %w", err) - } - - // Verify that the block hashes match - if ticketMetadata.Blocks[0].Hash != metadata.Blocks[0].Hash { - return errors.New("block hashes don't match") - } - - return nil -} +// // verifyActionFee checks if the action fee is sufficient for the given data size // It fetches action parameters, calculates the required fee, and compares it with the action price @@ -310,55 +250,11 @@ func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action return nil } -func parseRQMetadataFile(data []byte) (layout codec.Layout, signature string, counter string, err error) { - decompressed, err := utils.ZstdDecompress(data) - if err != nil { - return layout, "", "", errors.Errorf("decompress rq metadata file: %w", err) - } - - // base64EncodeMetadata.Signature.Counter - parts := bytes.Split(decompressed, []byte{SeparatorByte}) - if len(parts) != 3 { - return layout, "", "", errors.New("invalid rq metadata format: expecting 3 parts (layout, signature, counter)") - } - - layoutJson, err := utils.B64Decode(parts[0]) - if err != nil { - return layout, "", "", errors.Errorf("base64 decode failed: %w", err) - } - - if err := json.Unmarshal(layoutJson, &layout); err != nil { - return layout, "", "", errors.Errorf("unmarshal layout: %w", err) - } - - signature = string(parts[1]) - counter = string(parts[2]) - - return layout, signature, counter, nil -} +// -// extractIndexFileAndSignature extracts index file and creator signature from signatures field -// data is expected to be in format: Base64(index_file).creators_signature -func extractIndexFileAndSignature(data string) (indexFileB64 string, creatorSignature string, err error) { - parts := 
strings.Split(data, ".") - if len(parts) < 2 { - return "", "", errors.New("invalid signatures format") - } - return parts[0], parts[1], nil -} +// -// decodeIndexFile decodes base64 encoded index file -func decodeIndexFile(data string) (IndexFile, error) { - var indexFile IndexFile - decodedData, err := utils.B64Decode([]byte(data)) - if err != nil { - return indexFile, errors.Errorf("failed to decode index file: %w", err) - } - if err := json.Unmarshal(decodedData, &indexFile); err != nil { - return indexFile, errors.Errorf("failed to unmarshal index file: %w", err) - } - return indexFile, nil -} +// // VerifyDownloadSignature verifies the download signature for actionID.creatorAddress func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context, actionID, signature string) error { @@ -376,9 +272,9 @@ func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context creatorAddress := actionDetails.GetAction().Creator fields["creator_address"] = creatorAddress - // Create the expected signature data: actionID.creatorAddress - signatureData := fmt.Sprintf("%s.%s", actionID, creatorAddress) - fields["signature_data"] = signatureData + // Create the expected signature data: actionID (creator address not included in payload) + signatureData := fmt.Sprintf("%s", actionID) + fields["signature_data"] = signatureData // Decode the base64 signature signatureBytes, err := base64.StdEncoding.DecodeString(signature) diff --git a/supernode/services/cascade/helper_test.go b/supernode/services/cascade/helper_test.go deleted file mode 100644 index b22f5436..00000000 --- a/supernode/services/cascade/helper_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package cascade - -import ( - "encoding/json" - "testing" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/stretchr/testify/assert" -) - -func Test_extractSignatureAndFirstPart(t *testing.T) { - tests := []struct { - name string - 
input string - expected string - sig string - hasErr bool - }{ - {"valid format", "data.sig", "data", "sig", false}, - {"no dot", "nodelimiter", "", "", true}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - data, sig, err := extractSignatureAndFirstPart(tt.input) - if tt.hasErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tt.expected, data) - assert.Equal(t, tt.sig, sig) - } - }) - } -} - -func Test_decodeMetadataFile(t *testing.T) { - layout := codec.Layout{ - Blocks: []codec.Block{{BlockID: 1, Hash: "abc", Symbols: []string{"s"}}}, - } - jsonBytes, _ := json.Marshal(layout) - encoded := utils.B64Encode(jsonBytes) - - tests := []struct { - name string - input string - expectErr bool - wantHash string - }{ - {"valid base64+json", string(encoded), false, "abc"}, - {"invalid base64", "!@#$%", true, ""}, - {"bad json", string(utils.B64Encode([]byte("{broken"))), true, ""}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - out, err := decodeMetadataFile(tt.input) - if tt.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tt.wantHash, out.Blocks[0].Hash) - } - }) - } -} - -func Test_verifyIDs(t *testing.T) { - tests := []struct { - name string - ticket codec.Layout - metadata codec.Layout - expectErr string - }{ - { - name: "success match", - ticket: codec.Layout{Blocks: []codec.Block{ - {Symbols: []string{"A"}, Hash: "abc"}, - }}, - metadata: codec.Layout{Blocks: []codec.Block{ - {Symbols: []string{"A"}, Hash: "abc"}, - }}, - }, - { - name: "symbol mismatch", - ticket: codec.Layout{Blocks: []codec.Block{ - {Symbols: []string{"A"}}, - }}, - metadata: codec.Layout{Blocks: []codec.Block{ - {Symbols: []string{"B"}}, - }}, - expectErr: "symbol identifiers don't match", - }, - { - name: "hash mismatch", - ticket: codec.Layout{Blocks: []codec.Block{ - {Symbols: []string{"A"}, Hash: "a"}, - }}, - metadata: codec.Layout{Blocks: []codec.Block{ - {Symbols: 
[]string{"A"}, Hash: "b"}, - }}, - expectErr: "block hashes don't match", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := verifyIDs(tt.ticket, tt.metadata) - if tt.expectErr != "" { - assert.ErrorContains(t, err, tt.expectErr) - } else { - assert.NoError(t, err) - } - }) - } -} diff --git a/supernode/services/cascade/metadata.go b/supernode/services/cascade/metadata.go deleted file mode 100644 index 5ae67c07..00000000 --- a/supernode/services/cascade/metadata.go +++ /dev/null @@ -1,127 +0,0 @@ -package cascade - -import ( - "context" - - "bytes" - - "strconv" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/cosmos/btcutil/base58" - json "github.com/json-iterator/go" -) - -const ( - SeparatorByte byte = 46 // separator in dd_and_fingerprints.signature i.e. '.' -) - -// IndexFile represents the structure of the index file -type IndexFile struct { - Version int `json:"version"` - LayoutIDs []string `json:"layout_ids"` - LayoutSignature string `json:"layout_signature"` -} - -type GenRQIdentifiersFilesRequest struct { - Metadata codec.Layout - RqMax uint32 - CreatorSNAddress string - Signature string - IC uint32 -} - -type GenRQIdentifiersFilesResponse struct { - // IDs of the Redundant Metadata Files -- len(RQIDs) == len(RedundantMetadataFiles) - RQIDs []string - // RedundantMetadataFiles is a list of redundant files that are generated from the Metadata file - RedundantMetadataFiles [][]byte -} - -// GenRQIdentifiersFiles generates Redundant Metadata Files and IDs -func GenRQIdentifiersFiles(ctx context.Context, req GenRQIdentifiersFilesRequest) (resp GenRQIdentifiersFilesResponse, err error) { - metadataFile, err := json.Marshal(req.Metadata) - if err != nil { - return resp, errors.Errorf("marshal rqID file: %w", err) - } - b64EncodedMetadataFile := utils.B64Encode(metadataFile) - - // Create the RQID 
file by combining the encoded file with the signature - var buffer bytes.Buffer - buffer.Write(b64EncodedMetadataFile) - buffer.WriteByte(SeparatorByte) - buffer.Write([]byte(req.Signature)) - encMetadataFileWithSignature := buffer.Bytes() - - // Generate the specified number of variant IDs - rqIdIds, rqIDsFiles, err := GetIDFiles(ctx, encMetadataFileWithSignature, req.IC, req.RqMax) - if err != nil { - return resp, errors.Errorf("get ID Files: %w", err) - } - - return GenRQIdentifiersFilesResponse{ - RedundantMetadataFiles: rqIDsFiles, - RQIDs: rqIdIds, - }, nil -} - -// GetIDFiles generates Redundant Files for dd_and_fingerprints files and rq_id files -// encMetadataFileWithSignature is b64 encoded layout file appended with signatures and compressed, ic is the initial counter -// and max is the number of ids to generate -func GetIDFiles(ctx context.Context, encMetadataFileWithSignature []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { - idFiles := make([][]byte, 0, max) - ids = make([]string, 0, max) - var buffer bytes.Buffer - - for i := uint32(0); i < max; i++ { - buffer.Reset() - counter := ic + i - - buffer.Write(encMetadataFileWithSignature) - buffer.WriteByte(SeparatorByte) - buffer.WriteString(strconv.Itoa(int(counter))) // Using the string representation to maintain backward compatibility - - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - return ids, idFiles, errors.Errorf("compress identifiers file: %w", err) - } - - idFiles = append(idFiles, compressedData) - - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - return ids, idFiles, errors.Errorf("sha3-256-hash error getting an id file: %w", err) - } - - ids = append(ids, base58.Encode(hash)) - } - - return ids, idFiles, nil -} - -// GenIndexFiles generates index files and their IDs from layout files using full signatures format -func GenIndexFiles(ctx context.Context, layoutFiles [][]byte, layoutSignature string, signaturesFormat 
string, ic uint32, max uint32) (indexIDs []string, indexFiles [][]byte, err error) { - // Create layout IDs from layout files - layoutIDs := make([]string, len(layoutFiles)) - for i, layoutFile := range layoutFiles { - hash, err := utils.Blake3Hash(layoutFile) - if err != nil { - return nil, nil, errors.Errorf("hash layout file: %w", err) - } - layoutIDs[i] = base58.Encode(hash) - } - - // Use the full signatures format that matches what was sent during RequestAction - // The chain expects this exact format for ID generation - indexFileWithSignatures := []byte(signaturesFormat) - - // Generate index file IDs using full signatures format - indexIDs, indexFiles, err = GetIDFiles(ctx, indexFileWithSignatures, ic, max) - if err != nil { - return nil, nil, errors.Errorf("get index ID files: %w", err) - } - - return indexIDs, indexFiles, nil -} diff --git a/supernode/services/cascade/metadata_test.go b/supernode/services/cascade/metadata_test.go deleted file mode 100644 index 48110d61..00000000 --- a/supernode/services/cascade/metadata_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package cascade - -import ( - "context" - "encoding/json" - "fmt" - "testing" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/cosmos/btcutil/base58" - "github.com/stretchr/testify/assert" -) - -func TestGenRQIdentifiersFiles(t *testing.T) { - tests := []struct { - name string - req GenRQIdentifiersFilesRequest - expectedCount int - }{ - { - name: "basic valid request", - req: GenRQIdentifiersFilesRequest{ - Metadata: codec.Layout{ - Blocks: []codec.Block{ - { - BlockID: 1, - EncoderParameters: []int{1, 2}, - OriginalOffset: 0, - Size: 10, - Symbols: []string{"s1", "s2"}, - Hash: "abcd1234", - }, - }, - }, - Signature: "sig", - RqMax: 2, - IC: 1, - }, - expectedCount: 2, - }, - { - name: "different IC value", - req: GenRQIdentifiersFilesRequest{ - Metadata: codec.Layout{ - Blocks: []codec.Block{ - { - BlockID: 5, - 
EncoderParameters: []int{9}, - OriginalOffset: 99, - Size: 42, - Symbols: []string{"x"}, - Hash: "z", - }, - }, - }, - Signature: "mysig", - RqMax: 1, - IC: 5, - }, - expectedCount: 1, - }, - } - - ctx := context.Background() - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - resp, err := GenRQIdentifiersFiles(ctx, tt.req) - assert.NoError(t, err) - assert.Len(t, resp.RQIDs, tt.expectedCount) - assert.Len(t, resp.RedundantMetadataFiles, tt.expectedCount) - - // independently compute expected response - metadataBytes, err := json.Marshal(tt.req.Metadata) - assert.NoError(t, err) - - base64Meta := utils.B64Encode(metadataBytes) - - for i := 0; i < tt.expectedCount; i++ { - composite := append(base64Meta, []byte(fmt.Sprintf(".%s.%d", tt.req.Signature, tt.req.IC+uint32(i)))...) - compressed, err := utils.ZstdCompress(composite) - assert.NoError(t, err) - - hash, err := utils.Blake3Hash(compressed) - assert.NoError(t, err) - - expectedRQID := base58.Encode(hash) - - assert.Equal(t, expectedRQID, resp.RQIDs[i]) - assert.Equal(t, compressed, resp.RedundantMetadataFiles[i]) - } - }) - } -} diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go index 1e8659f3..866420aa 100644 --- a/supernode/services/cascade/register.go +++ b/supernode/services/cascade/register.go @@ -4,6 +4,7 @@ import ( "context" "os" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/supernode/services/common" ) @@ -102,17 +103,18 @@ func (task *CascadeRegistrationTask) Register( task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) /* 4. 
Decode cascade metadata -------------------------------------------------- */ - cascadeMeta, err := task.decodeCascadeMetadata(ctx, action.Metadata, fields) + cascadeMeta, err := cascadekit.UnmarshalCascadeMetadata(action.Metadata) if err != nil { - return err + return task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, fields) } logtrace.Info(ctx, "register: metadata decoded", fields) task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) /* 5. Verify data hash --------------------------------------------------------- */ - if err := task.verifyDataHash(ctx, req.DataHash, cascadeMeta.DataHash, fields); err != nil { + if err := cascadekit.VerifyB64DataHash(req.DataHash, cascadeMeta.DataHash); err != nil { return err } + logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) logtrace.Info(ctx, "register: data hash matched", fields) task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) @@ -147,7 +149,7 @@ func (task *CascadeRegistrationTask) Register( task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) /* 9. 
Consistency checks ------------------------------------------------------- */ - if err := verifyIDs(layout, encResp.Metadata); err != nil { + if err := cascadekit.VerifySingleBlockIDs(layout, encResp.Metadata); err != nil { return task.wrapErr(ctx, "failed to verify IDs", err, fields) } logtrace.Info(ctx, "register: rqids validated", fields) @@ -171,8 +173,8 @@ func (task *CascadeRegistrationTask) Register( if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { return err } - // Emit artefacts stored event (metrics payload removed; logs preserved) - task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) + // Emit artefacts stored event (metrics payload removed; logs preserved) + task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) if err != nil { diff --git a/tests/system/e2e_cascade_test.go b/tests/system/e2e_cascade_test.go index e457ccd0..0e7541be 100644 --- a/tests/system/e2e_cascade_test.go +++ b/tests/system/e2e_cascade_test.go @@ -3,7 +3,6 @@ package system import ( "context" "crypto/sha256" - "encoding/base64" "encoding/json" "fmt" "io" @@ -14,7 +13,6 @@ import ( "testing" "time" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" "github.com/LumeraProtocol/supernode/v2/supernode/config" @@ -22,7 +20,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/sdk/action" "github.com/LumeraProtocol/supernode/v2/sdk/event" - "github.com/LumeraProtocol/lumera/x/action/v1/types" sdkconfig "github.com/LumeraProtocol/supernode/v2/sdk/config" "github.com/stretchr/testify/require" @@ -67,10 +64,7 @@ func TestCascadeE2E(t *testing.T) { ) // Action request parameters - const ( - actionType = "CASCADE" // The action type for fountain code processing - price = "23800ulume" // Price for the action in 
ulume tokens - ) + const actionType = "CASCADE" // The action type for fountain code processing t.Log("Step 1: Starting all services") // Update the genesis file with action parameters @@ -274,78 +268,35 @@ func TestCascadeE2E(t *testing.T) { originalHash := sha256.Sum256(data) t.Logf("Original file SHA256 hash: %x", originalHash) - rqCodec := codec.NewRaptorQCodec(raptorQFilesDir) - - encodeRes, err := rqCodec.Encode(ctx, codec.EncodeRequest{ - Path: testFileFullpath, - DataSize: int(fileInfo.Size()), - TaskID: "1", - }) - - require.NoError(t, err, "Failed to encode data with RaptorQ") - - metadataFile := encodeRes.Metadata + // Cascade signature creation process (high-level via action SDK) - // Cascade signature creation process - const ic = uint32(121) - const maxFiles = uint32(50) - - // Create cascade signature format - signatureFormat, indexFileIDs, err := createCascadeLayoutSignature(metadataFile, keplrKeyring, userKeyName, ic, maxFiles) - require.NoError(t, err, "Failed to create cascade signature") - - t.Logf("Signature format prepared with length: %d bytes", len(signatureFormat)) - t.Logf("Generated %d index file IDs for chain verification", len(indexFileIDs)) - - // Data hash with blake3 - hash, err := ComputeBlake3Hash(data) - b64EncodedHash := base64.StdEncoding.EncodeToString(hash) - require.NoError(t, err, "Failed to compute Blake3 hash") + // Build action client for metadata generation and cascade operations + accConfig := sdkconfig.AccountConfig{LocalCosmosAddress: recoveredAddress, KeyName: testKeyName, Keyring: keplrKeyring} + lumraConfig := sdkconfig.LumeraConfig{GRPCAddr: lumeraGRPCAddr, ChainID: lumeraChainID} + actionConfig := sdkconfig.Config{Account: accConfig, Lumera: lumraConfig} + actionClient, err := action.NewClient(context.Background(), actionConfig, nil) + require.NoError(t, err, "Failed to create action client") - // Also Create a signature for the hash - signedHash, err := keyring.SignBytes(keplrKeyring, userKeyName, hash) - 
require.NoError(t, err, "Failed to sign hash") + // Use the new SDK helper to build Cascade metadata (includes signatures, price, and expiration) + builtMeta, autoPrice, expirationTime, err := actionClient.BuildCascadeMetadataFromFile(ctx, testFileFullpath, false) + require.NoError(t, err, "Failed to build cascade metadata from file") - // Encode the signed hash as base64 - signedHashBase64 := base64.StdEncoding.EncodeToString(signedHash) + // Create a signature for StartCascade using the SDK helper + signedHashBase64, err := actionClient.GenerateStartCascadeSignatureFromFile(ctx, testFileFullpath) + require.NoError(t, err, "Failed to generate StartCascade signature") // --------------------------------------- t.Log("Step 7: Creating metadata and submitting action request") - // Create CascadeMetadata struct with all required fields - cascadeMetadata := types.CascadeMetadata{ - DataHash: b64EncodedHash, // Hash of the original file - FileName: filepath.Base(testFileFullpath), // Original filename - RqIdsIc: uint64(121), // Count of RQ identifiers - Signatures: signatureFormat, // Combined signature format - } - - // Marshal the struct to JSON for the blockchain transaction - metadataBytes, err := json.Marshal(cascadeMetadata) + // Marshal the helper-built metadata to JSON for the blockchain transaction + metadataBytes, err := json.Marshal(builtMeta) require.NoError(t, err, "Failed to marshal CascadeMetadata to JSON") metadata := string(metadataBytes) - // Set expiration time 25 hours in the future (minimum is 24 hours) - // This defines how long the action request is valid - expirationTime := fmt.Sprintf("%d", time.Now().Add(25*time.Hour).Unix()) - t.Logf("Requesting cascade action with metadata: %s", metadata) - t.Logf("Action type: %s, Price: %s, Expiration: %s", actionType, price, expirationTime) - - // Submit the action request transaction to the blockchain using user key - // This registers the request with metadata for supernodes to process - // 
actionRequestResp := cli.CustomCommand( - // "tx", "action", "request-action", - // actionType, // CASCADE action type - // metadata, // JSON metadata with all required fields - // price, // Price in ulume tokens - // expirationTime, // Unix timestamp for expiration - // "--from", userKeyName, // Use user key for transaction submission - // "--gas", "auto", - // "--gas-adjustment", "1.5", - // ) - - response, err := lumeraClinet.ActionMsg().RequestAction(ctx, actionType, metadata, price, expirationTime) + t.Logf("Action type: %s, Price: %s, Expiration: %s", actionType, autoPrice, expirationTime) + + response, err := lumeraClinet.ActionMsg().RequestAction(ctx, actionType, metadata, autoPrice, expirationTime) txresp := response.TxResponse @@ -400,32 +351,6 @@ func TestCascadeE2E(t *testing.T) { require.NotEmpty(t, actionID, "Action ID should not be empty") t.Logf("Extracted action ID: %s", actionID) - // Set up action client configuration - // This defines how to connect to network services - accConfig := sdkconfig.AccountConfig{ - LocalCosmosAddress: recoveredAddress, - KeyName: testKeyName, - Keyring: keplrKeyring, - } - - lumraConfig := sdkconfig.LumeraConfig{ - GRPCAddr: lumeraGRPCAddr, - ChainID: lumeraChainID, - } - actionConfig := sdkconfig.Config{ - Account: accConfig, - Lumera: lumraConfig, - } - - // Initialize action client for cascade operations - actionClient, err := action.NewClient( - context.Background(), - actionConfig, - nil, // Nil logger - use default - - ) - require.NoError(t, err, "Failed to create action client") - // --------------------------------------- // Step 9: Subscribe to all events and extract tx hash // --------------------------------------- @@ -507,7 +432,7 @@ func TestCascadeE2E(t *testing.T) { if event.Get("type").String() == "coin_spent" { attrs := event.Get("attributes").Array() for i, attr := range attrs { - if attr.Get("key").String() == "amount" && attr.Get("value").String() == price { + if attr.Get("key").String() == 
"amount" && attr.Get("value").String() == autoPrice { feeSpent = true // Get the spender address from the same event group for j, addrAttr := range attrs { @@ -524,7 +449,7 @@ func TestCascadeE2E(t *testing.T) { if event.Get("type").String() == "coin_received" { attrs := event.Get("attributes").Array() for i, attr := range attrs { - if attr.Get("key").String() == "amount" && attr.Get("value").String() == price { + if attr.Get("key").String() == "amount" && attr.Get("value").String() == autoPrice { feeReceived = true // Get the receiver address from the same event group for j, addrAttr := range attrs { @@ -548,18 +473,13 @@ func TestCascadeE2E(t *testing.T) { t.Logf("Payment flow: %s paid %s to %s", fromAddress, amount, toAddress) require.NotEmpty(t, fromAddress, "Spender address should not be empty") require.NotEmpty(t, toAddress, "Receiver address should not be empty") - require.Equal(t, price, amount, "Payment amount should match action price") + require.Equal(t, autoPrice, amount, "Payment amount should match action price") time.Sleep(10 * time.Second) outputFileBaseDir := filepath.Join(".") - // Create signature: actionId.creatorsaddress (using the same address that was used for StartCascade) - signatureData := fmt.Sprintf("%s.%s", actionID, userAddress) - // Sign the signature data with user key - signedSignature, err := keyring.SignBytes(keplrKeyring, userKeyName, []byte(signatureData)) - require.NoError(t, err, "Failed to sign signature data") - // Base64 encode the signed signature - signature := base64.StdEncoding.EncodeToString(signedSignature) + // Create download signature for actionID (using the same address that was used for StartCascade) + signature, err := actionClient.GenerateDownloadSignature(context.Background(), actionID, userAddress) // Try to download the file using the action ID and signature dtaskID, err := actionClient.DownloadCascade(context.Background(), actionID, outputFileBaseDir, signature) diff --git a/tests/system/go.mod 
b/tests/system/go.mod index e6eb3bba..8e1d8840 100644 --- a/tests/system/go.mod +++ b/tests/system/go.mod @@ -27,7 +27,6 @@ require ( require ( cosmossdk.io/math v1.5.3 - github.com/LumeraProtocol/lumera v1.7.0 github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000 github.com/cometbft/cometbft v0.38.17 github.com/tidwall/gjson v1.14.2 @@ -51,6 +50,7 @@ require ( github.com/99designs/keyring v1.2.2 // indirect github.com/DataDog/datadog-go v3.2.0+incompatible // indirect github.com/DataDog/zstd v1.5.7 // indirect + github.com/LumeraProtocol/lumera v1.7.2 // indirect github.com/LumeraProtocol/rq-go v0.2.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.2.0 // indirect @@ -119,6 +119,7 @@ require ( github.com/improbable-eng/grpc-web v0.15.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kr/pretty v0.3.1 // indirect @@ -128,6 +129,8 @@ require ( github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/minio/highwayhash v1.0.3 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect diff --git a/tests/system/go.sum b/tests/system/go.sum index 6e9c0112..d00c5807 100644 --- a/tests/system/go.sum +++ b/tests/system/go.sum @@ -73,8 +73,8 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 
h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.7.0 h1:F5zgRBnCtgGfdMB6jz01PFWIzbS8VjQfCu1H9OYt3BU= -github.com/LumeraProtocol/lumera v1.7.0/go.mod h1:c1M+sjewuCvxw+pznwlspUzenDJI8Y+suKB3RFKS2Wo= +github.com/LumeraProtocol/lumera v1.7.2 h1:qA0qwEOfCqW6yY232/MEK6gfLYq4HVYSmbcOCOZqEoc= +github.com/LumeraProtocol/lumera v1.7.2/go.mod h1:c1M+sjewuCvxw+pznwlspUzenDJI8Y+suKB3RFKS2Wo= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= @@ -505,6 +505,7 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -578,9 +579,11 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= diff --git a/tests/system/signature_utils.go b/tests/system/signature_utils.go deleted file mode 100644 index 977c674c..00000000 --- a/tests/system/signature_utils.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import ( - "github.com/LumeraProtocol/supernode/v2/pkg/cascade" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" -) - -// createCascadeLayoutSignature is a wrapper for the common cascade signature function -func createCascadeLayoutSignature(metadataFile codec.Layout, kr cosmoskeyring.Keyring, userKeyName string, ic uint32, maxFiles uint32) (signatureFormat string, indexFileIDs []string, err error) { - return cascade.CreateLayoutSignature(metadataFile, kr, userKeyName, ic, maxFiles) -} - -// ComputeBlake3Hash is a wrapper for the common Blake3 hash function -func ComputeBlake3Hash(msg []byte) ([]byte, error) { - return cascade.ComputeBlake3Hash(msg) -} \ No newline at end of file From fa9195f82d98ec25257d7ee736021a150ff20850 Mon Sep 17 
00:00:00 2001 From: Matee Ullah Malik Date: Sat, 4 Oct 2025 00:25:21 +0500 Subject: [PATCH 25/27] remove stake --- tests/system/e2e_cascade_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/system/e2e_cascade_test.go b/tests/system/e2e_cascade_test.go index 0e7541be..3c9072a2 100644 --- a/tests/system/e2e_cascade_test.go +++ b/tests/system/e2e_cascade_test.go @@ -184,13 +184,11 @@ func TestCascadeE2E(t *testing.T) { // Fund the account with tokens for transactions t.Logf("Funding test address %s with %s", recoveredAddress, fundAmount) - cli.FundAddress(recoveredAddress, fundAmount) // ulume tokens for action fees - cli.FundAddress(recoveredAddress, "10000000stake") // stake tokens + cli.FundAddress(recoveredAddress, fundAmount) // ulume tokens for action fees // Fund user account t.Logf("Funding user address %s with %s", userAddress, fundAmount) - cli.FundAddress(userAddress, fundAmount) // ulume tokens for action fees - cli.FundAddress(userAddress, "10000000stake") // stake tokens + cli.FundAddress(userAddress, fundAmount) // ulume tokens for action fees sut.AwaitNextBlock(t) // Wait for funding transaction to be processed From 3e13e1fa5dcf4a29c455655934afa451fc61c182 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Sat, 4 Oct 2025 01:37:06 +0500 Subject: [PATCH 26/27] Use lume in genesis --- tests/system/e2e_cascade_test.go | 21 +++++++++++++++++---- tests/system/system.go | 1 + 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/tests/system/e2e_cascade_test.go b/tests/system/e2e_cascade_test.go index 3c9072a2..2db7ad09 100644 --- a/tests/system/e2e_cascade_test.go +++ b/tests/system/e2e_cascade_test.go @@ -67,8 +67,10 @@ func TestCascadeE2E(t *testing.T) { const actionType = "CASCADE" // The action type for fountain code processing t.Log("Step 1: Starting all services") - // Update the genesis file with action parameters - sut.ModifyGenesisJSON(t, SetActionParams(t)) + // Update the genesis file with required 
params before starting + // - Set staking bond denom to match ulume used by gentxs + // - Configure action module params used by the test + sut.ModifyGenesisJSON(t, SetStakingBondDenomUlume(t), SetActionParams(t)) // Reset and start the blockchain sut.StartChain(t) @@ -119,7 +121,7 @@ func TestCascadeE2E(t *testing.T) { args := []string{ "query", "supernode", - "get-top-super-nodes-for-block", + "get-top-supernodes-for-block", fmt.Sprint(queryHeight), "--output", "json", } @@ -269,7 +271,8 @@ func TestCascadeE2E(t *testing.T) { // Cascade signature creation process (high-level via action SDK) // Build action client for metadata generation and cascade operations - accConfig := sdkconfig.AccountConfig{LocalCosmosAddress: recoveredAddress, KeyName: testKeyName, Keyring: keplrKeyring} + // Use the same account that submits RequestAction so signatures match the on-chain creator + accConfig := sdkconfig.AccountConfig{LocalCosmosAddress: userAddress, KeyName: userKeyName, Keyring: keplrKeyring} lumraConfig := sdkconfig.LumeraConfig{GRPCAddr: lumeraGRPCAddr, ChainID: lumeraChainID} actionConfig := sdkconfig.Config{Account: accConfig, Lumera: lumraConfig} actionClient, err := action.NewClient(context.Background(), actionConfig, nil) @@ -599,3 +602,13 @@ func SetActionParams(t *testing.T) GenesisMutator { return state } } + +// SetStakingBondDenomUlume sets the staking module bond denom to "ulume" in genesis +func SetStakingBondDenomUlume(t *testing.T) GenesisMutator { + return func(genesis []byte) []byte { + t.Helper() + state, err := sjson.SetBytes(genesis, "app_state.staking.params.bond_denom", "ulume") + require.NoError(t, err) + return state + } +} diff --git a/tests/system/system.go b/tests/system/system.go index 8666c474..1a2a3bd7 100644 --- a/tests/system/system.go +++ b/tests/system/system.go @@ -236,6 +236,7 @@ func appendToBuf(r io.Reader, b *ring.Ring, stop <-chan struct{}) { func isLogNoise(text string) bool { for _, v := range []string{ 
"\x1b[36mmodule=\x1b[0mrpc-server", // "module=rpc-server", + "Upgrading IAVL storage for faster queries", } { if strings.Contains(text, v) { return true From 496702091a2b3ac45edf1af25d6aa22391d2a501 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Mon, 6 Oct 2025 17:01:45 +0500 Subject: [PATCH 27/27] fixes : sdk , format files --- p2p/kademlia/dht.go | 131 ++++++------ p2p/kademlia/network.go | 199 +++++++++--------- p2p/kademlia/rq_symbols.go | 38 ++-- p2p/p2p.go | 30 +-- pkg/cascadekit/doc.go | 1 - pkg/cascadekit/index.go | 57 +++-- pkg/cascadekit/index_parse.go | 25 ++- pkg/cascadekit/metadata.go | 17 +- pkg/cascadekit/metadata_helpers.go | 29 ++- pkg/cascadekit/parsers.go | 59 +++--- pkg/cascadekit/rqid.go | 84 ++++---- pkg/cascadekit/signatures.go | 85 ++++---- pkg/cascadekit/verify.go | 27 ++- pkg/codec/codec.go | 5 +- pkg/codec/codec_mock.go | 65 ------ pkg/codec/raptorq.go | 66 +++++- pkg/logtrace/datadog.go | 16 +- pkg/logtrace/log.go | 2 +- pkg/lumera/modules/action_msg/helpers.go | 90 ++++---- pkg/lumera/util/coin.go | 49 +++-- sdk/action/client.go | 30 +-- sdk/adapters/lumera/adapter.go | 32 +-- sdk/adapters/supernodeservice/types.go | 14 +- sdk/event/keys.go | 2 +- sdk/event/types.go | 64 +++--- sdk/net/client.go | 12 +- sdk/net/impl.go | 42 ++-- sdk/task/timeouts.go | 1 - sn-manager/internal/config/config.go | 13 +- supernode/cmd/config_update.go | 16 +- supernode/cmd/init.go | 1 - .../cascade/cascade_action_server_test.go | 2 +- .../node/action/server/cascade/helper.go | 14 +- supernode/node/supernode/gateway/swagger.go | 6 +- .../node/supernode/server/status_server.go | 2 +- .../supernode/server/status_server_test.go | 8 +- supernode/services/cascade/adaptors/p2p.go | 4 +- supernode/services/cascade/config.go | 6 +- supernode/services/cascade/download.go | 36 ++-- supernode/services/cascade/events.go | 38 ++-- supernode/services/cascade/helper.go | 113 +++++----- supernode/services/cascade/service.go | 14 +- 
supernode/services/cascade/status_test.go | 6 +- .../services/common/supernode/metrics.go | 4 +- .../services/common/supernode/service.go | 2 +- .../services/common/supernode/service_test.go | 16 +- supernode/services/common/supernode/types.go | 12 +- tests/integration/p2p/p2p_integration_test.go | 4 +- 48 files changed, 797 insertions(+), 792 deletions(-) delete mode 100644 pkg/codec/codec_mock.go diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 9d029479..62aa2768 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -124,7 +124,7 @@ func (s *DHT) ConnPoolSnapshot() map[string]int64 { // Options contains configuration options for the queries node type Options struct { - ID []byte + ID []byte // The queries IPv4 or IPv6 address IP string @@ -141,7 +141,6 @@ type Options struct { // Keyring for credentials Keyring keyring.Keyring - } // NewDHT returns a new DHT node @@ -471,7 +470,7 @@ func (s *DHT) Stats(ctx context.Context) (map[string]interface{}, error) { dhtStats["peers_count"] = len(s.ht.nodes()) dhtStats["peers"] = s.ht.nodes() dhtStats["network"] = s.network.HandleMetricsSnapshot() - // Removed: recent per-request snapshots (logs provide visibility) + // Removed: recent per-request snapshots (logs provide visibility) dhtStats["database"] = dbStats return dhtStats, nil @@ -682,7 +681,7 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result } func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, txID string, localOnly ...bool) (result map[string][]byte, err error) { - logtrace.Debug(ctx, "DHT BatchRetrieve begin", logtrace.Fields{"txid": txID, "keys": len(keys), "required": required}) + logtrace.Debug(ctx, "DHT BatchRetrieve begin", logtrace.Fields{"txid": txID, "keys": len(keys), "required": required}) result = make(map[string][]byte) var resMap sync.Map var foundLocalCount int32 @@ -754,23 +753,23 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, if 
err != nil { return nil, fmt.Errorf("fetch and add local keys: %v", err) } - // Found locally count is logged via summary below; no external metrics + // Found locally count is logged via summary below; no external metrics if foundLocalCount >= required { return result, nil } - batchSize := batchRetrieveSize - var networkFound int32 - totalBatches := int(math.Ceil(float64(required) / float64(batchSize))) - parallelBatches := int(math.Min(float64(totalBatches), float64(fetchSymbolsBatchConcurrency))) + batchSize := batchRetrieveSize + var networkFound int32 + totalBatches := int(math.Ceil(float64(required) / float64(batchSize))) + parallelBatches := int(math.Min(float64(totalBatches), float64(fetchSymbolsBatchConcurrency))) - semaphore := make(chan struct{}, parallelBatches) - var wg sync.WaitGroup - gctx, cancel := context.WithCancel(ctx) - defer cancel() + semaphore := make(chan struct{}, parallelBatches) + var wg sync.WaitGroup + gctx, cancel := context.WithCancel(ctx) + defer cancel() - // Measure only the network retrieval phase (after local scan) - netStart := time.Now() + // Measure only the network retrieval phase (after local scan) + netStart := time.Now() for start := 0; start < len(keys); start += batchSize { end := start + batchSize @@ -802,15 +801,17 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, wg.Wait() - netFound := int(atomic.LoadInt32(&networkFound)) -{ - f := logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(netStart).Milliseconds(), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch retrieve summary", f) -} - // Record batch retrieve stats for internal DHT snapshot window (network phase only) - s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(netStart)) - // No per-task metrics collector 
updates + netFound := int(atomic.LoadInt32(&networkFound)) + { + f := logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(netStart).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch retrieve summary", f) + } + // Record batch retrieve stats for internal DHT snapshot window (network phase only) + s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(netStart)) + // No per-task metrics collector updates return result, nil } @@ -942,7 +943,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, defer func() { <-semaphore }() } - indices := fetchMap[nodeID] + indices := fetchMap[nodeID] requestKeys := make(map[string]KeyValWithClosest) for _, idx := range indices { if idx < len(hexKeys) { @@ -966,9 +967,9 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, firstErr = err } mu.Unlock() - // per-node metrics removed; logs retained - return - } + // per-node metrics removed; logs retained + return + } returned := 0 for k, v := range decompressedData { @@ -988,9 +989,9 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, } } - // per-node metrics removed; logs retained - }(node, nodeID) - } + // per-node metrics removed; logs retained + }(node, nodeID) + } wg.Wait() @@ -1038,20 +1039,24 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, func (s *DHT) doBatchGetValuesCall(ctx context.Context, node *Node, requestKeys map[string]KeyValWithClosest) (map[string]KeyValWithClosest, error) { request := s.newMessage(BatchGetValues, node, &BatchGetValuesRequest{Data: requestKeys}) -{ - f := logtrace.Fields{"node": node.String(), "keys": len(requestKeys), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); 
o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch get send", f) -} + { + f := logtrace.Fields{"node": node.String(), "keys": len(requestKeys), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch get send", f) + } response, err := s.network.Call(ctx, request, false) if err != nil { return nil, fmt.Errorf("network call request %s failed: %w", request.String(), err) } -{ - f := logtrace.Fields{"node": node.String(), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch get ok", f) -} + { + f := logtrace.Fields{"node": node.String(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch get ok", f) + } resp, ok := response.Data.(*BatchGetValuesResponse) if !ok { @@ -1659,11 +1664,13 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i knownNodes := make(map[string]*Node) hashes := make([][]byte, len(values)) -{ - f := logtrace.Fields{logtrace.FieldModule: "dht", "task_id": id, "keys": len(values), "len_nodes": len(s.ht.nodes()), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch store start", f) -} + { + f := logtrace.Fields{logtrace.FieldModule: "dht", "task_id": id, "keys": len(values), "len_nodes": len(s.ht.nodes()), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch store start", f) + } for i := 0; i < len(values); i++ { target, _ := utils.Blake3Hash(values[i]) hashes[i] = target @@ -1689,11 +1696,11 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i storeResponses := s.batchStoreNetwork(ctx, values, 
knownNodes, storageMap, typ) for response := range storeResponses { requests++ - var nodeAddr string + var nodeAddr string if response.Receiver != nil { - nodeAddr = response.Receiver.String() + nodeAddr = response.Receiver.String() } else if response.Message != nil && response.Message.Sender != nil { - nodeAddr = response.Message.Sender.String() + nodeAddr = response.Message.Sender.String() } errMsg := "" @@ -1724,7 +1731,7 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i } } - // per-node store metrics removed; logs retained + // per-node store metrics removed; logs retained } @@ -1797,11 +1804,13 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ totalBytes += len(values[idx]) } - { - f := logtrace.Fields{logtrace.FieldModule: "dht", "node": receiver.String(), "keys": len(toStore), "size_mb": utils.BytesIntToMB(totalBytes), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch store RPC send", f) - } + { + f := logtrace.Fields{logtrace.FieldModule: "dht", "node": receiver.String(), "keys": len(toStore), "size_mb": utils.BytesIntToMB(totalBytes), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch store RPC send", f) + } // Skip empty payloads: avoid sending empty store RPCs and do not record no-op metrics. 
if len(toStore) == 0 { @@ -1827,11 +1836,13 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ return } - { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "node": receiver.String(), "keys": len(toStore), "ms": dur, logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "dht: batch store RPC ok", f) - } + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "node": receiver.String(), "keys": len(toStore), "ms": dur, logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch store RPC ok", f) + } responses <- &MessageWithError{Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur} } }(node, key) diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index e4ab76e5..ef542ee5 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -68,7 +68,6 @@ type Network struct { sem *semaphore.Weighted metrics sync.Map - } // NewNetwork returns a network service @@ -406,15 +405,15 @@ func (s *Network) handleConn(ctx context.Context, rawConn net.Conn) { }) return } - // stitch correlation + origin into context for downstream handler logs - if request != nil { - if s := strings.TrimSpace(request.CorrelationID); s != "" { - ctx = logtrace.CtxWithCorrelationID(ctx, s) - } - if o := strings.TrimSpace(request.Origin); o != "" { - ctx = logtrace.CtxWithOrigin(ctx, o) - } - } + // stitch correlation + origin into context for downstream handler logs + if request != nil { + if s := strings.TrimSpace(request.CorrelationID); s != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, s) + } + if o := strings.TrimSpace(request.Origin); o != "" { + ctx = logtrace.CtxWithOrigin(ctx, o) + } + } reqID := uuid.New().String() mt := request.MessageType @@ -597,17 +596,17 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes 
idStr := string(request.Receiver.ID) remoteAddr := fmt.Sprintf("%s@%s:%d", idStr, strings.TrimSpace(request.Receiver.IP), request.Receiver.Port) // Log raw RPC start (reduce noise: Info only for high-signal messages) - startFields := logtrace.Fields{ - logtrace.FieldModule: "p2p", - "remote": remoteAddr, - "message": msgName(request.MessageType), - "timeout_ms": int64(timeout / time.Millisecond), - } - // Tag role/origin for filtering - startFields[logtrace.FieldRole] = "client" - if o := logtrace.OriginFromContext(ctx); o != "" { - startFields[logtrace.FieldOrigin] = o - } + startFields := logtrace.Fields{ + logtrace.FieldModule: "p2p", + "remote": remoteAddr, + "message": msgName(request.MessageType), + "timeout_ms": int64(timeout / time.Millisecond), + } + // Tag role/origin for filtering + startFields[logtrace.FieldRole] = "client" + if o := logtrace.OriginFromContext(ctx); o != "" { + startFields[logtrace.FieldOrigin] = o + } if isHighSignalMsg(request.MessageType) { logtrace.Info(ctx, fmt.Sprintf("RPC %s start remote=%s timeout_ms=%d", msgName(request.MessageType), remoteAddr, int64(timeout/time.Millisecond)), startFields) } else { @@ -615,14 +614,14 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes } // Attach correlation id only for high‑signal messages (store/retrieve batches) - if isHighSignalMsg(request.MessageType) { - if cid := logtrace.CorrelationIDFromContext(ctx); cid != "unknown" { - request.CorrelationID = cid - } - if o := logtrace.OriginFromContext(ctx); o != "" { - request.Origin = o - } - } + if isHighSignalMsg(request.MessageType) { + if cid := logtrace.CorrelationIDFromContext(ctx); cid != "unknown" { + request.CorrelationID = cid + } + if o := logtrace.OriginFromContext(ctx); o != "" { + request.Origin = o + } + } // try get from pool s.connPoolMtx.Lock() @@ -750,13 +749,15 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd return nil, errors.Errorf("conn read: %w", e) } // 
Single-line completion for successful outbound RPC - if isHighSignalMsg(msgType) { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) - } else { - logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) - } + if isHighSignalMsg(msgType) { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) + } return r, nil } } @@ -842,13 +843,15 @@ Retry: s.dropFromPool(remoteAddr, conn) return nil, errors.Errorf("conn read: %w", err) } - if isHighSignalMsg(msgType) { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", 
msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) - } else { - logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) - } + if isHighSignalMsg(msgType) { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) + } return resp, nil } @@ -950,17 +953,17 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r } func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, reqID string) (res []byte, err error) { - defer func() { - if response, err := s.handlePanic(ctx, message.Sender, BatchGetValues); response != nil || err != nil { - res = response - } - }() - - request, ok := message.Data.(*BatchGetValuesRequest) - if !ok { - err := errors.New("invalid BatchGetValuesRequest") - return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) - } + defer func() { + if response, err := s.handlePanic(ctx, message.Sender, BatchGetValues); response != nil || err != nil { + res = response + } + }() + + request, ok := message.Data.(*BatchGetValuesRequest) + if !ok { + err := errors.New("invalid BatchGetValuesRequest") 
+ return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) + } logtrace.Debug(ctx, "Batch get values request received", logtrace.Fields{ logtrace.FieldModule: "p2p", @@ -976,17 +979,19 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, i++ } - values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, false) - if err != nil { - err = errors.Errorf("batch find values: %w", err) - return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) - } + values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, false) + if err != nil { + err = errors.Errorf("batch find values: %w", err) + return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) + } - { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "requested-keys": len(keys), "found": count, "sender": message.Sender.String(), logtrace.FieldRole: "server"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "network: batch get values ok", f) - } + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "requested-keys": len(keys), "found": count, "sender": message.Sender.String(), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "network: batch get values ok", f) + } for i, key := range keys { val := KeyValWithClosest{ @@ -1005,9 +1010,9 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, } // new a response message - resMsg := s.dht.newMessage(BatchGetValues, message.Sender, response) - resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) - return s.encodeMesage(resMsg) + resMsg := s.dht.newMessage(BatchGetValues, message.Sender, response) + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) + return s.encodeMesage(resMsg) } func (s *Network) handleBatchFindValuesRequest(ctx 
context.Context, req *BatchFindValuesRequest, ip string, reqID string) (isDone bool, compressedData []byte, err error) { @@ -1178,32 +1183,34 @@ func findTopHeaviestKeys(dataMap map[string][]byte, size int) (int, []string) { } func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (res []byte, err error) { - defer func() { - if response, err := s.handlePanic(ctx, message.Sender, BatchStoreData); response != nil || err != nil { - res = response - } - }() - - request, ok := message.Data.(*BatchStoreDataRequest) - if !ok { - err := errors.New("invalid BatchStoreDataRequest") - return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) - } + defer func() { + if response, err := s.handlePanic(ctx, message.Sender, BatchStoreData); response != nil || err != nil { + res = response + } + }() + + request, ok := message.Data.(*BatchStoreDataRequest) + if !ok { + err := errors.New("invalid BatchStoreDataRequest") + return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) + } // log.P2P().WithContext(ctx).Info("handle batch store data request received") - { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "network: batch store recv", f) - } + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "network: batch store recv", f) + } // add the sender to queries hash table s.dht.addNode(ctx, message.Sender) - if err := s.dht.store.StoreBatch(ctx, request.Data, 1, false); err != nil { - err = errors.Errorf("batch store the data: %w", err) - return s.generateResponseMessage(ctx, 
BatchStoreData, message.Sender, ResultFailed, err.Error()) - } + if err := s.dht.store.StoreBatch(ctx, request.Data, 1, false); err != nil { + err = errors.Errorf("batch store the data: %w", err) + return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) + } response := &StoreDataResponse{ Status: ResponseStatus{ @@ -1211,16 +1218,18 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r }, } // log.P2P().WithContext(ctx).Info("handle batch store data request processed") - { - f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} - if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o } - logtrace.Info(ctx, "network: batch store ok", f) - } + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "network: batch store ok", f) + } // new a response message - resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response) - resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) - return s.encodeMesage(resMsg) + resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response) + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) + return s.encodeMesage(resMsg) } func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (res []byte, err error) { diff --git a/p2p/kademlia/rq_symbols.go b/p2p/kademlia/rq_symbols.go index 85367dec..7aa2c578 100644 --- a/p2p/kademlia/rq_symbols.go +++ b/p2p/kademlia/rq_symbols.go @@ -53,9 +53,9 @@ func (s *DHT) storeSymbols(ctx context.Context) error { } start := time.Now() logtrace.Info(wctx, "worker: dir start", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "symbols": preCount}) - if err := 
s.scanDirAndStoreSymbols(wctx, dir.Dir, dir.TXID); err != nil { - logtrace.Error(wctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) - } + if err := s.scanDirAndStoreSymbols(wctx, dir.Dir, dir.TXID); err != nil { + logtrace.Error(wctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) + } // Post-count remaining symbols remCount := -1 if set, rerr := utils.ReadDirFilenames(dir.Dir); rerr == nil { @@ -86,17 +86,17 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro logtrace.Info(ctx, "p2p-worker: storing ALL RaptorQ symbols", logtrace.Fields{"txid": txid, "dir": dir, "total": len(keys)}) - // Batch-flush at loadSymbolsBatchSize - for start := 0; start < len(keys); { - end := start + loadSymbolsBatchSize - if end > len(keys) { - end = len(keys) - } - if err := s.storeSymbolsInP2P(ctx, txid, dir, keys[start:end]); err != nil { - return err - } - start = end - } + // Batch-flush at loadSymbolsBatchSize + for start := 0; start < len(keys); { + end := start + loadSymbolsBatchSize + if end > len(keys) { + end = len(keys) + } + if err := s.storeSymbolsInP2P(ctx, txid, dir, keys[start:end]); err != nil { + return err + } + start = end + } // Mark this directory as completed in rqstore if err := s.rqstore.SetIsCompleted(txid); err != nil { @@ -110,7 +110,7 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro // --------------------------------------------------------------------- func (s *DHT) storeSymbolsInP2P(ctx context.Context, txid, dir string, keys []string) error { // Per-batch visibility for background worker - logtrace.Info(ctx, "worker: batch send", logtrace.Fields{"dir": dir, "keys": len(keys), logtrace.FieldTaskID: txid}) + logtrace.Info(ctx, "worker: batch send", logtrace.Fields{"dir": dir, "keys": len(keys), logtrace.FieldTaskID: txid}) start := time.Now() loaded, err := utils.LoadSymbols(dir, keys) 
@@ -118,11 +118,11 @@ func (s *DHT) storeSymbolsInP2P(ctx context.Context, txid, dir string, keys []st return fmt.Errorf("load symbols: %w", err) } - if err := s.StoreBatch(ctx, loaded, 1, txid); err != nil { - return fmt.Errorf("p2p store batch: %w", err) - } + if err := s.StoreBatch(ctx, loaded, 1, txid); err != nil { + return fmt.Errorf("p2p store batch: %w", err) + } - logtrace.Info(ctx, "worker: batch ok", logtrace.Fields{"dir": dir, "keys": len(loaded), "ms": time.Since(start).Milliseconds(), logtrace.FieldTaskID: txid}) + logtrace.Info(ctx, "worker: batch ok", logtrace.Fields{"dir": dir, "keys": len(loaded), "ms": time.Since(start).Milliseconds(), logtrace.FieldTaskID: txid}) if err := utils.DeleteSymbols(ctx, dir, keys); err != nil { return fmt.Errorf("delete symbols: %w", err) diff --git a/p2p/p2p.go b/p2p/p2p.go index bb38ac0c..f9a5f74e 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -40,14 +40,14 @@ type P2P interface { // p2p structure to implements interface type p2p struct { - store kademlia.Store // the store for kademlia network - metaStore kademlia.MetaStore - dht *kademlia.DHT // the kademlia network - config *Config // the service configuration - running bool // if the kademlia network is ready - lumeraClient lumera.Client - keyring keyring.Keyring // Add the keyring field - rqstore rqstore.Store + store kademlia.Store // the store for kademlia network + metaStore kademlia.MetaStore + dht *kademlia.DHT // the kademlia network + config *Config // the service configuration + running bool // if the kademlia network is ready + lumeraClient lumera.Client + keyring keyring.Keyring // Add the keyring field + rqstore rqstore.Store } // Run the kademlia network @@ -263,13 +263,13 @@ func New(ctx context.Context, config *Config, lumeraClient lumera.Client, kr key } return &p2p{ - store: store, - metaStore: meta, - config: config, - lumeraClient: lumeraClient, - keyring: kr, // Store the keyring - rqstore: rqstore, - }, nil + store: store, + metaStore: meta, + 
config: config, + lumeraClient: lumeraClient, + keyring: kr, // Store the keyring + rqstore: rqstore, + }, nil } // LocalStore store data into the kademlia network diff --git a/pkg/cascadekit/doc.go b/pkg/cascadekit/doc.go index ab8ce081..5fa61f7b 100644 --- a/pkg/cascadekit/doc.go +++ b/pkg/cascadekit/doc.go @@ -14,4 +14,3 @@ // - No logging; keep functions small and deterministic // - No orchestration helpers; this package exposes building blocks only package cascadekit - diff --git a/pkg/cascadekit/index.go b/pkg/cascadekit/index.go index bd9c040d..e0cb3dce 100644 --- a/pkg/cascadekit/index.go +++ b/pkg/cascadekit/index.go @@ -1,11 +1,11 @@ package cascadekit import ( - "encoding/base64" - "encoding/json" - "strings" + "encoding/base64" + "encoding/json" + "strings" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" ) // SeparatorByte is the '.' separator used when composing payloads with counters. @@ -14,50 +14,49 @@ const SeparatorByte byte = 46 // IndexFile represents the structure of the index file referenced on-chain. // The JSON fields must match the existing format. type IndexFile struct { - Version int `json:"version,omitempty"` - LayoutIDs []string `json:"layout_ids"` - LayoutSignature string `json:"layout_signature"` + Version int `json:"version,omitempty"` + LayoutIDs []string `json:"layout_ids"` + LayoutSignature string `json:"layout_signature"` } // BuildIndex creates an IndexFile from layout IDs and the layout signature. func BuildIndex(layoutIDs []string, layoutSigB64 string) IndexFile { - return IndexFile{LayoutIDs: layoutIDs, LayoutSignature: layoutSigB64} + return IndexFile{LayoutIDs: layoutIDs, LayoutSignature: layoutSigB64} } // EncodeIndexB64 marshals an index file and returns both the raw JSON and base64. 
func EncodeIndexB64(idx IndexFile) (b64 string, raw []byte, err error) { - raw, err = json.Marshal(idx) - if err != nil { - return "", nil, errors.Errorf("marshal index file: %w", err) - } - return base64.StdEncoding.EncodeToString(raw), raw, nil + raw, err = json.Marshal(idx) + if err != nil { + return "", nil, errors.Errorf("marshal index file: %w", err) + } + return base64.StdEncoding.EncodeToString(raw), raw, nil } // DecodeIndexB64 decodes base64(JSON(IndexFile)). func DecodeIndexB64(data string) (IndexFile, error) { - var indexFile IndexFile - decodedData, err := base64.StdEncoding.DecodeString(data) - if err != nil { - return indexFile, errors.Errorf("failed to decode index file: %w", err) - } - if err := json.Unmarshal(decodedData, &indexFile); err != nil { - return indexFile, errors.Errorf("failed to unmarshal index file: %w", err) - } - return indexFile, nil + var indexFile IndexFile + decodedData, err := base64.StdEncoding.DecodeString(data) + if err != nil { + return indexFile, errors.Errorf("failed to decode index file: %w", err) + } + if err := json.Unmarshal(decodedData, &indexFile); err != nil { + return indexFile, errors.Errorf("failed to unmarshal index file: %w", err) + } + return indexFile, nil } // ExtractIndexAndCreatorSig splits a signatures string formatted as: // Base64(index_json).Base64(creator_signature) func ExtractIndexAndCreatorSig(signatures string) (indexB64 string, creatorSigB64 string, err error) { - parts := strings.Split(signatures, ".") - if len(parts) < 2 { - return "", "", errors.New("invalid signatures format") - } - return parts[0], parts[1], nil + parts := strings.Split(signatures, ".") + if len(parts) < 2 { + return "", "", errors.New("invalid signatures format") + } + return parts[0], parts[1], nil } // MakeSignatureFormat composes the final signatures string. func MakeSignatureFormat(indexB64, creatorSigB64 string) string { - return indexB64 + "." + creatorSigB64 + return indexB64 + "." 
+ creatorSigB64 } - diff --git a/pkg/cascadekit/index_parse.go b/pkg/cascadekit/index_parse.go index 9629398e..0fbf3dca 100644 --- a/pkg/cascadekit/index_parse.go +++ b/pkg/cascadekit/index_parse.go @@ -1,23 +1,22 @@ package cascadekit import ( - "bytes" + "bytes" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" ) // ParseCompressedIndexFile parses a compressed index file into an IndexFile. // The compressed format is: base64(IndexJSON).creator_signature.counter func ParseCompressedIndexFile(data []byte) (IndexFile, error) { - decompressed, err := utils.ZstdDecompress(data) - if err != nil { - return IndexFile{}, errors.Errorf("decompress index file: %w", err) - } - parts := bytes.Split(decompressed, []byte{SeparatorByte}) - if len(parts) < 2 { - return IndexFile{}, errors.New("invalid index file format") - } - return DecodeIndexB64(string(parts[0])) + decompressed, err := utils.ZstdDecompress(data) + if err != nil { + return IndexFile{}, errors.Errorf("decompress index file: %w", err) + } + parts := bytes.Split(decompressed, []byte{SeparatorByte}) + if len(parts) < 2 { + return IndexFile{}, errors.New("invalid index file format") + } + return DecodeIndexB64(string(parts[0])) } - diff --git a/pkg/cascadekit/metadata.go b/pkg/cascadekit/metadata.go index 79969280..534ef793 100644 --- a/pkg/cascadekit/metadata.go +++ b/pkg/cascadekit/metadata.go @@ -1,18 +1,17 @@ package cascadekit import ( - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" ) // NewCascadeMetadata creates a types.CascadeMetadata for RequestAction. // The keeper will populate rq_ids_max; rq_ids_ids is for FinalizeAction only. 
func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, signatures string, public bool) actiontypes.CascadeMetadata { - return actiontypes.CascadeMetadata{ - DataHash: dataHashB64, - FileName: fileName, - RqIdsIc: rqIdsIc, - Signatures: signatures, - Public: public, - } + return actiontypes.CascadeMetadata{ + DataHash: dataHashB64, + FileName: fileName, + RqIdsIc: rqIdsIc, + Signatures: signatures, + Public: public, + } } - diff --git a/pkg/cascadekit/metadata_helpers.go b/pkg/cascadekit/metadata_helpers.go index c98aaa83..94a20442 100644 --- a/pkg/cascadekit/metadata_helpers.go +++ b/pkg/cascadekit/metadata_helpers.go @@ -1,27 +1,26 @@ package cascadekit import ( - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/golang/protobuf/proto" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/golang/protobuf/proto" ) // UnmarshalCascadeMetadata decodes action metadata bytes into CascadeMetadata. func UnmarshalCascadeMetadata(raw []byte) (actiontypes.CascadeMetadata, error) { - var meta actiontypes.CascadeMetadata - if err := proto.Unmarshal(raw, &meta); err != nil { - return meta, errors.Errorf("failed to unmarshal cascade metadata: %w", err) - } - return meta, nil + var meta actiontypes.CascadeMetadata + if err := proto.Unmarshal(raw, &meta); err != nil { + return meta, errors.Errorf("failed to unmarshal cascade metadata: %w", err) + } + return meta, nil } // VerifyB64DataHash compares a raw hash with an expected base64 string. 
func VerifyB64DataHash(raw []byte, expectedB64 string) error { - b64 := utils.B64Encode(raw) - if string(b64) != expectedB64 { - return errors.New("data hash doesn't match") - } - return nil + b64 := utils.B64Encode(raw) + if string(b64) != expectedB64 { + return errors.New("data hash doesn't match") + } + return nil } - diff --git a/pkg/cascadekit/parsers.go b/pkg/cascadekit/parsers.go index ed8e270b..be950e4f 100644 --- a/pkg/cascadekit/parsers.go +++ b/pkg/cascadekit/parsers.go @@ -1,40 +1,39 @@ package cascadekit import ( - "bytes" + "bytes" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - json "github.com/json-iterator/go" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + json "github.com/json-iterator/go" ) // ParseRQMetadataFile parses a compressed rq metadata file into layout, signature and counter. 
// File format: base64(JSON(layout)).signature.counter (all parts separated by '.') func ParseRQMetadataFile(data []byte) (layout codec.Layout, signature string, counter string, err error) { - decompressed, err := utils.ZstdDecompress(data) - if err != nil { - return layout, "", "", errors.Errorf("decompress rq metadata file: %w", err) - } - - // base64EncodeMetadata.Signature.Counter - parts := bytes.Split(decompressed, []byte{SeparatorByte}) - if len(parts) != 3 { - return layout, "", "", errors.New("invalid rq metadata format: expecting 3 parts (layout, signature, counter)") - } - - layoutJson, err := utils.B64Decode(parts[0]) - if err != nil { - return layout, "", "", errors.Errorf("base64 decode failed: %w", err) - } - - if err := json.Unmarshal(layoutJson, &layout); err != nil { - return layout, "", "", errors.Errorf("unmarshal layout: %w", err) - } - - signature = string(parts[1]) - counter = string(parts[2]) - - return layout, signature, counter, nil + decompressed, err := utils.ZstdDecompress(data) + if err != nil { + return layout, "", "", errors.Errorf("decompress rq metadata file: %w", err) + } + + // base64EncodeMetadata.Signature.Counter + parts := bytes.Split(decompressed, []byte{SeparatorByte}) + if len(parts) != 3 { + return layout, "", "", errors.New("invalid rq metadata format: expecting 3 parts (layout, signature, counter)") + } + + layoutJson, err := utils.B64Decode(parts[0]) + if err != nil { + return layout, "", "", errors.Errorf("base64 decode failed: %w", err) + } + + if err := json.Unmarshal(layoutJson, &layout); err != nil { + return layout, "", "", errors.Errorf("unmarshal layout: %w", err) + } + + signature = string(parts[1]) + counter = string(parts[2]) + + return layout, signature, counter, nil } - diff --git a/pkg/cascadekit/rqid.go b/pkg/cascadekit/rqid.go index 97066b11..3a05eb94 100644 --- a/pkg/cascadekit/rqid.go +++ b/pkg/cascadekit/rqid.go @@ -1,62 +1,62 @@ package cascadekit import ( - "context" - "encoding/json" + "context" + 
"encoding/json" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" ) // GenRQIdentifiersFilesResponse groups the generated files and their IDs. type GenRQIdentifiersFilesResponse struct { - // IDs of the Redundant Metadata Files -- len(RQIDs) == len(RedundantMetadataFiles) - RQIDs []string - // RedundantMetadataFiles is a list of redundant files generated from the Metadata file - RedundantMetadataFiles [][]byte + // IDs of the Redundant Metadata Files -- len(RQIDs) == len(RedundantMetadataFiles) + RQIDs []string + // RedundantMetadataFiles is a list of redundant files generated from the Metadata file + RedundantMetadataFiles [][]byte } // GenerateLayoutFiles builds redundant metadata files from layout and signature. // The content is: base64(JSON(layout)).layout_signature func GenerateLayoutFiles(ctx context.Context, layout codec.Layout, layoutSigB64 string, ic uint32, max uint32) (GenRQIdentifiersFilesResponse, error) { - // Validate single-block to match package invariant - if len(layout.Blocks) != 1 { - return GenRQIdentifiersFilesResponse{}, errors.New("layout must contain exactly one block") - } - - metadataFile, err := jsonMarshal(layout) - if err != nil { - return GenRQIdentifiersFilesResponse{}, errors.Errorf("marshal layout: %w", err) - } - b64Encoded := utils.B64Encode(metadataFile) - - // Compose: base64(JSON(layout)).layout_signature - enc := make([]byte, 0, len(b64Encoded)+1+len(layoutSigB64)) - enc = append(enc, b64Encoded...) - enc = append(enc, SeparatorByte) - enc = append(enc, []byte(layoutSigB64)...) 
- - ids, files, err := getIDFiles(enc, ic, max) - if err != nil { - return GenRQIdentifiersFilesResponse{}, errors.Errorf("get ID Files: %w", err) - } - - return GenRQIdentifiersFilesResponse{ - RedundantMetadataFiles: files, - RQIDs: ids, - }, nil + // Validate single-block to match package invariant + if len(layout.Blocks) != 1 { + return GenRQIdentifiersFilesResponse{}, errors.New("layout must contain exactly one block") + } + + metadataFile, err := jsonMarshal(layout) + if err != nil { + return GenRQIdentifiersFilesResponse{}, errors.Errorf("marshal layout: %w", err) + } + b64Encoded := utils.B64Encode(metadataFile) + + // Compose: base64(JSON(layout)).layout_signature + enc := make([]byte, 0, len(b64Encoded)+1+len(layoutSigB64)) + enc = append(enc, b64Encoded...) + enc = append(enc, SeparatorByte) + enc = append(enc, []byte(layoutSigB64)...) + + ids, files, err := getIDFiles(enc, ic, max) + if err != nil { + return GenRQIdentifiersFilesResponse{}, errors.Errorf("get ID Files: %w", err) + } + + return GenRQIdentifiersFilesResponse{ + RedundantMetadataFiles: files, + RQIDs: ids, + }, nil } // GenerateIndexFiles generates index files and their IDs from the full signatures format. 
func GenerateIndexFiles(ctx context.Context, signaturesFormat string, ic uint32, max uint32) (indexIDs []string, indexFiles [][]byte, err error) { - // Use the full signatures format that matches what was sent during RequestAction - // The chain expects this exact format for ID generation - indexIDs, indexFiles, err = getIDFiles([]byte(signaturesFormat), ic, max) - if err != nil { - return nil, nil, errors.Errorf("get index ID files: %w", err) - } - return indexIDs, indexFiles, nil + // Use the full signatures format that matches what was sent during RequestAction + // The chain expects this exact format for ID generation + indexIDs, indexFiles, err = getIDFiles([]byte(signaturesFormat), ic, max) + if err != nil { + return nil, nil, errors.Errorf("get index ID files: %w", err) + } + return indexIDs, indexFiles, nil } // jsonMarshal marshals a value to JSON. diff --git a/pkg/cascadekit/signatures.go b/pkg/cascadekit/signatures.go index 6653c5bc..0c71e492 100644 --- a/pkg/cascadekit/signatures.go +++ b/pkg/cascadekit/signatures.go @@ -1,12 +1,12 @@ package cascadekit import ( - "encoding/base64" - "encoding/json" - "fmt" + "encoding/base64" + "encoding/json" + "fmt" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" ) // Signer is a function that signs the provided message and returns the raw signature bytes. @@ -15,52 +15,53 @@ type Signer func(msg []byte) ([]byte, error) // SignLayoutB64 validates single-block layout, marshals to JSON, base64-encodes it, // and signs the base64 payload, returning both the layout base64 and signature base64. 
func SignLayoutB64(layout codec.Layout, signer Signer) (layoutB64 string, layoutSigB64 string, err error) { - if len(layout.Blocks) != 1 { - return "", "", errors.New("layout must contain exactly one block") - } + if len(layout.Blocks) != 1 { + return "", "", errors.New("layout must contain exactly one block") + } - me, err := json.Marshal(layout) - if err != nil { - return "", "", errors.Errorf("marshal layout: %w", err) - } - layoutB64 = base64.StdEncoding.EncodeToString(me) + me, err := json.Marshal(layout) + if err != nil { + return "", "", errors.Errorf("marshal layout: %w", err) + } + layoutB64 = base64.StdEncoding.EncodeToString(me) - sig, err := signer([]byte(layoutB64)) - if err != nil { - return "", "", errors.Errorf("sign layout: %w", err) - } - layoutSigB64 = base64.StdEncoding.EncodeToString(sig) - return layoutB64, layoutSigB64, nil + sig, err := signer([]byte(layoutB64)) + if err != nil { + return "", "", errors.Errorf("sign layout: %w", err) + } + layoutSigB64 = base64.StdEncoding.EncodeToString(sig) + return layoutB64, layoutSigB64, nil } // CreateSignatures reproduces the cascade signature format and index IDs: -// Base64(index_json).Base64(creator_signature) +// +// Base64(index_json).Base64(creator_signature) +// // It validates the layout has exactly one block. 
func CreateSignatures(layout codec.Layout, signer Signer, ic, max uint32) (signatures string, indexIDs []string, err error) { - layoutB64, layoutSigB64, err := SignLayoutB64(layout, signer) - if err != nil { - return "", nil, err - } + layoutB64, layoutSigB64, err := SignLayoutB64(layout, signer) + if err != nil { + return "", nil, err + } - // Generate layout IDs (not returned; used to populate the index file) - layoutIDs := GenerateLayoutIDs(layoutB64, layoutSigB64, ic, max) + // Generate layout IDs (not returned; used to populate the index file) + layoutIDs := GenerateLayoutIDs(layoutB64, layoutSigB64, ic, max) - // Build and sign the index file - idx := BuildIndex(layoutIDs, layoutSigB64) - indexB64, _, err := EncodeIndexB64(idx) - if err != nil { - return "", nil, err - } + // Build and sign the index file + idx := BuildIndex(layoutIDs, layoutSigB64) + indexB64, _, err := EncodeIndexB64(idx) + if err != nil { + return "", nil, err + } - creatorSig, err := signer([]byte(indexB64)) - if err != nil { - return "", nil, errors.Errorf("sign index: %w", err) - } - creatorSigB64 := base64.StdEncoding.EncodeToString(creatorSig) - signatures = fmt.Sprintf("%s.%s", indexB64, creatorSigB64) + creatorSig, err := signer([]byte(indexB64)) + if err != nil { + return "", nil, errors.Errorf("sign index: %w", err) + } + creatorSigB64 := base64.StdEncoding.EncodeToString(creatorSig) + signatures = fmt.Sprintf("%s.%s", indexB64, creatorSigB64) - // Generate the index IDs (these are the RQIDs sent to chain) - indexIDs = GenerateIndexIDs(signatures, ic, max) - return signatures, indexIDs, nil + // Generate the index IDs (these are the RQIDs sent to chain) + indexIDs = GenerateIndexIDs(signatures, ic, max) + return signatures, indexIDs, nil } - diff --git a/pkg/cascadekit/verify.go b/pkg/cascadekit/verify.go index 4e7217be..5c4ff8a4 100644 --- a/pkg/cascadekit/verify.go +++ b/pkg/cascadekit/verify.go @@ -1,23 +1,22 @@ package cascadekit import ( - 
"github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" ) // VerifySingleBlockIDs enforces single-block layouts and verifies that the // symbols and block hash of ticket and local layouts match for block 0. func VerifySingleBlockIDs(ticket, local codec.Layout) error { - if len(ticket.Blocks) != 1 || len(local.Blocks) != 1 { - return errors.New("layout must contain exactly one block") - } - if err := utils.EqualStrList(ticket.Blocks[0].Symbols, local.Blocks[0].Symbols); err != nil { - return errors.Errorf("symbol identifiers don't match: %w", err) - } - if ticket.Blocks[0].Hash != local.Blocks[0].Hash { - return errors.New("block hashes don't match") - } - return nil + if len(ticket.Blocks) != 1 || len(local.Blocks) != 1 { + return errors.New("layout must contain exactly one block") + } + if err := utils.EqualStrList(ticket.Blocks[0].Symbols, local.Blocks[0].Symbols); err != nil { + return errors.Errorf("symbol identifiers don't match: %w", err) + } + if ticket.Blocks[0].Hash != local.Blocks[0].Hash { + return errors.New("block hashes don't match") + } + return nil } - diff --git a/pkg/codec/codec.go b/pkg/codec/codec.go index 39029569..cd751a79 100644 --- a/pkg/codec/codec.go +++ b/pkg/codec/codec.go @@ -1,5 +1,3 @@ -//go:generate mockgen -destination=codec_mock.go -package=codec -source=codec.go - package codec import ( @@ -38,4 +36,7 @@ type Codec interface { // Encode a file Encode(ctx context.Context, req EncodeRequest) (EncodeResponse, error) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) + // CreateMetadata builds the single-block layout metadata for the given file + // without generating RaptorQ symbols. 
+ CreateMetadata(ctx context.Context, path string) (Layout, error) } diff --git a/pkg/codec/codec_mock.go b/pkg/codec/codec_mock.go deleted file mode 100644 index 09484cee..00000000 --- a/pkg/codec/codec_mock.go +++ /dev/null @@ -1,65 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: codec.go - -// Package codec is a generated GoMock package. -package codec - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockCodec is a mock of Codec interface. -type MockCodec struct { - ctrl *gomock.Controller - recorder *MockCodecMockRecorder -} - -// MockCodecMockRecorder is the mock recorder for MockCodec. -type MockCodecMockRecorder struct { - mock *MockCodec -} - -// NewMockCodec creates a new mock instance. -func NewMockCodec(ctrl *gomock.Controller) *MockCodec { - mock := &MockCodec{ctrl: ctrl} - mock.recorder = &MockCodecMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCodec) EXPECT() *MockCodecMockRecorder { - return m.recorder -} - -// Decode mocks base method. -func (m *MockCodec) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Decode", ctx, req) - ret0, _ := ret[0].(DecodeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Decode indicates an expected call of Decode. -func (mr *MockCodecMockRecorder) Decode(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decode", reflect.TypeOf((*MockCodec)(nil).Decode), ctx, req) -} - -// Encode mocks base method. -func (m *MockCodec) Encode(ctx context.Context, req EncodeRequest) (EncodeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Encode", ctx, req) - ret0, _ := ret[0].(EncodeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Encode indicates an expected call of Encode. 
-func (mr *MockCodecMockRecorder) Encode(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Encode", reflect.TypeOf((*MockCodec)(nil).Encode), ctx, req) -} diff --git a/pkg/codec/raptorq.go b/pkg/codec/raptorq.go index 541aac58..14bad1d9 100644 --- a/pkg/codec/raptorq.go +++ b/pkg/codec/raptorq.go @@ -57,7 +57,6 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons symbolsDir := filepath.Join(rq.symbolsBaseDir, req.TaskID) if err := os.MkdirAll(symbolsDir, 0o755); err != nil { fields[logtrace.FieldError] = err.Error() - os.Remove(req.Path) return EncodeResponse{}, fmt.Errorf("mkdir %s: %w", symbolsDir, err) } logtrace.Debug(ctx, "RaptorQ processor encoding", fields) @@ -65,13 +64,9 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons resp, err := processor.EncodeFile(req.Path, symbolsDir, blockSize) if err != nil { fields[logtrace.FieldError] = err.Error() - os.Remove(req.Path) return EncodeResponse{}, fmt.Errorf("raptorq encode: %w", err) } - /* we no longer need the temp file */ - // _ = os.Remove(tmpPath) - /* ---------- 2. read the layout JSON ---------- */ layoutData, err := os.ReadFile(resp.LayoutFilePath) logtrace.Debug(ctx, "RaptorQ processor layout file", logtrace.Fields{ @@ -94,3 +89,64 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons return encodeResp, nil } + +// CreateMetadata builds only the layout metadata for the given file without generating symbols. 
+func (rq *raptorQ) CreateMetadata(ctx context.Context, path string) (Layout, error) { + // Populate fields; include data-size by stat-ing the file to preserve existing log fields + fields := logtrace.Fields{ + logtrace.FieldMethod: "CreateMetadata", + logtrace.FieldModule: "rq", + "path": path, + } + if fi, err := os.Stat(path); err == nil { + fields["data-size"] = int(fi.Size()) + } + + processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) + if err != nil { + return Layout{}, fmt.Errorf("create RaptorQ processor: %w", err) + } + defer processor.Free() + logtrace.Debug(ctx, "RaptorQ processor created", fields) + + // Deterministic: force single block + blockSize := rqBlockSize + + // Prepare a temporary path for the generated layout file + base := rq.symbolsBaseDir + if base == "" { + base = os.TempDir() + } + tmpDir, err := os.MkdirTemp(base, "rq_meta_*") + if err != nil { + fields[logtrace.FieldError] = err.Error() + return Layout{}, fmt.Errorf("mkdir temp dir: %w", err) + } + defer os.RemoveAll(tmpDir) + layoutPath := filepath.Join(tmpDir, "layout.json") + + // Use rq-go's metadata-only creation; no symbols are produced here. 
+ resp, err := processor.CreateMetadata(path, layoutPath, blockSize) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return Layout{}, fmt.Errorf("raptorq create metadata: %w", err) + } + + layoutData, err := os.ReadFile(resp.LayoutFilePath) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return Layout{}, fmt.Errorf("read layout %s: %w", resp.LayoutFilePath, err) + } + + var layout Layout + if err := json.Unmarshal(layoutData, &layout); err != nil { + return Layout{}, fmt.Errorf("unmarshal layout: %w", err) + } + + // Enforce single-block output; abort if multiple blocks are produced + if n := len(layout.Blocks); n != 1 { + return Layout{}, fmt.Errorf("raptorq metadata produced %d blocks; single-block layout is required", n) + } + + return layout, nil +} diff --git a/pkg/logtrace/datadog.go b/pkg/logtrace/datadog.go index 6fb0ba86..5c739d12 100644 --- a/pkg/logtrace/datadog.go +++ b/pkg/logtrace/datadog.go @@ -116,14 +116,14 @@ func ddForward(level zapcore.Level, ctx context.Context, msg string, fields Fiel for k, v := range fields { attrs[k] = v } - // Attach correlation ID if present - if cid := extractCorrelationID(ctx); cid != "unknown" { - attrs["correlation_id"] = cid - } - // Attach origin/phase if present (first_pass | worker | download) - if o := OriginFromContext(ctx); o != "" { - attrs["origin"] = o - } + // Attach correlation ID if present + if cid := extractCorrelationID(ctx); cid != "unknown" { + attrs["correlation_id"] = cid + } + // Attach origin/phase if present (first_pass | worker | download) + if o := OriginFromContext(ctx); o != "" { + attrs["origin"] = o + } entry := map[string]any{ "message": msg, diff --git a/pkg/logtrace/log.go b/pkg/logtrace/log.go index 469b32e8..6e27b020 100644 --- a/pkg/logtrace/log.go +++ b/pkg/logtrace/log.go @@ -62,7 +62,7 @@ func Setup(serviceName string) { // getLogLevel returns the log level from environment variable LOG_LEVEL func getLogLevel() zapcore.Level { - levelStr := "info" + 
levelStr := strings.ToLower(os.Getenv("LOG_LEVEL")) switch levelStr { case "debug": return zapcore.DebugLevel diff --git a/pkg/lumera/modules/action_msg/helpers.go b/pkg/lumera/modules/action_msg/helpers.go index 6de5fb9f..b3b44193 100644 --- a/pkg/lumera/modules/action_msg/helpers.go +++ b/pkg/lumera/modules/action_msg/helpers.go @@ -1,58 +1,58 @@ package action_msg import ( - "fmt" - "strconv" - "time" + "fmt" + "strconv" + "time" - actionapi "github.com/LumeraProtocol/lumera/api/lumera/action" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera/util" - "google.golang.org/protobuf/encoding/protojson" + actionapi "github.com/LumeraProtocol/lumera/api/lumera/action" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/util" + "google.golang.org/protobuf/encoding/protojson" ) func validateRequestActionParams(actionType, metadata, price, expirationTime string) error { - if actionType == "" { - return fmt.Errorf("action type cannot be empty") - } - if metadata == "" { - return fmt.Errorf("metadata cannot be empty") - } - if price == "" { - return fmt.Errorf("price cannot be empty") - } - // Validate price: must be integer coin in ulume (e.g., "1000ulume") - if err := util.ValidateUlumeIntCoin(price); err != nil { - return fmt.Errorf("invalid price: %w", err) - } - if expirationTime == "" { - return fmt.Errorf("expiration time cannot be empty") - } - // Validate expiration is a future unix timestamp - exp, err := strconv.ParseInt(expirationTime, 10, 64) - if err != nil { - return fmt.Errorf("invalid expirationTime: %w", err) - } - // Allow small clock skew; require strictly in the future - if exp <= time.Now().Add(30*time.Second).Unix() { - return fmt.Errorf("expiration time must be in the future") - } - return nil + if actionType == "" { + return fmt.Errorf("action type cannot be empty") + } + if metadata == "" { + return 
fmt.Errorf("metadata cannot be empty") + } + if price == "" { + return fmt.Errorf("price cannot be empty") + } + // Validate price: must be integer coin in ulume (e.g., "1000ulume") + if err := util.ValidateUlumeIntCoin(price); err != nil { + return fmt.Errorf("invalid price: %w", err) + } + if expirationTime == "" { + return fmt.Errorf("expiration time cannot be empty") + } + // Validate expiration is a future unix timestamp + exp, err := strconv.ParseInt(expirationTime, 10, 64) + if err != nil { + return fmt.Errorf("invalid expirationTime: %w", err) + } + // Allow small clock skew; require strictly in the future + if exp <= time.Now().Add(30*time.Second).Unix() { + return fmt.Errorf("expiration time must be in the future") + } + return nil } func validateFinalizeActionParams(actionId string, rqIdsIds []string) error { - if actionId == "" { - return fmt.Errorf("action ID cannot be empty") - } - if len(rqIdsIds) == 0 { - return fmt.Errorf("rq_ids_ids cannot be empty for cascade action") - } - for i, s := range rqIdsIds { - if s == "" { - return fmt.Errorf("rq_ids_ids[%d] cannot be empty", i) - } - } - return nil + if actionId == "" { + return fmt.Errorf("action ID cannot be empty") + } + if len(rqIdsIds) == 0 { + return fmt.Errorf("rq_ids_ids cannot be empty for cascade action") + } + for i, s := range rqIdsIds { + if s == "" { + return fmt.Errorf("rq_ids_ids[%d] cannot be empty", i) + } + } + return nil } func createRequestActionMessage(creator, actionType, metadata, price, expirationTime string) *actiontypes.MsgRequestAction { diff --git a/pkg/lumera/util/coin.go b/pkg/lumera/util/coin.go index 561f5560..6632c1fe 100644 --- a/pkg/lumera/util/coin.go +++ b/pkg/lumera/util/coin.go @@ -1,35 +1,34 @@ package util import ( - "fmt" - "strings" + "fmt" + "strings" ) // ValidateUlumeIntCoin checks that the input is a positive integer amount // with the 'ulume' denom, e.g., "1000ulume". It keeps validation simple // without pulling in SDK dependencies. 
func ValidateUlumeIntCoin(s string) error { - const denom = "ulume" - if !strings.HasSuffix(s, denom) { - return fmt.Errorf("denom must be '%s'", denom) - } - num := s[:len(s)-len(denom)] - if num == "" { - return fmt.Errorf("amount is required before denom") - } - // must be all digits, no leading +/-, no decimals - var val uint64 - for i := 0; i < len(num); i++ { - c := num[i] - if c < '0' || c > '9' { - return fmt.Errorf("amount must be an integer number") - } - // simple overflow-safe accumulation for uint64 - val = val*10 + uint64(c-'0') - } - if val == 0 { - return fmt.Errorf("amount must be greater than zero") - } - return nil + const denom = "ulume" + if !strings.HasSuffix(s, denom) { + return fmt.Errorf("denom must be '%s'", denom) + } + num := s[:len(s)-len(denom)] + if num == "" { + return fmt.Errorf("amount is required before denom") + } + // must be all digits, no leading +/-, no decimals + var val uint64 + for i := 0; i < len(num); i++ { + c := num[i] + if c < '0' || c > '9' { + return fmt.Errorf("amount must be an integer number") + } + // simple overflow-safe accumulation for uint64 + val = val*10 + uint64(c-'0') + } + if val == 0 { + return fmt.Errorf("amount must be greater than zero") + } + return nil } - diff --git a/sdk/action/client.go b/sdk/action/client.go index 02f2bafc..596e80f9 100644 --- a/sdk/action/client.go +++ b/sdk/action/client.go @@ -48,7 +48,7 @@ type Client interface { BuildCascadeMetadataFromFile(ctx context.Context, filePath string, public bool) (actiontypes.CascadeMetadata, string, string, error) // GenerateStartCascadeSignatureFromFile computes blake3(file) and signs it with the configured key; returns base64 signature. GenerateStartCascadeSignatureFromFile(ctx context.Context, filePath string) (string, error) - // GenerateDownloadSignature signs the payload "actionID.creatorAddress"; returns base64 signature. + // GenerateDownloadSignature signs the payload "actionID" and returns a base64 signature. 
GenerateDownloadSignature(ctx context.Context, actionID, creatorAddr string) (string, error) } @@ -239,8 +239,8 @@ func (c *ClientImpl) DownloadCascade(ctx context.Context, actionID, outputDir, s } // BuildCascadeMetadataFromFile produces Cascade metadata (including signatures) from a local file path. -// It uses a temporary RaptorQ workspace, enforces single-block layout via the codec, and cleans up after. -// BuildCascadeMetadataFromFile builds Cascade metadata, price and expiration from a file path. +// It generates only the single-block RaptorQ layout metadata (no symbols), signs it, +// and returns metadata, price and expiration. func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath string, public bool) (actiontypes.CascadeMetadata, string, string, error) { if filePath == "" { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("file path is empty") @@ -254,19 +254,11 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("read file: %w", err) } - // Create temp workspace for codec symbols; remove after - baseDir, err := os.MkdirTemp("", "rq_files_*") + // Build layout metadata only (no symbols). Supernodes will create symbols. 
+ rq := codec.NewRaptorQCodec("") + layout, err := rq.CreateMetadata(ctx, filePath) if err != nil { - return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("create temp dir: %w", err) - } - defer os.RemoveAll(baseDir) - - rq := codec.NewRaptorQCodec(baseDir) - // Use a simple task ID with epoch to avoid collisions - taskID := fmt.Sprintf("sdk-%d", time.Now().UnixNano()) - enc, err := rq.Encode(ctx, codec.EncodeRequest{TaskID: taskID, Path: filePath, DataSize: int(fi.Size())}) - if err != nil { - return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq encode: %w", err) + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq create metadata: %w", err) } // Derive `max` from chain params, then create signatures and index IDs @@ -274,10 +266,10 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("get action params: %w", err) } - // Use MaxDdAndFingerprints as the count for rq_ids generation (chain maps this to rq_ids_max for Cascade) + // Use MaxRaptorQSymbols as the count for rq_ids generation. 
var max uint32 - if paramsResp != nil && paramsResp.Params.MaxDdAndFingerprints > 0 { - max = uint32(paramsResp.Params.MaxDdAndFingerprints) + if paramsResp != nil && paramsResp.Params.MaxRaptorQSymbols > 0 { + max = uint32(paramsResp.Params.MaxRaptorQSymbols) } else { // Fallback to a sane default if params missing max = 50 @@ -285,7 +277,7 @@ func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath // Pick a random initial counter in [1,100] rnd, _ := crand.Int(crand.Reader, big.NewInt(100)) ic := uint32(rnd.Int64() + 1) // 1..100 - signatures, _, err := cascadekit.CreateSignaturesWithKeyring(enc.Metadata, c.keyring, c.config.Account.KeyName, ic, max) + signatures, _, err := cascadekit.CreateSignaturesWithKeyring(layout, c.keyring, c.config.Account.KeyName, ic, max) if err != nil { return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("create signatures: %w", err) } diff --git a/sdk/adapters/lumera/adapter.go b/sdk/adapters/lumera/adapter.go index bacf8cd2..042c2273 100644 --- a/sdk/adapters/lumera/adapter.go +++ b/sdk/adapters/lumera/adapter.go @@ -26,12 +26,12 @@ type Client interface { GetSupernodeWithLatestAddress(ctx context.Context, address string) (*SuperNodeInfo, error) DecodeCascadeMetadata(ctx context.Context, action Action) (actiontypes.CascadeMetadata, error) VerifySignature(ctx context.Context, accountAddr string, data []byte, signature []byte) error - // GetBalance returns the bank balance for the given address and denom. - GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) - // GetActionParams returns the action module parameters. - GetActionParams(ctx context.Context) (*actiontypes.QueryParamsResponse, error) - // GetActionFee returns the fee amount for a given data size (in KB) for RequestAction. - GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) + // GetBalance returns the bank balance for the given address and denom. 
+ GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) + // GetActionParams returns the action module parameters. + GetActionParams(ctx context.Context) (*actiontypes.QueryParamsResponse, error) + // GetActionFee returns the fee amount for a given data size (in KB) for RequestAction. + GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) } // SuperNodeInfo contains supernode information with latest address @@ -224,20 +224,20 @@ func (a *Adapter) VerifySignature(ctx context.Context, accountAddr string, data, // GetActionParams fetches the action module parameters via the underlying lumera client. func (a *Adapter) GetActionParams(ctx context.Context) (*actiontypes.QueryParamsResponse, error) { - resp, err := a.client.Action().GetParams(ctx) - if err != nil { - return nil, fmt.Errorf("get action params: %w", err) - } - return resp, nil + resp, err := a.client.Action().GetParams(ctx) + if err != nil { + return nil, fmt.Errorf("get action params: %w", err) + } + return resp, nil } // GetActionFee fetches the action fee for a given data size (in KB). func (a *Adapter) GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) { - resp, err := a.client.Action().GetActionFee(ctx, dataSizeKB) - if err != nil { - return nil, fmt.Errorf("get action fee: %w", err) - } - return resp, nil + resp, err := a.client.Action().GetActionFee(ctx, dataSizeKB) + if err != nil { + return nil, fmt.Errorf("get action fee: %w", err) + } + return resp, nil } // GetBalance fetches the balance for a given address and denom via the underlying lumera client. 
diff --git a/sdk/adapters/supernodeservice/types.go b/sdk/adapters/supernodeservice/types.go index 89e04cae..1ba82c8f 100644 --- a/sdk/adapters/supernodeservice/types.go +++ b/sdk/adapters/supernodeservice/types.go @@ -1,12 +1,12 @@ package supernodeservice import ( - "context" + "context" - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "google.golang.org/grpc" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "google.golang.org/grpc" - "github.com/LumeraProtocol/supernode/v2/sdk/event" + "github.com/LumeraProtocol/supernode/v2/sdk/event" ) type LoggerFunc func( @@ -46,7 +46,7 @@ type CascadeSupernodeDownloadResponse struct { //go:generate mockery --name=CascadeServiceClient --output=testutil/mocks --outpkg=mocks --filename=cascade_service_mock.go type CascadeServiceClient interface { - CascadeSupernodeRegister(ctx context.Context, in *CascadeSupernodeRegisterRequest, opts ...grpc.CallOption) (*CascadeSupernodeRegisterResponse, error) - GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) - CascadeSupernodeDownload(ctx context.Context, in *CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*CascadeSupernodeDownloadResponse, error) + CascadeSupernodeRegister(ctx context.Context, in *CascadeSupernodeRegisterRequest, opts ...grpc.CallOption) (*CascadeSupernodeRegisterResponse, error) + GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) + CascadeSupernodeDownload(ctx context.Context, in *CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*CascadeSupernodeDownloadResponse, error) } diff --git a/sdk/event/keys.go b/sdk/event/keys.go index 04e27bd3..b138929f 100644 --- a/sdk/event/keys.go +++ b/sdk/event/keys.go @@ -30,5 +30,5 @@ const ( KeyTaskID EventDataKey = "task_id" KeyActionID EventDataKey = "action_id" - // Removed legacy cascade storage/retrieve metrics keys + // Removed legacy cascade storage/retrieve metrics keys ) diff --git a/sdk/event/types.go b/sdk/event/types.go index 
635b1e2f..10f44856 100644 --- a/sdk/event/types.go +++ b/sdk/event/types.go @@ -14,7 +14,7 @@ type EventType string // These events are used to track the progress of tasks // and to notify subscribers about important changes in the system. const ( - SDKTaskStarted EventType = "sdk:started" + SDKTaskStarted EventType = "sdk:started" SDKSupernodesUnavailable EventType = "sdk:supernodes_unavailable" SDKSupernodesFound EventType = "sdk:supernodes_found" SDKRegistrationAttempt EventType = "sdk:registration_attempt" @@ -22,41 +22,41 @@ const ( SDKRegistrationSuccessful EventType = "sdk:registration_successful" SDKTaskTxHashReceived EventType = "sdk:txhash_received" SDKTaskCompleted EventType = "sdk:completed" - SDKTaskFailed EventType = "sdk:failed" - SDKConnectionEstablished EventType = "sdk:connection_established" - // Upload/processing phase events for cascade registration - SDKUploadStarted EventType = "sdk:upload_started" - SDKUploadCompleted EventType = "sdk:upload_completed" - SDKUploadFailed EventType = "sdk:upload_failed" // reason includes timeout - SDKProcessingStarted EventType = "sdk:processing_started" - SDKProcessingFailed EventType = "sdk:processing_failed" - SDKProcessingTimeout EventType = "sdk:processing_timeout" + SDKTaskFailed EventType = "sdk:failed" + SDKConnectionEstablished EventType = "sdk:connection_established" + // Upload/processing phase events for cascade registration + SDKUploadStarted EventType = "sdk:upload_started" + SDKUploadCompleted EventType = "sdk:upload_completed" + SDKUploadFailed EventType = "sdk:upload_failed" // reason includes timeout + SDKProcessingStarted EventType = "sdk:processing_started" + SDKProcessingFailed EventType = "sdk:processing_failed" + SDKProcessingTimeout EventType = "sdk:processing_timeout" - SDKDownloadAttempt EventType = "sdk:download_attempt" - SDKDownloadFailure EventType = "sdk:download_failure" - SDKDownloadStarted EventType = "sdk:download_started" - SDKDownloadCompleted EventType = 
"sdk:download_completed" + SDKDownloadAttempt EventType = "sdk:download_attempt" + SDKDownloadFailure EventType = "sdk:download_failure" + SDKDownloadStarted EventType = "sdk:download_started" + SDKDownloadCompleted EventType = "sdk:download_completed" ) const ( - SupernodeActionRetrieved EventType = "supernode:action_retrieved" - SupernodeActionFeeVerified EventType = "supernode:action_fee_verified" - SupernodeTopCheckPassed EventType = "supernode:top_check_passed" - SupernodeMetadataDecoded EventType = "supernode:metadata_decoded" - SupernodeDataHashVerified EventType = "supernode:data_hash_verified" - SupernodeInputEncoded EventType = "supernode:input_encoded" - SupernodeSignatureVerified EventType = "supernode:signature_verified" - SupernodeRQIDGenerated EventType = "supernode:rqid_generated" - SupernodeRQIDVerified EventType = "supernode:rqid_verified" - SupernodeFinalizeSimulated EventType = "supernode:finalize_simulated" - SupernodeArtefactsStored EventType = "supernode:artefacts_stored" - SupernodeActionFinalized EventType = "supernode:action_finalized" - SupernodeArtefactsDownloaded EventType = "supernode:artefacts_downloaded" - SupernodeNetworkRetrieveStarted EventType = "supernode:network_retrieve_started" - SupernodeDecodeCompleted EventType = "supernode:decode_completed" - SupernodeServeReady EventType = "supernode:serve_ready" - SupernodeUnknown EventType = "supernode:unknown" - SupernodeFinalizeSimulationFailed EventType = "supernode:finalize_simulation_failed" + SupernodeActionRetrieved EventType = "supernode:action_retrieved" + SupernodeActionFeeVerified EventType = "supernode:action_fee_verified" + SupernodeTopCheckPassed EventType = "supernode:top_check_passed" + SupernodeMetadataDecoded EventType = "supernode:metadata_decoded" + SupernodeDataHashVerified EventType = "supernode:data_hash_verified" + SupernodeInputEncoded EventType = "supernode:input_encoded" + SupernodeSignatureVerified EventType = "supernode:signature_verified" + 
SupernodeRQIDGenerated EventType = "supernode:rqid_generated" + SupernodeRQIDVerified EventType = "supernode:rqid_verified" + SupernodeFinalizeSimulated EventType = "supernode:finalize_simulated" + SupernodeArtefactsStored EventType = "supernode:artefacts_stored" + SupernodeActionFinalized EventType = "supernode:action_finalized" + SupernodeArtefactsDownloaded EventType = "supernode:artefacts_downloaded" + SupernodeNetworkRetrieveStarted EventType = "supernode:network_retrieve_started" + SupernodeDecodeCompleted EventType = "supernode:decode_completed" + SupernodeServeReady EventType = "supernode:serve_ready" + SupernodeUnknown EventType = "supernode:unknown" + SupernodeFinalizeSimulationFailed EventType = "supernode:finalize_simulation_failed" ) // EventData is a map of event data attributes using standardized keys diff --git a/sdk/net/client.go b/sdk/net/client.go index b88fe75b..96e5d7f5 100644 --- a/sdk/net/client.go +++ b/sdk/net/client.go @@ -1,12 +1,12 @@ package net import ( - "context" + "context" - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" ) // SupernodeClient defines the interface for communicating with supernodes @@ -16,7 +16,7 @@ type SupernodeClient interface { // HealthCheck performs a health check on the supernode HealthCheck(ctx context.Context) (*grpc_health_v1.HealthCheckResponse, error) - GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) + GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) // Download downloads the cascade action file Download(ctx context.Context, in *supernodeservice.CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) 
(*supernodeservice.CascadeSupernodeDownloadResponse, error) diff --git a/sdk/net/impl.go b/sdk/net/impl.go index cd6bf10f..77ac7de9 100644 --- a/sdk/net/impl.go +++ b/sdk/net/impl.go @@ -1,21 +1,21 @@ package net import ( - "context" - "fmt" - - "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" - ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" - "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials/alts/conn" - "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/client" - "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" - "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" - "github.com/LumeraProtocol/supernode/v2/sdk/log" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" + "context" + "fmt" + + "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" + ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" + "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials/alts/conn" + "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/client" + "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" + "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" + "github.com/LumeraProtocol/supernode/v2/sdk/log" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" ) // supernodeClient implements the SupernodeClient interface @@ -130,13 +130,13 @@ func (c *supernodeClient) HealthCheck(ctx context.Context) (*grpc_health_v1.Heal } func (c *supernodeClient) GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) { - resp, err := c.cascadeClient.GetSupernodeStatus(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get supernode status: %w", err) - } + resp, err := c.cascadeClient.GetSupernodeStatus(ctx) 
+ if err != nil { + return nil, fmt.Errorf("failed to get supernode status: %w", err) + } - c.logger.Debug(ctx, "Supernode status retrieved successfully") - return resp, nil + c.logger.Debug(ctx, "Supernode status retrieved successfully") + return resp, nil } // Download downloads the cascade action file diff --git a/sdk/task/timeouts.go b/sdk/task/timeouts.go index f6e1e7e6..4498fdaf 100644 --- a/sdk/task/timeouts.go +++ b/sdk/task/timeouts.go @@ -5,4 +5,3 @@ import "time" // connectionTimeout bounds supernode health/connection probing. // Keep this short to preserve snappy discovery without impacting long uploads. const connectionTimeout = 10 * time.Second - diff --git a/sn-manager/internal/config/config.go b/sn-manager/internal/config/config.go index 87568580..050d36b4 100644 --- a/sn-manager/internal/config/config.go +++ b/sn-manager/internal/config/config.go @@ -12,10 +12,19 @@ import ( const ( // ManagerHomeDir is the constant home directory for sn-manager ManagerHomeDir = ".sn-manager" - // GitHubRepo is the constant GitHub repository for supernode - GitHubRepo = "LumeraProtocol/supernode" + // defaultGitHubRepo is the default GitHub repository for supernode + defaultGitHubRepo = "LumeraProtocol/supernode" ) +// GitHubRepo is the GitHub repository for supernode and can be overridden via +// the SNM_GITHUB_REPO environment variable. 
+var GitHubRepo = func() string { + if v := os.Getenv("SNM_GITHUB_REPO"); v != "" { + return v + } + return defaultGitHubRepo +}() + // Config represents the sn-manager configuration type Config struct { Updates UpdateConfig `yaml:"updates"` diff --git a/supernode/cmd/config_update.go b/supernode/cmd/config_update.go index 91807962..3b3ff255 100644 --- a/supernode/cmd/config_update.go +++ b/supernode/cmd/config_update.go @@ -8,8 +8,8 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/spf13/cobra" cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/spf13/cobra" ) // configUpdateCmd represents the config update command @@ -51,7 +51,7 @@ func promptParameterSelection() (string, error) { Message: "Select parameter to update:", Options: []string{ "Supernode IP Address", - "Supernode Port", + "Supernode Port", "Lumera GRPC Address", "Chain ID", "Key Name", @@ -197,7 +197,7 @@ func updateKeyringBackend() error { // Show warning fmt.Println("⚠️ WARNING: Changing keyring backend will switch to a different keyring.") fmt.Println("You will need to select a key from the new keyring or recover one.") - + var proceed bool confirmPrompt := &survey.Confirm{ Message: "Do you want to continue?", @@ -225,14 +225,14 @@ func updateKeyringBackend() error { // Update keyring backend in config appConfig.KeyringConfig.Backend = backend - + // Save config with new keyring backend if err := saveConfig(); err != nil { return err } fmt.Printf("Updated keyring backend to: %s\n", backend) - + // Reload config to get the new keyring settings cfgFile := filepath.Join(baseDir, DefaultConfigFile) reloadedConfig, err := config.LoadConfig(cfgFile, baseDir) @@ -269,7 +269,7 @@ func selectKeyFromNewKeyring(kr cKeyring.Keyring, keyInfos []*cKeyring.Record) e func selectKeyFromKeyring(kr cKeyring.Keyring, keyInfos []*cKeyring.Record) error { // Build options list with 
display format options := []string{} - + // Add existing keys for _, info := range keyInfos { addr, err := info.GetAddress() @@ -278,7 +278,7 @@ func selectKeyFromKeyring(kr cKeyring.Keyring, keyInfos []*cKeyring.Record) erro } options = append(options, fmt.Sprintf("%s (%s)", info.Name, addr.String())) } - + // Always add option to recover new key options = append(options, "Add new key (recover from mnemonic)") @@ -316,4 +316,4 @@ func saveConfig() error { func init() { configCmd.AddCommand(configUpdateCmd) -} \ No newline at end of file +} diff --git a/supernode/cmd/init.go b/supernode/cmd/init.go index 6412d848..c4048eb0 100644 --- a/supernode/cmd/init.go +++ b/supernode/cmd/init.go @@ -618,7 +618,6 @@ func promptNetworkConfig(passedAddrs string, passedPort int, passedGRPC, passedC return "", 0, "", "", fmt.Errorf("invalid supernode port: %s", portStr) } - // Lumera GRPC address (full address with port) lumeraPrompt := &survey.Input{ Message: "Enter Lumera GRPC address:", diff --git a/supernode/node/action/server/cascade/cascade_action_server_test.go b/supernode/node/action/server/cascade/cascade_action_server_test.go index ff2738b3..c71c0173 100644 --- a/supernode/node/action/server/cascade/cascade_action_server_test.go +++ b/supernode/node/action/server/cascade/cascade_action_server_test.go @@ -9,8 +9,8 @@ import ( "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" cascademocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/mocks" - "github.com/stretchr/testify/assert" "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" ) func TestRegister_Success(t *testing.T) { diff --git a/supernode/node/action/server/cascade/helper.go b/supernode/node/action/server/cascade/helper.go index ec005707..386e9ce6 100644 --- a/supernode/node/action/server/cascade/helper.go +++ b/supernode/node/action/server/cascade/helper.go @@ -10,15 +10,15 @@ import ( ) func initializeHasherAndTempFile() (*blake3.Hasher, *os.File, string, 
error) { - hasher := blake3.New(32, nil) + hasher := blake3.New(32, nil) - // Create a unique temp file to avoid collisions across concurrent calls - tempFile, err := os.CreateTemp("", "cascade-upload-*") - if err != nil { - return nil, nil, "", fmt.Errorf("could not create temp file: %w", err) - } + // Create a unique temp file to avoid collisions across concurrent calls + tempFile, err := os.CreateTemp("", "cascade-upload-*") + if err != nil { + return nil, nil, "", fmt.Errorf("could not create temp file: %w", err) + } - return hasher, tempFile, tempFile.Name(), nil + return hasher, tempFile, tempFile.Name(), nil } func replaceTempDirWithTaskDir(taskID, tempFilePath string, tempFile *os.File) (targetPath string, err error) { diff --git a/supernode/node/supernode/gateway/swagger.go b/supernode/node/supernode/gateway/swagger.go index d86d0ad9..ee815e52 100644 --- a/supernode/node/supernode/gateway/swagger.go +++ b/supernode/node/supernode/gateway/swagger.go @@ -58,12 +58,12 @@ func (s *Server) serveSwaggerJSON(w http.ResponseWriter, r *http.Request) { // serveSwaggerUI serves the Swagger UI interface func (s *Server) serveSwaggerUI(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/html") - + tmpl, err := template.New("swagger").Parse(swaggerUIHTML) if err != nil { http.Error(w, "Failed to load Swagger UI", http.StatusInternalServerError) return } - + tmpl.Execute(w, nil) -} \ No newline at end of file +} diff --git a/supernode/node/supernode/server/status_server.go b/supernode/node/supernode/server/status_server.go index 8b061a3b..9f94fac5 100644 --- a/supernode/node/supernode/server/status_server.go +++ b/supernode/node/supernode/server/status_server.go @@ -174,7 +174,7 @@ func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) pbpm.Disk.UsedMb = pm.Disk.UsedMB pbpm.Disk.FreeMb = pm.Disk.FreeMB - // Detailed recent per-request lists removed from API + // Detailed recent per-request lists removed from API 
response.P2PMetrics = pbpm } diff --git a/supernode/node/supernode/server/status_server_test.go b/supernode/node/supernode/server/status_server_test.go index 7b2808d7..251cfd8d 100644 --- a/supernode/node/supernode/server/status_server_test.go +++ b/supernode/node/supernode/server/status_server_test.go @@ -32,10 +32,10 @@ func TestSupernodeServer_GetStatus(t *testing.T) { assert.NotNil(t, resp.Resources.Memory) assert.NotNil(t, resp.RunningTasks) assert.NotNil(t, resp.RegisteredServices) - + // Check version field assert.NotEmpty(t, resp.Version) - + // Check uptime field assert.True(t, resp.UptimeSeconds >= 0) @@ -48,7 +48,7 @@ func TestSupernodeServer_GetStatus(t *testing.T) { assert.True(t, resp.Resources.Memory.TotalGb > 0) assert.True(t, resp.Resources.Memory.UsagePercent >= 0) assert.True(t, resp.Resources.Memory.UsagePercent <= 100) - + // Check hardware summary if resp.Resources.Cpu.Cores > 0 && resp.Resources.Memory.TotalGb > 0 { assert.NotEmpty(t, resp.Resources.HardwareSummary) @@ -61,7 +61,7 @@ func TestSupernodeServer_GetStatus(t *testing.T) { // Should have no services initially assert.Empty(t, resp.RunningTasks) assert.Empty(t, resp.RegisteredServices) - + // Check new fields have default values assert.NotNil(t, resp.Network) assert.Equal(t, int32(0), resp.Network.PeersCount) diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go index d1fd6ab9..f0c47ee5 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/services/cascade/adaptors/p2p.go @@ -38,8 +38,8 @@ type P2PService interface { // p2pImpl is the default implementation of the P2PService interface. type p2pImpl struct { - p2p p2p.Client - rqStore rqstore.Store + p2p p2p.Client + rqStore rqstore.Store } // NewP2PService returns a concrete implementation of P2PService. 
diff --git a/supernode/services/cascade/config.go b/supernode/services/cascade/config.go index df6abd1f..7a0f1ef2 100644 --- a/supernode/services/cascade/config.go +++ b/supernode/services/cascade/config.go @@ -6,8 +6,8 @@ import ( // Config contains settings for the cascade service type Config struct { - common.Config `mapstructure:",squash" json:"-"` + common.Config `mapstructure:",squash" json:"-"` - RaptorQServiceAddress string `mapstructure:"-" json:"-"` - RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` + RaptorQServiceAddress string `mapstructure:"-" json:"-"` + RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` } diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index 0c5c9ed7..3cce953c 100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -1,12 +1,12 @@ package cascade import ( - "context" - "encoding/json" - "fmt" - "os" - "sort" - "time" + "context" + "encoding/json" + "fmt" + "os" + "sort" + "time" actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" @@ -82,7 +82,7 @@ func (task *CascadeRegistrationTask) Download( } logtrace.Info(ctx, "download: action state ok", fields) -metadata, err := cascadekit.UnmarshalCascadeMetadata(actionDetails.GetAction().Metadata) + metadata, err := cascadekit.UnmarshalCascadeMetadata(actionDetails.GetAction().Metadata) if err != nil { fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) @@ -150,8 +150,8 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti } logtrace.Debug(ctx, "Retrieve index file completed", logtrace.Fields{"index_id": indexID, "bytes": len(indexFile), "ms": time.Since(iStart).Milliseconds()}) - // Parse index file to get layout IDs - indexData, err := cascadekit.ParseCompressedIndexFile(indexFile) + // 
Parse index file to get layout IDs + indexData, err := cascadekit.ParseCompressedIndexFile(indexFile) if err != nil { logtrace.Warn(ctx, "failed to parse index file", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error()}) continue @@ -283,15 +283,15 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( return "", "", errors.New("file hash is nil") } - err = cascadekit.VerifyB64DataHash(fileHash, dataHash) - if err != nil { - logtrace.Error(ctx, "failed to verify hash", fields) - fields[logtrace.FieldError] = err.Error() - return "", decodeInfo.DecodeTmpDir, err - } - // Preserve original debug log for successful hash match - logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) - // Log the state of the temporary decode directory + err = cascadekit.VerifyB64DataHash(fileHash, dataHash) + if err != nil { + logtrace.Error(ctx, "failed to verify hash", fields) + fields[logtrace.FieldError] = err.Error() + return "", decodeInfo.DecodeTmpDir, err + } + // Preserve original debug log for successful hash match + logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) + // Log the state of the temporary decode directory if decodeInfo.DecodeTmpDir != "" { if set, derr := utils.ReadDirFilenames(decodeInfo.DecodeTmpDir); derr == nil { if left := len(set); left > 0 { diff --git a/supernode/services/cascade/events.go b/supernode/services/cascade/events.go index 0b25d3b8..f1314a1a 100644 --- a/supernode/services/cascade/events.go +++ b/supernode/services/cascade/events.go @@ -3,23 +3,23 @@ package cascade type SupernodeEventType int const ( - SupernodeEventTypeUNKNOWN SupernodeEventType = 0 - SupernodeEventTypeActionRetrieved SupernodeEventType = 1 - SupernodeEventTypeActionFeeVerified SupernodeEventType = 2 - SupernodeEventTypeTopSupernodeCheckPassed SupernodeEventType = 3 - SupernodeEventTypeMetadataDecoded SupernodeEventType = 4 - SupernodeEventTypeDataHashVerified 
SupernodeEventType = 5 - SupernodeEventTypeInputEncoded SupernodeEventType = 6 - SupernodeEventTypeSignatureVerified SupernodeEventType = 7 - SupernodeEventTypeRQIDsGenerated SupernodeEventType = 8 - SupernodeEventTypeRqIDsVerified SupernodeEventType = 9 - SupernodeEventTypeFinalizeSimulated SupernodeEventType = 10 - SupernodeEventTypeArtefactsStored SupernodeEventType = 11 - SupernodeEventTypeActionFinalized SupernodeEventType = 12 - SupernodeEventTypeArtefactsDownloaded SupernodeEventType = 13 - SupernodeEventTypeFinalizeSimulationFailed SupernodeEventType = 14 - // Download phase markers - SupernodeEventTypeNetworkRetrieveStarted SupernodeEventType = 15 - SupernodeEventTypeDecodeCompleted SupernodeEventType = 16 - SupernodeEventTypeServeReady SupernodeEventType = 17 + SupernodeEventTypeUNKNOWN SupernodeEventType = 0 + SupernodeEventTypeActionRetrieved SupernodeEventType = 1 + SupernodeEventTypeActionFeeVerified SupernodeEventType = 2 + SupernodeEventTypeTopSupernodeCheckPassed SupernodeEventType = 3 + SupernodeEventTypeMetadataDecoded SupernodeEventType = 4 + SupernodeEventTypeDataHashVerified SupernodeEventType = 5 + SupernodeEventTypeInputEncoded SupernodeEventType = 6 + SupernodeEventTypeSignatureVerified SupernodeEventType = 7 + SupernodeEventTypeRQIDsGenerated SupernodeEventType = 8 + SupernodeEventTypeRqIDsVerified SupernodeEventType = 9 + SupernodeEventTypeFinalizeSimulated SupernodeEventType = 10 + SupernodeEventTypeArtefactsStored SupernodeEventType = 11 + SupernodeEventTypeActionFinalized SupernodeEventType = 12 + SupernodeEventTypeArtefactsDownloaded SupernodeEventType = 13 + SupernodeEventTypeFinalizeSimulationFailed SupernodeEventType = 14 + // Download phase markers + SupernodeEventTypeNetworkRetrieveStarted SupernodeEventType = 15 + SupernodeEventTypeDecodeCompleted SupernodeEventType = 16 + SupernodeEventTypeServeReady SupernodeEventType = 17 ) diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go index 
5a36b644..4084243a 100644 --- a/supernode/services/cascade/helper.go +++ b/supernode/services/cascade/helper.go @@ -1,25 +1,25 @@ package cascade import ( - "context" - "encoding/base64" - "fmt" - "strconv" - - "cosmossdk.io/math" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - - sdk "github.com/cosmos/cosmos-sdk/types" - json "github.com/json-iterator/go" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" + "context" + "encoding/base64" + "fmt" + "strconv" + + "cosmossdk.io/math" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" + + sdk "github.com/cosmos/cosmos-sdk/types" + json "github.com/json-iterator/go" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // layout stats helpers removed to keep download metrics minimal. 
@@ -74,11 +74,11 @@ func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID s } func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context.Context, encoded string, creator string, - encodedMeta codec.Layout, f logtrace.Fields) (codec.Layout, string, error) { + encodedMeta codec.Layout, f logtrace.Fields) (codec.Layout, string, error) { - // Extract index file and creator signature from encoded data - // The signatures field contains: Base64(index_file).creators_signature - indexFileB64, creatorSig, err := cascadekit.ExtractIndexAndCreatorSig(encoded) + // Extract index file and creator signature from encoded data + // The signatures field contains: Base64(index_file).creators_signature + indexFileB64, creatorSig, err := cascadekit.ExtractIndexAndCreatorSig(encoded) if err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to extract index file and creator signature", err, f) } @@ -94,8 +94,8 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. } logtrace.Debug(ctx, "creator signature successfully verified", f) - // Decode index file to get the layout signature - indexFile, err := cascadekit.DecodeIndexB64(indexFileB64) + // Decode index file to get the layout signature + indexFile, err := cascadekit.DecodeIndexB64(indexFileB64) if err != nil { return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode index file", err, f) } @@ -116,36 +116,36 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context. 
} logtrace.Debug(ctx, "layout signature successfully verified", f) - return encodedMeta, indexFile.LayoutSignature, nil + return encodedMeta, indexFile.LayoutSignature, nil } func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, - sig, creator string, encodedMeta codec.Layout, f logtrace.Fields) (cascadekit.GenRQIdentifiersFilesResponse, error) { - // The signatures field contains: Base64(index_file).creators_signature - // This full format will be used for ID generation to match chain expectations - - // Generate layout files (redundant metadata files) - layoutRes, err := cascadekit.GenerateLayoutFiles(ctx, encodedMeta, sig, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) - if err != nil { - return cascadekit.GenRQIdentifiersFilesResponse{}, - task.wrapErr(ctx, "failed to generate layout files", err, f) - } - - // Generate index files using full signatures format for ID generation (matches chain expectation) - indexIDs, indexFiles, err := cascadekit.GenerateIndexFiles(ctx, meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) - if err != nil { - return cascadekit.GenRQIdentifiersFilesResponse{}, - task.wrapErr(ctx, "failed to generate index files", err, f) - } - - // Store layout files and index files separately in P2P - allFiles := append(layoutRes.RedundantMetadataFiles, indexFiles...) 
- - // Return index IDs (sent to chain) and all files (stored in P2P) - return cascadekit.GenRQIdentifiersFilesResponse{ - RQIDs: indexIDs, - RedundantMetadataFiles: allFiles, - }, nil + sig, creator string, encodedMeta codec.Layout, f logtrace.Fields) (cascadekit.GenRQIdentifiersFilesResponse, error) { + // The signatures field contains: Base64(index_file).creators_signature + // This full format will be used for ID generation to match chain expectations + + // Generate layout files (redundant metadata files) + layoutRes, err := cascadekit.GenerateLayoutFiles(ctx, encodedMeta, sig, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return cascadekit.GenRQIdentifiersFilesResponse{}, + task.wrapErr(ctx, "failed to generate layout files", err, f) + } + + // Generate index files using full signatures format for ID generation (matches chain expectation) + indexIDs, indexFiles, err := cascadekit.GenerateIndexFiles(ctx, meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return cascadekit.GenRQIdentifiersFilesResponse{}, + task.wrapErr(ctx, "failed to generate index files", err, f) + } + + // Store layout files and index files separately in P2P + allFiles := append(layoutRes.RedundantMetadataFiles, indexFiles...) 
+ + // Return index IDs (sent to chain) and all files (stored in P2P) + return cascadekit.GenRQIdentifiersFilesResponse{ + RQIDs: indexIDs, + RedundantMetadataFiles: allFiles, + }, nil } // storeArtefacts persists cascade artefacts (ID files + RaptorQ symbols) via the @@ -256,7 +256,8 @@ func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action // -// VerifyDownloadSignature verifies the download signature for actionID.creatorAddress +// VerifyDownloadSignature verifies a download signature where the signed payload +// is actionID (creator address not included in the payload) func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context, actionID, signature string) error { fields := logtrace.Fields{ logtrace.FieldActionID: actionID, @@ -272,9 +273,9 @@ func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context creatorAddress := actionDetails.GetAction().Creator fields["creator_address"] = creatorAddress - // Create the expected signature data: actionID (creator address not included in payload) - signatureData := fmt.Sprintf("%s", actionID) - fields["signature_data"] = signatureData + // Create the expected signature data: actionID (creator address not included in payload) + signatureData := fmt.Sprintf("%s", actionID) + fields["signature_data"] = signatureData // Decode the base64 signature signatureBytes, err := base64.StdEncoding.DecodeString(signature) diff --git a/supernode/services/cascade/service.go b/supernode/services/cascade/service.go index f88c284b..a1d9898b 100644 --- a/supernode/services/cascade/service.go +++ b/supernode/services/cascade/service.go @@ -56,11 +56,11 @@ func (service *CascadeService) GetRunningTasks() []string { // NewCascadeService returns a new CascadeService instance func NewCascadeService(config *Config, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService { - return &CascadeService{ - config: config, - SuperNodeService: 
base.NewSuperNodeService(p2pClient), - LumeraClient: adaptors.NewLumeraClient(lumera), - P2P: adaptors.NewP2PService(p2pClient, rqstore), - RQ: adaptors.NewCodecService(codec), - } + return &CascadeService{ + config: config, + SuperNodeService: base.NewSuperNodeService(p2pClient), + LumeraClient: adaptors.NewLumeraClient(lumera), + P2P: adaptors.NewP2PService(p2pClient, rqstore), + RQ: adaptors.NewCodecService(codec), + } } diff --git a/supernode/services/cascade/status_test.go b/supernode/services/cascade/status_test.go index d85f9f8f..0c1b04fd 100644 --- a/supernode/services/cascade/status_test.go +++ b/supernode/services/cascade/status_test.go @@ -66,7 +66,7 @@ func TestGetStatus(t *testing.T) { // Version check assert.NotEmpty(t, resp.Version) - + // Uptime check assert.True(t, resp.UptimeSeconds >= 0) @@ -79,7 +79,7 @@ func TestGetStatus(t *testing.T) { assert.True(t, resp.Resources.Memory.TotalGB > 0) assert.True(t, resp.Resources.Memory.UsedGB <= resp.Resources.Memory.TotalGB) assert.True(t, resp.Resources.Memory.UsagePercent >= 0 && resp.Resources.Memory.UsagePercent <= 100) - + // Hardware summary check if resp.Resources.CPU.Cores > 0 && resp.Resources.Memory.TotalGB > 0 { assert.NotEmpty(t, resp.Resources.HardwareSummary) @@ -91,7 +91,7 @@ func TestGetStatus(t *testing.T) { // Registered services check assert.Contains(t, resp.RegisteredServices, "cascade") - + // Check new fields have default values (since service doesn't have access to P2P/lumera/config) assert.Equal(t, int32(0), resp.Network.PeersCount) assert.Empty(t, resp.Network.PeerAddresses) diff --git a/supernode/services/common/supernode/metrics.go b/supernode/services/common/supernode/metrics.go index 6c36ab35..718c2a8f 100644 --- a/supernode/services/common/supernode/metrics.go +++ b/supernode/services/common/supernode/metrics.go @@ -37,7 +37,7 @@ func (m *MetricsCollector) GetCPUCores(ctx context.Context) (int32, error) { logtrace.Error(ctx, "failed to get cpu core count", 
logtrace.Fields{logtrace.FieldError: err.Error()}) return 0, err } - + return int32(cores), nil } @@ -67,7 +67,7 @@ func (m *MetricsCollector) CollectStorageMetrics(ctx context.Context, paths []st if err != nil { logtrace.Error(ctx, "failed to get storage info", logtrace.Fields{ logtrace.FieldError: err.Error(), - "path": path, + "path": path, }) continue // Skip this path but continue with others } diff --git a/supernode/services/common/supernode/service.go b/supernode/services/common/supernode/service.go index 1d0b9dd0..8569470b 100644 --- a/supernode/services/common/supernode/service.go +++ b/supernode/services/common/supernode/service.go @@ -218,7 +218,7 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric } } - // Detailed recent per-request lists removed from API mapping + // Detailed recent per-request lists removed from API mapping } // DHT rolling metrics snapshot is attached at top-level under dht_metrics diff --git a/supernode/services/common/supernode/service_test.go b/supernode/services/common/supernode/service_test.go index e2f82287..f7a9b4c4 100644 --- a/supernode/services/common/supernode/service_test.go +++ b/supernode/services/common/supernode/service_test.go @@ -14,12 +14,12 @@ func TestSupernodeStatusService(t *testing.T) { t.Run("empty service", func(t *testing.T) { statusService := NewSupernodeStatusService(nil, nil, nil) - resp, err := statusService.GetStatus(ctx, false) + resp, err := statusService.GetStatus(ctx, false) assert.NoError(t, err) - + // Should have version info assert.NotEmpty(t, resp.Version) - + // Should have uptime assert.True(t, resp.UptimeSeconds >= 0) @@ -30,7 +30,7 @@ func TestSupernodeStatusService(t *testing.T) { assert.True(t, resp.Resources.Memory.TotalGB > 0) assert.True(t, resp.Resources.Memory.UsagePercent >= 0) assert.True(t, resp.Resources.Memory.UsagePercent <= 100) - + // Should have hardware summary if cores and memory are available if resp.Resources.CPU.Cores > 0 && 
resp.Resources.Memory.TotalGB > 0 { assert.NotEmpty(t, resp.Resources.HardwareSummary) @@ -43,7 +43,7 @@ func TestSupernodeStatusService(t *testing.T) { // Should have empty services list assert.Empty(t, resp.RunningTasks) assert.Empty(t, resp.RegisteredServices) - + // Should have default values for new fields assert.Equal(t, int32(0), resp.Network.PeersCount) assert.Empty(t, resp.Network.PeerAddresses) @@ -61,7 +61,7 @@ func TestSupernodeStatusService(t *testing.T) { } statusService.RegisterTaskProvider(mockProvider) - resp, err := statusService.GetStatus(ctx, false) + resp, err := statusService.GetStatus(ctx, false) assert.NoError(t, err) // Should have one service @@ -91,7 +91,7 @@ func TestSupernodeStatusService(t *testing.T) { statusService.RegisterTaskProvider(cascadeProvider) statusService.RegisterTaskProvider(senseProvider) - resp, err := statusService.GetStatus(ctx, false) + resp, err := statusService.GetStatus(ctx, false) assert.NoError(t, err) // Should have two services @@ -127,7 +127,7 @@ func TestSupernodeStatusService(t *testing.T) { } statusService.RegisterTaskProvider(mockProvider) - resp, err := statusService.GetStatus(ctx, false) + resp, err := statusService.GetStatus(ctx, false) assert.NoError(t, err) // Should have one service diff --git a/supernode/services/common/supernode/types.go b/supernode/services/common/supernode/types.go index e84b954a..6224d36d 100644 --- a/supernode/services/common/supernode/types.go +++ b/supernode/services/common/supernode/types.go @@ -60,12 +60,12 @@ type NetworkInfo struct { // P2PMetrics mirrors the proto P2P metrics for status API type P2PMetrics struct { - DhtMetrics DhtMetrics - NetworkHandleMetrics map[string]HandleCounters - ConnPoolMetrics map[string]int64 - BanList []BanEntry - Database DatabaseStats - Disk DiskStatus + DhtMetrics DhtMetrics + NetworkHandleMetrics map[string]HandleCounters + ConnPoolMetrics map[string]int64 + BanList []BanEntry + Database DatabaseStats + Disk DiskStatus } type 
StoreSuccessPoint struct { diff --git a/tests/integration/p2p/p2p_integration_test.go b/tests/integration/p2p/p2p_integration_test.go index 478711d2..a856211b 100644 --- a/tests/integration/p2p/p2p_integration_test.go +++ b/tests/integration/p2p/p2p_integration_test.go @@ -203,8 +203,8 @@ func SetupTestP2PNodes(t *testing.T, ctx context.Context) ([]p2p.Client, []*rqst require.NoError(t, err, "failed to create rqstore for node %d: %v", i, err) rqStores = append(rqStores, rqStore) - // Disable metrics in integration tests by default - service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil) + // Disable metrics in integration tests by default + service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil) require.NoError(t, err, "failed to create p2p service for node %d: %v", i, err) // Start P2P service