From 1b23e280ed46d597cd42044acd5761824e68507c Mon Sep 17 00:00:00 2001 From: Matee ullah Malik <46045452+mateeullahmalik@users.noreply.github.com> Date: Mon, 15 Sep 2025 17:31:59 +0500 Subject: [PATCH 1/7] improve write deadline for p2p data upload (#173) --- p2p/kademlia/network.go | 54 ++++++++++++++++------ supernode/services/cascade/adaptors/p2p.go | 2 +- 2 files changed, 41 insertions(+), 15 deletions(-) diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index 366f0f4a..56e7ac55 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -627,20 +627,7 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes // ---- retryable RPC helpers ------------------------------------------------- func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAddr string, data []byte, timeout time.Duration, msgType int) (*Message, error) { - - sizeMB := float64(len(data)) / (1024.0 * 1024.0) // data is your gob-encoded message - throughputFloor := 8.0 // MB/s (~64 Mbps) - est := time.Duration(sizeMB / throughputFloor * float64(time.Second)) - base := 1 * time.Second - cushion := 5 * time.Second - - writeDL := base + est + cushion - if writeDL < 5*time.Second { - writeDL = 5 * time.Second - } - if writeDL > timeout-1*time.Second { - writeDL = timeout - 1*time.Second - } + writeDL := calcWriteDeadline(timeout, len(data), 2.0) // target ~2 MB/s retried := false for { @@ -1430,3 +1417,42 @@ func readDeadlineFor(msgType int, overall time.Duration) time.Duration { return overall // Bulk responses keep full budget } } + +// calcWriteDeadline returns a conservative write deadline based on payload size. +// - targetMBps: assumed sustained throughput (lower = more lenient). +// - We reserve some headroom from overall timeout for server processing/response. 
+func calcWriteDeadline(timeout time.Duration, sizeBytes int, targetMBps float64) time.Duration { + if timeout <= 0 { + timeout = 30 * time.Second + } + // Leave headroom for server processing + response + const reserve = 8 * time.Second + maxBudget := timeout - reserve + if maxBudget < 5*time.Second { + maxBudget = timeout - 1*time.Second + if maxBudget < 3*time.Second { + maxBudget = 3 * time.Second + } + } + + sizeMB := float64(sizeBytes) / (1024.0 * 1024.0) + base := 2 * time.Second + cushion := 5 * time.Second + + // Softer floor: assume ~2 MB/s; increase if you like. + if targetMBps <= 0 { + targetMBps = 2.0 + } + est := time.Duration(sizeMB / targetMBps * float64(time.Second)) + + writeDL := base + est + cushion + + // Ensure a more generous minimum for big-ish payloads + if writeDL < 10*time.Second { + writeDL = 10 * time.Second + } + if writeDL > maxBudget { + writeDL = maxBudget + } + return writeDL +} diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go index b3e6377c..be2eb74c 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/services/cascade/adaptors/p2p.go @@ -20,7 +20,7 @@ import ( ) const ( - loadSymbolsBatchSize = 5000 + loadSymbolsBatchSize = 3000 // Minimum first-pass coverage to store before returning from Register (percent) storeSymbolsPercent = 18 From 50e9edc78f3e4a9d8048e4e8674c775045708a48 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Sat, 13 Sep 2025 16:29:38 +0500 Subject: [PATCH 2/7] feat: Enhance call metrics and documentation --- docs/p2p-metrics-capture.md | 178 +++++++++++ p2p/client.go | 13 +- p2p/kademlia/dht.go | 143 ++++++--- p2p/kademlia/message.go | 4 + p2p/kademlia/rq_symbols.go | 4 +- p2p/mocks/Client.go | 30 +- p2p/p2p.go | 12 +- pkg/p2pmetrics/metrics.go | 281 ++++++++++++++++++ sdk/adapters/supernodeservice/adapter.go | 164 ++++++---- sdk/event/keys.go | 26 ++ sdk/net/factory.go | 11 +- .../server/cascade/cascade_action_server.go | 97 +++--- 
.../cascade/adaptors/mocks/p2p_mock.go | 7 +- supernode/services/cascade/adaptors/p2p.go | 124 +++----- supernode/services/cascade/download.go | 124 +++++--- supernode/services/cascade/helper.go | 44 ++- .../services/cascade/progressive_decode.go | 86 ------ supernode/services/cascade/register.go | 15 +- supernode/services/cascade/register_test.go | 17 +- supernode/services/common/storage/handler.go | 15 +- .../services/common/storage/handler_test.go | 3 +- tests/integration/p2p/p2p_integration_test.go | 2 +- 22 files changed, 909 insertions(+), 491 deletions(-) create mode 100644 docs/p2p-metrics-capture.md create mode 100644 pkg/p2pmetrics/metrics.go delete mode 100644 supernode/services/cascade/progressive_decode.go diff --git a/docs/p2p-metrics-capture.md b/docs/p2p-metrics-capture.md new file mode 100644 index 00000000..d272a7c7 --- /dev/null +++ b/docs/p2p-metrics-capture.md @@ -0,0 +1,178 @@ +# P2P Metrics Capture — What Each Field Means and Where It’s Collected + +This guide explains every field we emit in Cascade events, how it is measured, and exactly where it is captured in the code. + +The design is minimal by intent: +- Metrics are collected only for the first pass of Register (store) and for the active Download operation. +- P2P APIs return errors only; per‑RPC details are captured via a small metrics package (`pkg/p2pmetrics`). +- No aggregation; we only group raw RPC attempts by IP. 
+

--- 

## Store (Register) Event

Event payload shape

```json
{
  "store": {
    "duration_ms": 9876,
    "symbols_first_pass": 220,
    "symbols_total": 1200,
    "id_files_count": 14,
    "calls_by_ip": {
      "10.0.0.5": [
        {"ip": "10.0.0.5", "address": "A:4445", "keys": 100, "success": true, "duration_ms": 120},
        {"ip": "10.0.0.5", "address": "A:4445", "keys": 120, "success": false, "error": "timeout", "duration_ms": 300}
      ]
    }
  }
}
```

### Fields

- `store.duration_ms`
  - Meaning: End‑to‑end elapsed time of the first‑pass store phase (Register’s storage section only).
  - Where captured: `supernode/services/cascade/adaptors/p2p.go`
  - A `time.Now()` timestamp is taken just before the first‑pass store function and measured on return.

- `store.symbols_first_pass`
  - Meaning: Number of symbols sent during the Register first pass (across the combined first batch and any immediate first‑pass symbol batches).
  - Where captured: `supernode/services/cascade/adaptors/p2p.go` via `p2pmetrics.SetStoreSummary(...)` using the value returned by `storeCascadeSymbolsAndData`.

- `store.symbols_total`
  - Meaning: Total symbols available in the symbol directory (before sampling). Used to contextualize the first‑pass coverage.
  - Where captured: Computed in `storeCascadeSymbolsAndData` and included in `SetStoreSummary`.

- `store.id_files_count`
  - Meaning: Number of redundant metadata files (ID files) sent in the first combined batch.
  - Where captured: `len(req.IDFiles)` in `StoreArtefacts`, passed to `SetStoreSummary`.

- `store.calls_by_ip`
  - Meaning: All raw network store RPC attempts grouped by the node IP.
  - Each array entry is a single RPC attempt with:
    - `ip` — Node IP (fallback to `address` if missing).
    - `address` — Node string `IP:port`.
    - `keys` — Number of items in that RPC attempt (metadata + first symbols for the first combined batch, symbols for subsequent batches within the first pass).
+ - `success` — True if the node acknowledged the store successfully. + - `error` — Any error string captured; omitted when success. + - `duration_ms` — RPC duration in milliseconds. + - Where captured: + - Emission point (P2P): `p2p/kademlia/dht.go::IterateBatchStore(...)` + - After each node RPC returns, we call `p2pmetrics.RecordStore(taskID, Call{...})`. + - `taskID` is read from the context via `p2pmetrics.TaskIDFromContext(ctx)`. + - Grouping: `pkg/p2pmetrics/metrics.go` + - `StartStoreCapture(taskID)` enables capture; `StopStoreCapture(taskID)` disables it. + - Calls are grouped by `ip` (fallback to `address`) without further aggregation. + +### First‑Pass Success Threshold + +- Internal enforcement only: if DHT first‑pass success rate is below 75%, `IterateBatchStore` returns an error. +- No success rate is emitted in events; only error flow is affected. +- Code: `p2p/kademlia/dht.go::IterateBatchStore`. + +### Scope Limits + +- Background worker (which continues storing remaining symbols) is NOT captured — we don’t set a metrics task ID on those paths. + +--- + +## Download (Retrieve) Event + +Event payload shape + +```json +{ + "retrieve": { + "found_local": 42, + "retrieve_ms": 2000, + "decode_ms": 8000, + "calls_by_ip": { + "10.0.0.7": [ + {"ip": "10.0.0.7", "address": "B:4445", "keys": 13, "success": true, "duration_ms": 90} + ] + } + } +} +``` + +### Fields + +- `retrieve.found_local` + - Meaning: Number of items retrieved from local storage before any network calls. + - Where captured: `p2p/kademlia/dht.go::BatchRetrieve(...)` + - After `fetchAndAddLocalKeys`, we call `p2pmetrics.ReportFoundLocal(taskID, int(foundLocalCount))`. + - `taskID` is read from context with `p2pmetrics.TaskIDFromContext(ctx)`. + +- `retrieve.retrieve_ms` + - Meaning: Time spent in network batch‑retrieve. + - Where captured: `supernode/services/cascade/download.go` + - Timestamp before `BatchRetrieve`, measured after it returns. 
+ +- `retrieve.decode_ms` + - Meaning: Time spent decoding symbols and reconstructing the file. + - Where captured: `supernode/services/cascade/download.go` + - Timestamp before decode, measured after it returns. + +- `retrieve.calls_by_ip` + - Meaning: All raw per‑RPC retrieve attempts grouped by node IP. + - Each array entry is a single RPC attempt with: + - `ip`, `address` — Identifiers as available. + - `keys` — Number of symbols returned by that node in that call. + - `success` — True if `keys > 0`. + - `error` — Error string when the RPC failed; omitted otherwise. + - `duration_ms` — RPC duration in milliseconds. + - Where captured: + - Emission point (P2P): `p2p/kademlia/dht.go::iterateBatchGetValues(...)` + - Each node RPC records a `p2pmetrics.RecordRetrieve(taskID, Call{...})`. + - `taskID` is extracted from context using `p2pmetrics.TaskIDFromContext(ctx)`. + - Grouping: `pkg/p2pmetrics/metrics.go` (same grouping/fallback as store). + +### Scope Limits + +- Metrics are captured only for the active Download call (context is tagged in `download.go`). + +--- + +## Context Tagging (Task ID) + +- We use an explicit, metrics‑only context key defined in `pkg/p2pmetrics` to tag P2P calls with a task ID. + - Setters: `p2pmetrics.WithTaskID(ctx, id)`. + - Getters: `p2pmetrics.TaskIDFromContext(ctx)`. +- Where it is set: + - Store (first pass): `supernode/services/cascade/adaptors/p2p.go` wraps `StoreBatch` calls. + - Download: `supernode/services/cascade/download.go` wraps `BatchRetrieve` call. + +--- + +## Building and Emitting Events + +- Store + - `supernode/services/cascade/helper.go::emitArtefactsStored(...)` + - Builds `store` payload via `p2pmetrics.BuildStoreEventPayloadFromCollector(taskID)`. + - Emits the event. + +- Download + - `supernode/services/cascade/download.go` + - Builds `retrieve` payload via `p2pmetrics.BuildDownloadEventPayloadFromCollector(actionID)`. + - Emits the event. 
+ +--- + +## Quick File Map + +- Capture + grouping: `pkg/p2pmetrics/metrics.go` +- Store adaptor: `supernode/services/cascade/adaptors/p2p.go` +- Store event: `supernode/services/cascade/helper.go` +- Download flow: `supernode/services/cascade/download.go` +- DHT store calls: `p2p/kademlia/dht.go::IterateBatchStore` +- DHT retrieve calls: `p2p/kademlia/dht.go::BatchRetrieve` and `iterateBatchGetValues` + +--- + +## Notes + +- No P2P stats/snapshots are used to build events. +- No aggregation is performed; we only group raw RPC attempts by IP. +- First‑pass success rate is enforced internally (75% threshold) but not emitted as a metric. diff --git a/p2p/client.go b/p2p/client.go index 6eb169b2..5d4a44be 100644 --- a/p2p/client.go +++ b/p2p/client.go @@ -22,17 +22,8 @@ type Client interface { // - the base58 encoded identifier will be returned Store(ctx context.Context, data []byte, typ int) (string, error) - // StoreBatch will store a batch of values with their Blake3 hash as the key. - // - // Semantics: - // - Returns `successRatePct` as a percentage (0–100) computed as - // successful node RPCs divided by total node RPCs attempted during the - // network store phase for this batch. - // - Returns `requests` as the total number of node RPCs attempted for this - // batch (not the number of items in `values`). - // - On error, `successRatePct` and `requests` may reflect partial progress; - // prefer using them only when err == nil, or treat as best‑effort metrics. - StoreBatch(ctx context.Context, values [][]byte, typ int, taskID string) (float64, int, error) + // StoreBatch stores a batch of values; returns error only. 
+ StoreBatch(ctx context.Context, values [][]byte, typ int, taskID string) error // Delete a key, value Delete(ctx context.Context, key string) error diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 60150ceb..81fb8ecc 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -23,6 +23,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" + "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" "github.com/LumeraProtocol/supernode/v2/pkg/storage" "github.com/LumeraProtocol/supernode/v2/pkg/storage/memory" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" @@ -351,32 +352,27 @@ func (s *DHT) Store(ctx context.Context, data []byte, typ int) (string, error) { return retKey, nil } -// StoreBatch will store a batch of values with their Blake3 hash as the key. -// -// Returns: -// - successRatePct: percentage (0–100) of successful node RPCs during the -// network store phase for this batch. -// - requestCount: total number of node RPCs attempted (batch store calls) for -// this batch; this is not the number of values stored. -// - error: wrapped error if local DB store failed, or if the network store did -// not reach the configured minimum success rate. -func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID string) (float64, int, error) { +// StoreBatch stores a batch of values with their Blake3 hash as the key. +// It persists to the local store then performs the network store. If the +// measured success rate for node RPCs is below the configured minimum, an error +// is returned. Metrics are not returned through the API. 
+func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID string) error { logtrace.Info(ctx, "Store DB batch begin", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, "records": len(values), }) if err := s.store.StoreBatch(ctx, values, typ, true); err != nil { - return 0, 0, fmt.Errorf("store batch: %v", err) + return fmt.Errorf("store batch: %v", err) } logtrace.Info(ctx, "Store DB batch done, store network batch begin", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, }) - rate, requests, err := s.IterateBatchStore(ctx, values, typ, taskID) + err := s.IterateBatchStore(ctx, values, typ, taskID) if err != nil { - return rate, requests, fmt.Errorf("iterate batch store: %v", err) + return fmt.Errorf("iterate batch store: %v", err) } logtrace.Info(ctx, "Store network batch workers done", logtrace.Fields{ @@ -384,7 +380,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s logtrace.FieldTaskID: taskID, }) - return rate, requests, nil + return nil } // Retrieve data from the networking using key. 
Key is the base58 encoded @@ -730,6 +726,8 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, if err != nil { return nil, fmt.Errorf("fetch and add local keys: %v", err) } + // Report how many were found locally, for event metrics + p2pmetrics.ReportFoundLocal(p2pmetrics.TaskIDFromContext(ctx), int(foundLocalCount)) if foundLocalCount >= required { return result, nil } @@ -907,6 +905,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, defer func() { <-semaphore }() } + callStart := time.Now() indices := fetchMap[nodeID] requestKeys := make(map[string]KeyValWithClosest) for _, idx := range indices { @@ -919,6 +918,14 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, } if len(requestKeys) == 0 { + p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ + IP: node.IP, + Address: node.String(), + Keys: 0, + Success: false, + Error: "", + DurationMS: time.Since(callStart).Milliseconds(), + }) return } @@ -929,23 +936,45 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, firstErr = err } mu.Unlock() + // record failed RPC per-node + p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ + IP: node.IP, + Address: node.String(), + Keys: 0, + Success: false, + Error: err.Error(), + DurationMS: time.Since(callStart).Milliseconds(), + }) return } + returned := 0 for k, v := range decompressedData { if len(v.Value) > 0 { _, loaded := resMap.LoadOrStore(k, v.Value) if !loaded { atomic.AddInt32(&foundCount, 1) + returned++ if atomic.LoadInt32(&foundCount) >= int32(req-alreadyFound) { cancel() // Cancel context to stop other goroutines - return + // don't early return; record metric and exit goroutine + break } } } else { contactsMap[nodeID][k] = v.Closest } } + + // record successful RPC per-node (returned may be 0) + p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ + IP: 
node.IP, + Address: node.String(), + Keys: returned, + Success: returned > 0, + Error: "", + DurationMS: time.Since(callStart).Milliseconds(), + }) }(node, nodeID) } @@ -990,7 +1019,6 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, nodes.TopN(Alpha) closestContacts[key] = nodes } - return int(foundCount), closestContacts, firstErr } @@ -1185,12 +1213,13 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat func (s *DHT) handleResponses(ctx context.Context, responses <-chan *Message, nl *NodeList) (*NodeList, []byte) { for response := range responses { s.addNode(ctx, response.Sender) - if response.MessageType == FindNode || response.MessageType == StoreData { + switch response.MessageType { + case FindNode, StoreData: v, ok := response.Data.(*FindNodeResponse) if ok && v.Status.Result == ResultOk && len(v.Closest) > 0 { nl.AddNodes(v.Closest) } - } else if response.MessageType == FindValue { + case FindValue: v, ok := response.Data.(*FindValueResponse) if ok { if v.Status.Result == ResultOk && len(v.Value) > 0 { @@ -1601,7 +1630,7 @@ func (s *DHT) addKnownNodes(ctx context.Context, nodes []*Node, knownNodes map[s // during this run; success rate is successful responses divided by this count. // If the success rate is below `minimumDataStoreSuccessRate`, an error is // returned alongside the measured rate and request count. 
-func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, id string) (float64, int, error) { +func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, id string) error { globalClosestContacts := make(map[string]*NodeList) knownNodes := make(map[string]*Node) hashes := make([][]byte, len(values)) @@ -1636,38 +1665,54 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i storeResponses := s.batchStoreNetwork(ctx, values, knownNodes, storageMap, typ) for response := range storeResponses { requests++ + var nodeAddr string + var nodeIP string + if response.Receiver != nil { + nodeAddr = response.Receiver.String() + nodeIP = response.Receiver.IP + } else if response.Message != nil && response.Message.Sender != nil { + nodeAddr = response.Message.Sender.String() + nodeIP = response.Message.Sender.IP + } + + errMsg := "" if response.Error != nil { - sender := "" - if response.Message != nil && response.Message.Sender != nil { - sender = response.Message.Sender.String() - } + errMsg = response.Error.Error() logtrace.Error(ctx, "Batch store failed on a node", logtrace.Fields{ logtrace.FieldModule: "dht", - "node": sender, - logtrace.FieldError: response.Error.Error(), + "node": nodeAddr, + logtrace.FieldError: errMsg, }) } - if response.Message == nil { - continue + if response.Message != nil { + if v, ok := response.Message.Data.(*StoreDataResponse); ok { + if v.Status.Result == ResultOk { + successful++ + } else { + if v.Status.ErrMsg != "" { + errMsg = v.Status.ErrMsg + } + logtrace.Error(ctx, "Batch store to node failed", logtrace.Fields{ + logtrace.FieldModule: "dht", + "err": errMsg, + "task_id": id, + "node": nodeAddr, + }) + } + } } - v, ok := response.Message.Data.(*StoreDataResponse) - if ok && v.Status.Result == ResultOk { - successful++ - } else { - errMsg := "unknown error" - if v != nil { - errMsg = v.Status.ErrMsg - } + // Emit per-node store RPC call via metrics bridge (no P2P API 
coupling) + p2pmetrics.RecordStore(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ + IP: nodeIP, + Address: nodeAddr, + Keys: response.KeysCount, + Success: errMsg == "" && response.Error == nil, + Error: errMsg, + DurationMS: response.DurationMS, + }) - logtrace.Error(ctx, "Batch store to node failed", logtrace.Fields{ - logtrace.FieldModule: "dht", - "err": errMsg, - "task_id": id, - "node": response.Message.Sender.String(), - }) - } } if requests > 0 { @@ -1680,19 +1725,19 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i "task_id": id, "success_rate": fmt.Sprintf("%.2f%%", successRate), }) - return successRate, requests, nil + return nil } else { logtrace.Info(ctx, "Failed to achieve desired success rate", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "success_rate": fmt.Sprintf("%.2f%%", successRate), }) - return successRate, requests, fmt.Errorf("failed to achieve desired success rate, only: %.2f%% successful", successRate) + return fmt.Errorf("failed to achieve desired success rate, only: %.2f%% successful", successRate) } } - return 0, 0, fmt.Errorf("no store operations were performed") + return fmt.Errorf("no store operations were performed") } func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[string]*Node, storageMap map[string][]int, typ int) chan *MessageWithError { @@ -1730,7 +1775,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ select { case <-ctx.Done(): - responses <- &MessageWithError{Error: ctx.Err()} + responses <- &MessageWithError{Error: ctx.Err(), Receiver: receiver} return default: keysToStore := storageMap[key] @@ -1749,7 +1794,9 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ data := &BatchStoreDataRequest{Data: toStore, Type: typ} request := s.newMessage(BatchStoreData, receiver, data) + start := time.Now() response, err := s.network.Call(ctx, request, false) + dur := 
time.Since(start).Milliseconds() if err != nil { if !isLocalCancel(err) { s.ignorelist.IncrementCount(receiver) @@ -1761,11 +1808,11 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ logtrace.FieldError: err.Error(), "request": request.String(), }) - responses <- &MessageWithError{Error: err, Message: response} + responses <- &MessageWithError{Error: err, Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur} return } - responses <- &MessageWithError{Message: response} + responses <- &MessageWithError{Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur} } }(node, key) } diff --git a/p2p/kademlia/message.go b/p2p/kademlia/message.go index 7ef3f206..0baef37c 100644 --- a/p2p/kademlia/message.go +++ b/p2p/kademlia/message.go @@ -54,6 +54,10 @@ func init() { type MessageWithError struct { Message *Message Error error + // Extended context for store RPCs + KeysCount int // number of items attempted in this RPC + Receiver *Node // receiver node info (target) + DurationMS int64 // duration of the RPC in milliseconds } // Message structure for kademlia network diff --git a/p2p/kademlia/rq_symbols.go b/p2p/kademlia/rq_symbols.go index 0b530f98..fbf6563d 100644 --- a/p2p/kademlia/rq_symbols.go +++ b/p2p/kademlia/rq_symbols.go @@ -96,9 +96,7 @@ func (s *DHT) storeSymbolsInP2P(ctx context.Context, dir string, keys []string) return fmt.Errorf("load symbols: %w", err) } - // Intentionally ignore (ratePct, requests) here; a non-nil error will already - // reflect whether the network store met the configured success threshold. 
- if _, _, err := s.StoreBatch(ctx, loaded, 1, dir); err != nil { + if err := s.StoreBatch(ctx, loaded, 1, dir); err != nil { return fmt.Errorf("p2p store batch: %w", err) } diff --git a/p2p/mocks/Client.go b/p2p/mocks/Client.go index 6d092c92..67991025 100644 --- a/p2p/mocks/Client.go +++ b/p2p/mocks/Client.go @@ -245,35 +245,15 @@ func (_m *Client) Store(ctx context.Context, data []byte, typ int) (string, erro } // StoreBatch provides a mock function with given fields: ctx, values, typ, taskID -func (_m *Client) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID string) (float64, int, error) { +func (_m *Client) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID string) error { ret := _m.Called(ctx, values, typ, taskID) - - var r0 float64 - if rf, ok := ret.Get(0).(func(context.Context, [][]byte, int, string) float64); ok { + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, [][]byte, int, string) error); ok { r0 = rf(ctx, values, typ, taskID) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(float64) - } - } - - var r1 int - if rf, ok := ret.Get(1).(func(context.Context, [][]byte, int, string) int); ok { - r1 = rf(ctx, values, typ, taskID) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(int) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func(context.Context, [][]byte, int, string) error); ok { - r2 = rf(ctx, values, typ, taskID) - } else { - r2 = ret.Error(2) + r0 = ret.Error(0) } - - return r0, r1, r2 + return r0 } type mockConstructorTestingTNewClient interface { diff --git a/p2p/p2p.go b/p2p/p2p.go index a9ca294c..5cb9c505 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -117,18 +117,12 @@ func (s *p2p) Store(ctx context.Context, data []byte, typ int) (string, error) { return s.dht.Store(ctx, data, typ) } -// StoreBatch will store a batch of values with their Blake3 hash as the key. 
-// -// It proxies to DHT.StoreBatch and returns: -// - successRatePct: percentage of successful node RPCs during the network store -// - requests: total node RPCs attempted for the batch -// - error: error if persistence or network store did not meet minimum success criteria -func (s *p2p) StoreBatch(ctx context.Context, data [][]byte, typ int, taskID string) (float64, int, error) { +// StoreBatch stores a batch of values by their Blake3 hash as the key. +func (s *p2p) StoreBatch(ctx context.Context, data [][]byte, typ int, taskID string) error { if !s.running { - return 0, 0, errors.New("p2p service is not running") + return errors.New("p2p service is not running") } - return s.dht.StoreBatch(ctx, data, typ, taskID) } diff --git a/pkg/p2pmetrics/metrics.go b/pkg/p2pmetrics/metrics.go new file mode 100644 index 00000000..1c1b0088 --- /dev/null +++ b/pkg/p2pmetrics/metrics.go @@ -0,0 +1,281 @@ +package p2pmetrics + +import ( + "context" + "sync" +) + +// Call represents a single per-node RPC outcome (store or retrieve). +type Call struct { + IP string `json:"ip"` + Address string `json:"address"` + Keys int `json:"keys"` + Success bool `json:"success"` + Error string `json:"error,omitempty"` + DurationMS int64 `json:"duration_ms"` +} + +// -------- Lightweight hooks ------------------------- + +var ( + storeMu sync.RWMutex + storeHook = make(map[string]func(Call)) + + retrieveMu sync.RWMutex + retrieveHook = make(map[string]func(Call)) + + foundLocalMu sync.RWMutex + foundLocalCb = make(map[string]func(int)) +) + +// RegisterStoreHook registers a callback to receive store RPC calls for a task. +func RegisterStoreHook(taskID string, fn func(Call)) { + storeMu.Lock() + defer storeMu.Unlock() + if fn == nil { + delete(storeHook, taskID) + return + } + storeHook[taskID] = fn +} + +// UnregisterStoreHook removes the registered store callback for a task. 
+func UnregisterStoreHook(taskID string) { RegisterStoreHook(taskID, nil) } + +// RecordStore invokes the registered store callback for the given task, if any. +func RecordStore(taskID string, c Call) { + storeMu.RLock() + fn := storeHook[taskID] + storeMu.RUnlock() + if fn != nil { + fn(c) + } +} + +// RegisterRetrieveHook registers a callback to receive retrieve RPC calls. +func RegisterRetrieveHook(taskID string, fn func(Call)) { + retrieveMu.Lock() + defer retrieveMu.Unlock() + if fn == nil { + delete(retrieveHook, taskID) + return + } + retrieveHook[taskID] = fn +} + +// UnregisterRetrieveHook removes the registered retrieve callback for a task. +func UnregisterRetrieveHook(taskID string) { RegisterRetrieveHook(taskID, nil) } + +// RecordRetrieve invokes the registered retrieve callback for the given task. +func RecordRetrieve(taskID string, c Call) { + retrieveMu.RLock() + fn := retrieveHook[taskID] + retrieveMu.RUnlock() + if fn != nil { + fn(c) + } +} + +// RegisterFoundLocalHook registers a callback to receive found-local counts. +func RegisterFoundLocalHook(taskID string, fn func(int)) { + foundLocalMu.Lock() + defer foundLocalMu.Unlock() + if fn == nil { + delete(foundLocalCb, taskID) + return + } + foundLocalCb[taskID] = fn +} + +// UnregisterFoundLocalHook removes the registered found-local callback. +func UnregisterFoundLocalHook(taskID string) { RegisterFoundLocalHook(taskID, nil) } + +// ReportFoundLocal invokes the registered found-local callback for the task. 
+func ReportFoundLocal(taskID string, count int) { + foundLocalMu.RLock() + fn := foundLocalCb[taskID] + foundLocalMu.RUnlock() + if fn != nil { + fn(count) + } +} + +// -------- Minimal in-process collectors for events -------------------------- + +// Store session +type storeSession struct { + CallsByIP map[string][]Call + SymbolsFirstPass int + SymbolsTotal int + IDFilesCount int + DurationMS int64 +} + +var storeSessions = struct{ m map[string]*storeSession }{m: map[string]*storeSession{}} + +// RegisterStoreBridge hooks store callbacks into the store session collector. +func StartStoreCapture(taskID string) { + RegisterStoreHook(taskID, func(c Call) { + s := storeSessions.m[taskID] + if s == nil { + s = &storeSession{CallsByIP: map[string][]Call{}} + storeSessions.m[taskID] = s + } + key := c.IP + if key == "" { + key = c.Address + } + s.CallsByIP[key] = append(s.CallsByIP[key], c) + }) +} + +func StopStoreCapture(taskID string) { UnregisterStoreHook(taskID) } + +// SetStoreSummary sets store summary fields for the first pass and totals. +// +// - symbolsFirstPass: number of symbols sent during the first pass +// - symbolsTotal: total symbols available in the directory +// - idFilesCount: number of ID/metadata files included in the first combined batch +// - durationMS: elapsed time of the first-pass store phase +func SetStoreSummary(taskID string, symbolsFirstPass, symbolsTotal, idFilesCount int, durationMS int64) { + if taskID == "" { + return + } + s := storeSessions.m[taskID] + if s == nil { + s = &storeSession{CallsByIP: map[string][]Call{}} + storeSessions.m[taskID] = s + } + s.SymbolsFirstPass = symbolsFirstPass + s.SymbolsTotal = symbolsTotal + s.IDFilesCount = idFilesCount + s.DurationMS = durationMS +} + +// BuildStoreEventPayloadFromCollector builds the store event payload (minimal). 
+func BuildStoreEventPayloadFromCollector(taskID string) map[string]any { + s := storeSessions.m[taskID] + if s == nil { + return map[string]any{ + "store": map[string]any{ + "duration_ms": int64(0), + "symbols_first_pass": 0, + "symbols_total": 0, + "id_files_count": 0, + "calls_by_ip": map[string][]Call{}, + }, + } + } + return map[string]any{ + "store": map[string]any{ + "duration_ms": s.DurationMS, + "symbols_first_pass": s.SymbolsFirstPass, + "symbols_total": s.SymbolsTotal, + "id_files_count": s.IDFilesCount, + "calls_by_ip": s.CallsByIP, + }, + } +} + +// Retrieve session +type retrieveSession struct { + CallsByIP map[string][]Call + FoundLocal int + RetrieveMS int64 + DecodeMS int64 +} + +var retrieveSessions = struct{ m map[string]*retrieveSession }{m: map[string]*retrieveSession{}} + +// RegisterRetrieveBridge hooks retrieve callbacks into the retrieve collector. +func StartRetrieveCapture(taskID string) { + RegisterRetrieveHook(taskID, func(c Call) { + s := retrieveSessions.m[taskID] + if s == nil { + s = &retrieveSession{CallsByIP: map[string][]Call{}} + retrieveSessions.m[taskID] = s + } + key := c.IP + if key == "" { + key = c.Address + } + s.CallsByIP[key] = append(s.CallsByIP[key], c) + }) + RegisterFoundLocalHook(taskID, func(n int) { + s := retrieveSessions.m[taskID] + if s == nil { + s = &retrieveSession{CallsByIP: map[string][]Call{}} + retrieveSessions.m[taskID] = s + } + s.FoundLocal = n + }) +} + +func StopRetrieveCapture(taskID string) { + UnregisterRetrieveHook(taskID) + UnregisterFoundLocalHook(taskID) +} + +// SetRetrieveSummary sets timing info for retrieve/decode phases. 
+func SetRetrieveSummary(taskID string, retrieveMS, decodeMS int64) { + if taskID == "" { + return + } + s := retrieveSessions.m[taskID] + if s == nil { + s = &retrieveSession{CallsByIP: map[string][]Call{}} + retrieveSessions.m[taskID] = s + } + s.RetrieveMS = retrieveMS + s.DecodeMS = decodeMS +} + +// BuildDownloadEventPayloadFromCollector builds the download section payload. +func BuildDownloadEventPayloadFromCollector(taskID string) map[string]any { + s := retrieveSessions.m[taskID] + if s == nil { + return map[string]any{ + "retrieve": map[string]any{ + "found_local": 0, + "retrieve_ms": int64(0), + "decode_ms": int64(0), + "calls_by_ip": map[string][]Call{}, + }, + } + } + return map[string]any{ + "retrieve": map[string]any{ + "found_local": s.FoundLocal, + "retrieve_ms": s.RetrieveMS, + "decode_ms": s.DecodeMS, + "calls_by_ip": s.CallsByIP, + }, + } +} + +// -------- Context helpers (dedicated to metrics tagging) -------------------- + +type ctxKey string + +var taskIDKey ctxKey = "p2pmetrics-task-id" + +// WithTaskID returns a child context with the metrics task ID set. +func WithTaskID(ctx context.Context, taskID string) context.Context { + if ctx == nil { + return context.Background() + } + return context.WithValue(ctx, taskIDKey, taskID) +} + +// TaskIDFromContext extracts the metrics task ID from context (or ""). 
+func TaskIDFromContext(ctx context.Context) string { + if ctx == nil { + return "" + } + if v := ctx.Value(taskIDKey); v != nil { + if s, ok := v.(string); ok { + return s + } + } + return "" +} diff --git a/sdk/adapters/supernodeservice/adapter.go b/sdk/adapters/supernodeservice/adapter.go index 3b6b61dc..6068147b 100644 --- a/sdk/adapters/supernodeservice/adapter.go +++ b/sdk/adapters/supernodeservice/adapter.go @@ -2,12 +2,11 @@ package supernodeservice import ( "context" + "encoding/json" "fmt" "io" "os" "path/filepath" - "regexp" - "strconv" "time" "github.com/LumeraProtocol/supernode/v2/gen/supernode" @@ -346,10 +345,44 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca event.KeyTaskID: in.TaskId, event.KeyActionID: in.ActionID, } - // Extract success rate if provided in message format: "... success_rate=NN.NN%" + // For artefacts stored, parse JSON payload with metrics (new minimal shape) if resp.EventType == cascade.SupernodeEventType_ARTEFACTS_STORED { - if rate, ok := parseSuccessRate(resp.Message); ok { - edata[event.KeySuccessRate] = rate + var payload map[string]any + if err := json.Unmarshal([]byte(resp.Message), &payload); err == nil { + if store, ok := payload["store"].(map[string]any); ok { + if v, ok := store["duration_ms"].(float64); ok { + edata[event.KeyStoreDurationMS] = int64(v) + } + if v, ok := store["symbols_first_pass"].(float64); ok { + edata[event.KeyStoreSymbolsFirstPass] = int64(v) + } + if v, ok := store["symbols_total"].(float64); ok { + edata[event.KeyStoreSymbolsTotal] = int64(v) + } + if v, ok := store["id_files_count"].(float64); ok { + edata[event.KeyStoreIDFilesCount] = int64(v) + } + if v, ok := store["calls_by_ip"]; ok { + edata[event.KeyStoreCallsByIP] = v + } + } else { + // Legacy fallbacks (previous SDK behavior) + if v, ok := payload["success_rate"].(float64); ok { + edata[event.KeySuccessRate] = v + } + if v, ok := payload["meta_duration_ms"].(float64); ok { + 
edata[event.KeyMetaDurationMS] = int64(v) + } + if v, ok := payload["sym_duration_ms"].(float64); ok { + edata[event.KeySymDurationMS] = int64(v) + } + if v, ok := payload["sym_nodes"]; ok { + edata[event.KeySymNodes] = v + } + if v, ok := payload["meta_nodes"]; ok { + edata[event.KeyMetaNodes] = v + } + } } } in.EventLogger(ctx, toSdkEventWithMessage(resp.EventType, resp.Message), resp.Message, edata) @@ -395,9 +428,9 @@ func (a *cascadeAdapter) GetSupernodeStatus(ctx context.Context) (SupernodeStatu // CascadeSupernodeDownload downloads a file from a supernode gRPC stream func (a *cascadeAdapter) CascadeSupernodeDownload( - ctx context.Context, - in *CascadeSupernodeDownloadRequest, - opts ...grpc.CallOption, + ctx context.Context, + in *CascadeSupernodeDownloadRequest, + opts ...grpc.CallOption, ) (*CascadeSupernodeDownloadResponse, error) { // Use provided context as-is (no correlation IDs) @@ -426,11 +459,11 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( } defer outFile.Close() - var ( - bytesWritten int64 - chunkIndex int - startedEmitted bool - ) + var ( + bytesWritten int64 + chunkIndex int + startedEmitted bool + ) // 3. 
Receive streamed responses for { @@ -449,28 +482,51 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( a.logger.Info(ctx, "supernode event", "event_type", x.Event.EventType, "message", x.Event.Message, "action_id", in.ActionID) if in.EventLogger != nil { - in.EventLogger(ctx, toSdkEvent(x.Event.EventType), x.Event.Message, event.EventData{ + edata := event.EventData{ event.KeyActionID: in.ActionID, event.KeyEventType: x.Event.EventType, event.KeyMessage: x.Event.Message, - }) + } + // Parse detailed metrics for downloaded event if JSON payload provided (new minimal shape) + if x.Event.EventType == cascade.SupernodeEventType_ARTEFACTS_DOWNLOADED { + var payload map[string]any + if err := json.Unmarshal([]byte(x.Event.Message), &payload); err == nil { + if retrieve, ok := payload["retrieve"].(map[string]any); ok { + if v, ok := retrieve["found_local"].(float64); ok { + edata[event.KeyRetrieveFoundLocal] = int64(v) + } + if v, ok := retrieve["retrieve_ms"].(float64); ok { + edata[event.KeyRetrieveMS] = int64(v) + // Maintain old key for rough duration if consumers expect it + edata[event.KeyDHTDurationMS] = int64(v) + } + if v, ok := retrieve["decode_ms"].(float64); ok { + edata[event.KeyDecodeMS] = int64(v) + } + if v, ok := retrieve["calls_by_ip"]; ok { + edata[event.KeyRetrieveCallsByIP] = v + } + } + } + } + in.EventLogger(ctx, toSdkEvent(x.Event.EventType), x.Event.Message, edata) } - // 3b. Actual data chunk - case *cascade.DownloadResponse_Chunk: - data := x.Chunk.Data - if len(data) == 0 { - continue - } - if !startedEmitted { - if in.EventLogger != nil { - in.EventLogger(ctx, event.SDKDownloadStarted, "Download started", event.EventData{event.KeyActionID: in.ActionID}) - } - startedEmitted = true - } - if _, err := outFile.Write(data); err != nil { - return nil, fmt.Errorf("write chunk: %w", err) - } + // 3b. 
Actual data chunk + case *cascade.DownloadResponse_Chunk: + data := x.Chunk.Data + if len(data) == 0 { + continue + } + if !startedEmitted { + if in.EventLogger != nil { + in.EventLogger(ctx, event.SDKDownloadStarted, "Download started", event.EventData{event.KeyActionID: in.ActionID}) + } + startedEmitted = true + } + if _, err := outFile.Write(data); err != nil { + return nil, fmt.Errorf("write chunk: %w", err) + } bytesWritten += int64(len(data)) chunkIndex++ @@ -481,19 +537,19 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( a.logger.Info(ctx, "download complete", "bytes_written", bytesWritten, "path", in.OutputPath, "action_id", in.ActionID) - if in.EventLogger != nil { - in.EventLogger(ctx, event.SDKDownloadCompleted, "Download completed", event.EventData{event.KeyActionID: in.ActionID, event.KeyOutputPath: in.OutputPath}) - } - return &CascadeSupernodeDownloadResponse{ - Success: true, - Message: "artefact downloaded", - OutputPath: in.OutputPath, - }, nil + if in.EventLogger != nil { + in.EventLogger(ctx, event.SDKDownloadCompleted, "Download completed", event.EventData{event.KeyActionID: in.ActionID, event.KeyOutputPath: in.OutputPath}) + } + return &CascadeSupernodeDownloadResponse{ + Success: true, + Message: "artefact downloaded", + OutputPath: in.OutputPath, + }, nil } // toSdkEvent converts a supernode-side enum value into an internal SDK EventType. 
func toSdkEvent(e cascade.SupernodeEventType) event.EventType { - switch e { + switch e { case cascade.SupernodeEventType_ACTION_RETRIEVED: return event.SupernodeActionRetrieved case cascade.SupernodeEventType_ACTION_FEE_VERIFIED: @@ -516,14 +572,14 @@ func toSdkEvent(e cascade.SupernodeEventType) event.EventType { return event.SupernodeArtefactsStored case cascade.SupernodeEventType_ACTION_FINALIZED: return event.SupernodeActionFinalized - case cascade.SupernodeEventType_ARTEFACTS_DOWNLOADED: - return event.SupernodeArtefactsDownloaded - case cascade.SupernodeEventType_NETWORK_RETRIEVE_STARTED: - return event.SupernodeNetworkRetrieveStarted - case cascade.SupernodeEventType_DECODE_COMPLETED: - return event.SupernodeDecodeCompleted - case cascade.SupernodeEventType_SERVE_READY: - return event.SupernodeServeReady + case cascade.SupernodeEventType_ARTEFACTS_DOWNLOADED: + return event.SupernodeArtefactsDownloaded + case cascade.SupernodeEventType_NETWORK_RETRIEVE_STARTED: + return event.SupernodeNetworkRetrieveStarted + case cascade.SupernodeEventType_DECODE_COMPLETED: + return event.SupernodeDecodeCompleted + case cascade.SupernodeEventType_SERVE_READY: + return event.SupernodeServeReady case cascade.SupernodeEventType_FINALIZE_SIMULATED: return event.SupernodeFinalizeSimulated case cascade.SupernodeEventType_FINALIZE_SIMULATION_FAILED: @@ -542,20 +598,6 @@ func toSdkEventWithMessage(e cascade.SupernodeEventType, msg string) event.Event return toSdkEvent(e) } -var rateRe = regexp.MustCompile(`success_rate=([0-9]+(?:\.[0-9]+)?)%`) - -func parseSuccessRate(msg string) (float64, bool) { - m := rateRe.FindStringSubmatch(msg) - if len(m) != 2 { - return 0, false - } - f, err := strconv.ParseFloat(m[1], 64) - if err != nil { - return 0, false - } - return f, true -} - func toSdkSupernodeStatus(resp *supernode.StatusResponse) *SupernodeStatusresponse { result := &SupernodeStatusresponse{} result.Version = resp.Version diff --git a/sdk/event/keys.go b/sdk/event/keys.go index 
4a6f8eaa..5089a30c 100644 --- a/sdk/event/keys.go +++ b/sdk/event/keys.go @@ -30,4 +30,30 @@ const ( // Task specific keys KeyTaskID EventDataKey = "task_id" KeyActionID EventDataKey = "action_id" + + // Cascade storage metrics keys + KeyMetaDurationMS EventDataKey = "meta_duration_ms" + KeySymDurationMS EventDataKey = "sym_duration_ms" + KeyMetaNodes EventDataKey = "meta_nodes" + KeySymNodes EventDataKey = "sym_nodes" + + // Combined store metrics (metadata + symbols) + KeyStoreDurationMS EventDataKey = "store_duration_ms" + KeyStoreRequests EventDataKey = "store_requests" + KeyStoreCalls EventDataKey = "store_calls" + // New minimal store metrics + KeyStoreSymbolsFirstPass EventDataKey = "store_symbols_first_pass" + KeyStoreSymbolsTotal EventDataKey = "store_symbols_total" + KeyStoreIDFilesCount EventDataKey = "store_id_files_count" + KeyStoreCallsByIP EventDataKey = "store_calls_by_ip" + + // Download (retrieve) detailed metrics + KeyDHTNodes EventDataKey = "dht_nodes" + KeyDHTCalls EventDataKey = "dht_calls" + KeyDHTDurationMS EventDataKey = "dht_duration_ms" + // New minimal download metrics + KeyRetrieveFoundLocal EventDataKey = "retrieve_found_local" + KeyRetrieveMS EventDataKey = "retrieve_ms" + KeyDecodeMS EventDataKey = "decode_ms" + KeyRetrieveCallsByIP EventDataKey = "retrieve_calls_by_ip" ) diff --git a/sdk/net/factory.go b/sdk/net/factory.go index 510d1b6b..b9fad9fd 100644 --- a/sdk/net/factory.go +++ b/sdk/net/factory.go @@ -36,12 +36,13 @@ func NewClientFactory(ctx context.Context, logger log.Logger, keyring keyring.Ke logger.Debug(ctx, "Creating supernode client factory", "localAddress", config.LocalCosmosAddress) - // Optimized for streaming 1GB files with 4MB chunks (10 concurrent streams) + // Tuned for 1GB max files with 4MB chunks + // Reduce in-flight memory by aligning windows and msg sizes to chunk size. 
opts := client.DefaultClientOptions() - opts.MaxRecvMsgSize = 16 * 1024 * 1024 // 16MB to match server - opts.MaxSendMsgSize = 16 * 1024 * 1024 // 16MB to match server - opts.InitialWindowSize = 16 * 1024 * 1024 // 16MB per stream (4x chunk size) - opts.InitialConnWindowSize = 160 * 1024 * 1024 // 160MB (16MB x 10 streams) + opts.MaxRecvMsgSize = 8 * 1024 * 1024 // 8MB: supports 4MB chunks + overhead + opts.MaxSendMsgSize = 8 * 1024 * 1024 // 8MB: supports 4MB chunks + overhead + opts.InitialWindowSize = 4 * 1024 * 1024 // 4MB per-stream window ≈ chunk size + opts.InitialConnWindowSize = 64 * 1024 * 1024 // 64MB per-connection window return &ClientFactory{ logger: logger, diff --git a/supernode/node/action/server/cascade/cascade_action_server.go b/supernode/node/action/server/cascade/cascade_action_server.go index b6b6112c..a99fbf0a 100644 --- a/supernode/node/action/server/cascade/cascade_action_server.go +++ b/supernode/node/action/server/cascade/cascade_action_server.go @@ -292,87 +292,64 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS } logtrace.Info(ctx, "streaming artefact file in chunks", fields) - restoredFile, err := readFileContentsInChunks(restoredFilePath) + // Open the restored file and stream directly from disk to avoid buffering entire file in memory + f, err := os.Open(restoredFilePath) if err != nil { - logtrace.Error(ctx, "failed to read restored file", logtrace.Fields{ - logtrace.FieldError: err.Error(), - }) + logtrace.Error(ctx, "failed to open restored file", logtrace.Fields{logtrace.FieldError: err.Error()}) + return err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + logtrace.Error(ctx, "failed to stat restored file", logtrace.Fields{logtrace.FieldError: err.Error()}) return err } - logtrace.Info(ctx, "file has been read in chunks", fields) // Calculate optimal chunk size based on file size - chunkSize := calculateOptimalChunkSize(int64(len(restoredFile))) + chunkSize := 
calculateOptimalChunkSize(fi.Size()) logtrace.Info(ctx, "calculated optimal chunk size for download", logtrace.Fields{ - "file_size": len(restoredFile), + "file_size": fi.Size(), "chunk_size": chunkSize, }) - // Announce: file is ready to be served to the client - if err := stream.Send(&pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Event{ - Event: &pb.DownloadEvent{ - EventType: pb.SupernodeEventType_SERVE_READY, - Message: "File available for download", - }, - }, - }); err != nil { - logtrace.Error(ctx, "failed to send serve-ready event", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - - // Split and stream the file using adaptive chunk size - for i := 0; i < len(restoredFile); i += chunkSize { - end := i + chunkSize - if end > len(restoredFile) { - end = len(restoredFile) - } - - err := stream.Send(&pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Chunk{ - Chunk: &pb.DataChunk{ - Data: restoredFile[i:end], - }, + // Announce: file is ready to be served to the client + if err := stream.Send(&pb.DownloadResponse{ + ResponseType: &pb.DownloadResponse_Event{ + Event: &pb.DownloadEvent{ + EventType: pb.SupernodeEventType_SERVE_READY, + Message: "File available for download", }, - }) - - if err != nil { - logtrace.Error(ctx, "failed to stream chunk", logtrace.Fields{ - logtrace.FieldError: err.Error(), - }) - return err - } - } - - // Cleanup is handled in deferred block above - - logtrace.Info(ctx, "completed streaming all chunks", fields) - return nil -} - -func readFileContentsInChunks(filePath string) ([]byte, error) { - f, err := os.Open(filePath) - if err != nil { - return nil, err + }, + }); err != nil { + logtrace.Error(ctx, "failed to send serve-ready event", logtrace.Fields{logtrace.FieldError: err.Error()}) + return err } - defer f.Close() - - buf := make([]byte, 1024*1024) - var fileBytes []byte + // Stream the file in fixed-size chunks + buf := make([]byte, chunkSize) for { n, readErr := f.Read(buf) if n > 0 { - 
// Process chunk - fileBytes = append(fileBytes, buf[:n]...) + if err := stream.Send(&pb.DownloadResponse{ + ResponseType: &pb.DownloadResponse_Chunk{ + Chunk: &pb.DataChunk{Data: buf[:n]}, + }, + }); err != nil { + logtrace.Error(ctx, "failed to stream chunk", logtrace.Fields{logtrace.FieldError: err.Error()}) + return err + } } if readErr == io.EOF { break } if readErr != nil { - return nil, fmt.Errorf("chunked read failed: %w", readErr) + return fmt.Errorf("chunked read failed: %w", readErr) } } - return fileBytes, nil + // Cleanup is handled in deferred block above + + logtrace.Info(ctx, "completed streaming all chunks", fields) + return nil } diff --git a/supernode/services/cascade/adaptors/mocks/p2p_mock.go b/supernode/services/cascade/adaptors/mocks/p2p_mock.go index 025109b2..4f62a440 100644 --- a/supernode/services/cascade/adaptors/mocks/p2p_mock.go +++ b/supernode/services/cascade/adaptors/mocks/p2p_mock.go @@ -43,12 +43,11 @@ func (m *MockP2PService) EXPECT() *MockP2PServiceMockRecorder { } // StoreArtefacts mocks base method. -func (m *MockP2PService) StoreArtefacts(ctx context.Context, req adaptors.StoreArtefactsRequest, f logtrace.Fields) (adaptors.StoreArtefactsMetrics, error) { +func (m *MockP2PService) StoreArtefacts(ctx context.Context, req adaptors.StoreArtefactsRequest, f logtrace.Fields) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StoreArtefacts", ctx, req, f) - ret0, _ := ret[0].(adaptors.StoreArtefactsMetrics) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret0, _ := ret[0].(error) + return ret0 } // StoreArtefacts indicates an expected call of StoreArtefacts. 
diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go index be2eb74c..116d6810 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/services/cascade/adaptors/p2p.go @@ -13,6 +13,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/p2p" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/LumeraProtocol/supernode/v2/supernode/services/common/storage" @@ -32,16 +33,8 @@ const ( //go:generate mockgen -destination=mocks/p2p_mock.go -package=cascadeadaptormocks -source=p2p.go type P2PService interface { // StoreArtefacts stores ID files and RaptorQ symbols. - // - // Aggregation model: - // - Each underlying StoreBatch returns (ratePct, requests) where requests is - // the number of node RPCs. The aggregated success rate can be computed as - // a weighted average by requests across metadata and symbol batches, - // yielding a global success view across all node calls attempted for this action. - // See implementation notes for item‑weighted aggregation currently in use. - // - // Returns detailed metrics for both categories along with an aggregated view. - StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) (StoreArtefactsMetrics, error) + // Metrics are recorded via internal metrics helpers; no metrics are returned. + StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error } // p2pImpl is the default implementation of the P2PService interface. @@ -62,40 +55,23 @@ type StoreArtefactsRequest struct { SymbolsDir string } -// StoreArtefactsMetrics captures detailed outcomes of metadata and symbols storage. 
-type StoreArtefactsMetrics struct { - // Metadata (ID files) - MetaRate float64 // percentage (0–100) - MetaRequests int // number of node RPCs attempted for metadata - MetaCount int // number of metadata files attempted - - // Symbols - SymRate float64 // percentage (0–100) across all symbol batches (item-weighted) - SymRequests int // total node RPCs for symbol batches - SymCount int // total symbols processed - - // Aggregated view - AggregatedRate float64 // item-weighted across metadata and symbols - TotalRequests int // MetaRequests + SymRequests -} +func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { + logtrace.Info(ctx, "About to store artefacts (metadata + symbols)", logtrace.Fields{"taskID": req.TaskID, "id_files": len(req.IDFiles)}) + + // Enable per-node store RPC capture for this task + cm.StartStoreCapture(req.TaskID) + defer cm.StopStoreCapture(req.TaskID) -func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) (StoreArtefactsMetrics, error) { - logtrace.Info(ctx, "About to store ID files", logtrace.Fields{"taskID": req.TaskID, "fileCount": len(req.IDFiles)}) - // NOTE: For now we aggregate by item count (ID files + symbol count). - // TODO(move-to-request-weighted): Switch aggregation to request-weighted once - // external consumers and metrics expectations are updated. We already return - // totalRequests so the event/logs can include accurate request counts. 
- symRate, symCount, symReqs, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, req.ActionID, req.SymbolsDir, req.IDFiles) + start := time.Now() + firstPassSymbols, totalSymbols, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, req.ActionID, req.SymbolsDir, req.IDFiles) if err != nil { - return StoreArtefactsMetrics{}, errors.Wrap(err, "error storing raptor-q symbols") + return errors.Wrap(err, "error storing artefacts") } - logtrace.Info(ctx, "raptor-q symbols have been stored", f) - - return StoreArtefactsMetrics{ - SymRate: symRate, - SymRequests: symReqs, - SymCount: symCount, - }, nil + dur := time.Since(start).Milliseconds() + logtrace.Info(ctx, "artefacts have been stored", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total": totalSymbols, "id_files_count": len(req.IDFiles)}) + // Record store summary for later event emission + cm.SetStoreSummary(req.TaskID, firstPassSymbols, totalSymbols, len(req.IDFiles), dur) + return nil } // storeCascadeSymbols loads symbols from `symbolsDir`, optionally downsamples, @@ -105,16 +81,16 @@ func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, // - the total number of node requests attempted across batches // // Returns (aggRate, totalSymbols, totalRequests, err). 
-func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, actionID string, symbolsDir string, metadataFiles [][]byte) (float64, int, int, error) { +func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, actionID string, symbolsDir string, metadataFiles [][]byte) (int, int, error) { /* record directory in DB */ if err := p.rqStore.StoreSymbolDirectory(taskID, symbolsDir); err != nil { - return 0, 0, 0, fmt.Errorf("store symbol dir: %w", err) + return 0, 0, fmt.Errorf("store symbol dir: %w", err) } /* gather every symbol path under symbolsDir ------------------------- */ keys, err := walkSymbolTree(symbolsDir) if err != nil { - return 0, 0, 0, err + return 0, 0, err } totalAvailable := len(keys) @@ -142,10 +118,7 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action /* stream in fixed-size batches -------------------------------------- */ - sumWeightedRates := 0.0 - totalSymbols := 0 // symbols only - totalItems := 0 // symbols + metadata (for rate weighting) - totalRequests := 0 + totalSymbols := 0 // symbols stored firstBatchProcessed := false for start := 0; start < len(keys); { @@ -168,7 +141,7 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action // Load just this symbol chunk symBytes, err := utils.LoadSymbols(symbolsDir, batch) if err != nil { - return 0, totalSymbols, totalRequests, fmt.Errorf("load symbols: %w", err) + return 0, 0, fmt.Errorf("load symbols: %w", err) } // Build combined payload: metadata first, then symbols @@ -178,44 +151,30 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action // Send as the same data type you use for symbols bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) - rate, reqs, err := p.p2p.StoreBatch(bctx, payload, storage.P2PDataRaptorQSymbol, taskID) + bctx = cm.WithTaskID(bctx, taskID) + err = p.p2p.StoreBatch(bctx, payload, storage.P2PDataRaptorQSymbol, taskID) cancel() if err != nil 
{ - agg := 0.0 - if totalItems > 0 { - agg = sumWeightedRates / float64(totalItems) - } - return agg, totalSymbols, totalRequests + reqs, fmt.Errorf("p2p store batch (first): %w", err) + return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err) } - // Metrics - items := len(payload) // meta + symbols - sumWeightedRates += rate * float64(items) - totalItems += items totalSymbols += len(symBytes) - totalRequests += reqs + // No per-RPC metrics propagated from p2p // Delete only the symbols we uploaded if len(batch) > 0 { if err := utils.DeleteSymbols(ctx, symbolsDir, batch); err != nil { - return rate, totalSymbols, totalRequests, fmt.Errorf("delete symbols: %w", err) + return totalSymbols, totalAvailable, fmt.Errorf("delete symbols: %w", err) } } firstBatchProcessed = true } else { - rate, requests, count, err := p.storeSymbolsInP2P(ctx, taskID, symbolsDir, batch) + count, err := p.storeSymbolsInP2P(ctx, taskID, symbolsDir, batch) if err != nil { - agg := 0.0 - if totalItems > 0 { - agg = sumWeightedRates / float64(totalItems) - } - return agg, totalSymbols, totalRequests, err + return totalSymbols, totalAvailable, err } - sumWeightedRates += rate * float64(count) - totalItems += count totalSymbols += count - totalRequests += requests } start = end @@ -227,17 +186,13 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action achievedPct = (float64(totalSymbols) / float64(totalAvailable)) * 100.0 } logtrace.Info(ctx, "first-pass achieved coverage (symbols)", - logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct, "total_requests": totalRequests}) + logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct}) if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { - return 0, totalSymbols, totalRequests, fmt.Errorf("update first-batch flag: %w", err) + return totalSymbols, totalAvailable, fmt.Errorf("update first-batch flag: %w", err) } - aggRate := 0.0 
- if totalItems > 0 { - aggRate = sumWeightedRates / float64(totalItems) - } - return aggRate, totalSymbols, totalRequests, nil + return totalSymbols, totalAvailable, nil } @@ -269,27 +224,28 @@ func walkSymbolTree(root string) ([]string, error) { // storeSymbolsInP2P loads a batch of symbols and stores them via P2P. // Returns (ratePct, requests, count, error) where `count` is the number of symbols in this batch. -func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (float64, int, int, error) { +func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (int, error) { logtrace.Info(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)}) symbols, err := utils.LoadSymbols(root, fileKeys) if err != nil { - return 0, 0, 0, fmt.Errorf("load symbols: %w", err) + return 0, fmt.Errorf("load symbols: %w", err) } symCtx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) + symCtx = cm.WithTaskID(symCtx, taskID) defer cancel() - rate, requests, err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID) - if err != nil { - return rate, requests, len(symbols), fmt.Errorf("p2p store batch: %w", err) + if err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID); err != nil { + return len(symbols), fmt.Errorf("p2p store batch: %w", err) } logtrace.Info(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { - return rate, requests, len(symbols), fmt.Errorf("delete symbols: %w", err) + return len(symbols), fmt.Errorf("delete symbols: %w", err) } logtrace.Info(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) - return rate, requests, len(symbols), nil + // No per-RPC metrics propagated from p2p + return len(symbols), nil } diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index 78db1758..f88d3941 
100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -3,23 +3,23 @@ package cascade import ( "bytes" "context" + "encoding/json" "fmt" "os" "sort" + "time" actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/crypto" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" "github.com/LumeraProtocol/supernode/v2/supernode/services/common" ) -const ( - requiredSymbolPercent = 9 -) - type DownloadRequest struct { ActionID string } @@ -36,8 +36,8 @@ func (task *CascadeRegistrationTask) Download( req *DownloadRequest, send func(resp *DownloadResponse) error, ) (err error) { - fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} - logtrace.Info(ctx, "Cascade download request received", fields) + fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} + logtrace.Info(ctx, "Cascade download request received", fields) // Ensure task status is finalized regardless of outcome defer func() { @@ -54,8 +54,8 @@ func (task *CascadeRegistrationTask) Download( fields[logtrace.FieldError] = err return task.wrapErr(ctx, "failed to get action", err, fields) } - logtrace.Info(ctx, "Action retrieved", fields) - task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) + logtrace.Info(ctx, "Action retrieved", fields) + task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) if actionDetails.GetAction().State != actiontypes.ActionStateDone { err = errors.New("action is not in a valid state") @@ -63,27 +63,27 @@ func (task *CascadeRegistrationTask) Download( 
fields[logtrace.FieldActionState] = actionDetails.GetAction().State return task.wrapErr(ctx, "action not found", err, fields) } - logtrace.Info(ctx, "Action state validated", fields) + logtrace.Info(ctx, "Action state validated", fields) metadata, err := task.decodeCascadeMetadata(ctx, actionDetails.GetAction().Metadata, fields) if err != nil { fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) } - logtrace.Info(ctx, "Cascade metadata decoded", fields) - task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send) + logtrace.Info(ctx, "Cascade metadata decoded", fields) + task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send) - // Notify: network retrieval phase begins - task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) + // Notify: network retrieval phase begins + task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) - filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields) - if err != nil { - fields[logtrace.FieldError] = err.Error() - return task.wrapErr(ctx, "failed to download artifacts", err, fields) - } - logtrace.Info(ctx, "File reconstructed and hash verified", fields) - // Notify: decode completed, file ready on disk - task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, "Decode completed", filePath, tmpDir, send) + filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return task.wrapErr(ctx, "failed to download artifacts", err, fields) + } + logtrace.Info(ctx, "File reconstructed and hash verified", fields) + // Notify: decode completed, file ready on disk + task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, 
"Decode completed", filePath, tmpDir, send) return nil } @@ -91,7 +91,12 @@ func (task *CascadeRegistrationTask) Download( func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields) (string, string, error) { logtrace.Info(ctx, "started downloading the artifacts", fields) - var layout codec.Layout + var ( + layout codec.Layout + layoutFetchMS int64 + layoutDecodeMS int64 + layoutAttempts int + ) for _, indexID := range metadata.RqIdsIds { indexFile, err := task.P2PClient.Retrieve(ctx, indexID) @@ -107,11 +112,14 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti } // Try to retrieve layout files using layout IDs from index file - layout, err = task.retrieveLayoutFromIndex(ctx, indexData, fields) + var netMS, decMS int64 + layout, netMS, decMS, layoutAttempts, err = task.retrieveLayoutFromIndex(ctx, indexData, fields) if err != nil { logtrace.Info(ctx, "failed to retrieve layout from index", fields) continue } + layoutFetchMS = netMS + layoutDecodeMS = decMS if len(layout.Blocks) > 0 { logtrace.Info(ctx, "layout file retrieved via index", fields) @@ -122,7 +130,10 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti if len(layout.Blocks) == 0 { return "", "", errors.New("no symbols found in RQ metadata") } - + // Persist layout timing in fields for downstream metrics + fields["layout_fetch_ms"] = layoutFetchMS + fields["layout_decode_ms"] = layoutDecodeMS + fields["layout_attempts"] = layoutAttempts return task.restoreFileFromLayout(ctx, layout, metadata.DataHash, actionID) } @@ -143,19 +154,44 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( sort.Strings(allSymbols) totalSymbols := len(allSymbols) - requiredSymbols := (totalSymbols*requiredSymbolPercent + 99) / 100 - fields["totalSymbols"] = totalSymbols - fields["requiredSymbols"] = requiredSymbols - logtrace.Info(ctx, "Symbols to be 
retrieved", fields) + logtrace.Info(ctx, "Retrieving all symbols for decode", fields) - // Progressive retrieval moved to helper for readability/testing - decodeInfo, err := task.retrieveAndDecodeProgressively(ctx, layout, actionID, fields) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to decode symbols progressively", fields) - return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", err) - } + // Enable retrieve metrics capture for this action + cm.StartRetrieveCapture(actionID) + defer cm.StopRetrieveCapture(actionID) + + // Measure symbols batch retrieve duration + retrieveStart := time.Now() + // Tag context with metrics task ID (actionID) + ctxRetrieve := cm.WithTaskID(ctx, actionID) + symbols, err := task.P2PClient.BatchRetrieve(ctxRetrieve, allSymbols, totalSymbols, actionID) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "batch retrieve failed", fields) + return "", "", fmt.Errorf("batch retrieve symbols: %w", err) + } + retrieveMS := time.Since(retrieveStart).Milliseconds() + + // Measure decode duration + decodeStart := time.Now() + decodeInfo, err := task.RQ.Decode(ctx, adaptors.DecodeRequest{ + ActionID: actionID, + Symbols: symbols, + Layout: layout, + }) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "decode failed", fields) + return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", err) + } + decodeMS := time.Since(decodeStart).Milliseconds() + + // Set minimal retrieve summary and emit event strictly from internal collector + cm.SetRetrieveSummary(actionID, retrieveMS, decodeMS) + if b, err := json.MarshalIndent(cm.BuildDownloadEventPayloadFromCollector(actionID), "", " "); err == nil { + task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", func(resp *DownloadResponse) error { return nil }) + } fileHash, err := crypto.HashFileIncrementally(decodeInfo.FilePath, 0) if err != nil { @@ -175,7 
+211,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( fields[logtrace.FieldError] = err.Error() return "", decodeInfo.DecodeTmpDir, err } - logtrace.Info(ctx, "File successfully restored and hash verified", fields) + logtrace.Info(ctx, "File successfully restored and hash verified", fields) return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil } @@ -207,25 +243,35 @@ func (task *CascadeRegistrationTask) parseIndexFile(data []byte) (IndexFile, err } // retrieveLayoutFromIndex retrieves layout file using layout IDs from index file -func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context, indexData IndexFile, fields logtrace.Fields) (codec.Layout, error) { +func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context, indexData IndexFile, fields logtrace.Fields) (codec.Layout, int64, int64, int, error) { // Try to retrieve layout files using layout IDs from index file + var ( + totalFetchMS int64 + totalDecodeMS int64 + attempts int + ) for _, layoutID := range indexData.LayoutIDs { + attempts++ + t0 := time.Now() layoutFile, err := task.P2PClient.Retrieve(ctx, layoutID) + totalFetchMS += time.Since(t0).Milliseconds() if err != nil || len(layoutFile) == 0 { continue } + t1 := time.Now() layout, _, _, err := parseRQMetadataFile(layoutFile) + totalDecodeMS += time.Since(t1).Milliseconds() if err != nil { continue } if len(layout.Blocks) > 0 { - return layout, nil + return layout, totalFetchMS, totalDecodeMS, attempts, nil } } - return codec.Layout{}, errors.New("no valid layout found in index") + return codec.Layout{}, totalFetchMS, totalDecodeMS, attempts, errors.New("no valid layout found in index") } func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, actionID string) error { diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go index eed20700..fb8c7ef5 100644 --- a/supernode/services/cascade/helper.go +++ b/supernode/services/cascade/helper.go @@ 
-5,7 +5,6 @@ import ( "context" "encoding/base64" "fmt" - stdmath "math" "strconv" "strings" @@ -17,6 +16,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" + cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/golang/protobuf/proto" @@ -25,6 +25,8 @@ import ( "google.golang.org/grpc/status" ) +// layout stats helpers removed to keep download metrics minimal. + func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID string, f logtrace.Fields) (*actiontypes.Action, error) { res, err := task.LumeraClient.GetAction(ctx, actionID) if err != nil { @@ -171,14 +173,8 @@ func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta } // storeArtefacts persists cascade artefacts (ID files + RaptorQ symbols) via the -// P2P adaptor and returns an aggregated network success rate percentage and total -// node requests used to compute it. -// -// Aggregation details: -// - Underlying batches return (ratePct, requests) where `requests` is the number -// of node RPCs attempted. The adaptor computes a weighted average by requests -// across all batches, reflecting the overall network success rate. -func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) (adaptors.StoreArtefactsMetrics, error) { +// P2P adaptor. P2P does not return metrics; cascade summarizes and emits them. 
+func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) error { return task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ IDFiles: idFiles, SymbolsDir: symbolsDir, @@ -203,22 +199,24 @@ func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, er // emitArtefactsStored builds a single-line metrics summary and emits the // SupernodeEventTypeArtefactsStored event while logging the metrics line. func (task *CascadeRegistrationTask) emitArtefactsStored( - ctx context.Context, - metrics adaptors.StoreArtefactsMetrics, - fields logtrace.Fields, - send func(resp *RegisterResponse) error, + ctx context.Context, + fields logtrace.Fields, + _ codec.Layout, + send func(resp *RegisterResponse) error, ) { - ok := int(stdmath.Round(metrics.AggregatedRate / 100.0 * float64(metrics.TotalRequests))) - fail := metrics.TotalRequests - ok - line := fmt.Sprintf( - "artefacts stored | success_rate=%.2f%% agg_rate=%.2f%% total_reqs=%d ok=%d fail=%d meta_rate=%.2f%% meta_reqs=%d meta_count=%d sym_rate=%.2f%% sym_reqs=%d sym_count=%d", - metrics.AggregatedRate, metrics.AggregatedRate, metrics.TotalRequests, ok, fail, - metrics.MetaRate, metrics.MetaRequests, metrics.MetaCount, - metrics.SymRate, metrics.SymRequests, metrics.SymCount, - ) - fields["metrics"] = line + if fields == nil { + fields = logtrace.Fields{} + } + + // Build payload strictly from internal collector (no P2P snapshots) + payload := cm.BuildStoreEventPayloadFromCollector(task.ID()) + + b, _ := json.MarshalIndent(payload, "", " ") + msg := string(b) + fields["metrics_json"] = msg logtrace.Info(ctx, "artefacts have been stored", fields) - task.streamEvent(SupernodeEventTypeArtefactsStored, line, "", send) + task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) + // No central state to clear; adaptor returns calls inline } // extractSignatureAndFirstPart extracts the signature and first part 
from the encoded data diff --git a/supernode/services/cascade/progressive_decode.go b/supernode/services/cascade/progressive_decode.go deleted file mode 100644 index 65cd6820..00000000 --- a/supernode/services/cascade/progressive_decode.go +++ /dev/null @@ -1,86 +0,0 @@ -package cascade - -import ( - "context" - "fmt" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" -) - -// retrieveAndDecodeProgressively performs a minimal two-step retrieval for a single-block layout: -// 1) Fetch approximately requiredSymbolPercent of symbols and try decoding. -// 2) If that fails, fetch all available symbols from the block and try again. -// This replaces earlier multi-block balancing and multi-threshold escalation. -func (task *CascadeRegistrationTask) retrieveAndDecodeProgressively( - ctx context.Context, - layout codec.Layout, - actionID string, - fields logtrace.Fields, -) (adaptors.DecodeResponse, error) { - // Ensure base context fields are present for logs - if fields == nil { - fields = logtrace.Fields{} - } - fields[logtrace.FieldActionID] = actionID - - if len(layout.Blocks) == 0 { - return adaptors.DecodeResponse{}, fmt.Errorf("empty layout: no blocks") - } - - // Single-block fast path - if len(layout.Blocks) == 1 { - blk := layout.Blocks[0] - total := len(blk.Symbols) - if total == 0 { - return adaptors.DecodeResponse{}, fmt.Errorf("empty layout: no symbols") - } - - // Step 1: try with requiredSymbolPercent of symbols - reqCount := (total*requiredSymbolPercent + 99) / 100 - if reqCount < 1 { - reqCount = 1 - } - if reqCount > total { - reqCount = total - } - fields["targetPercent"] = requiredSymbolPercent - fields["targetCount"] = reqCount - logtrace.Info(ctx, "retrieving initial symbols (single block)", fields) - - keys := blk.Symbols[:reqCount] - symbols, err := task.P2PClient.BatchRetrieve(ctx, keys, reqCount, actionID) - 
if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to retrieve symbols", fields) - return adaptors.DecodeResponse{}, fmt.Errorf("failed to retrieve symbols: %w", err) - } - - decodeInfo, err := task.RQ.Decode(ctx, adaptors.DecodeRequest{ - ActionID: actionID, - Symbols: symbols, - Layout: layout, - }) - if err == nil { - return decodeInfo, nil - } - - // Step 2: escalate to all symbols - logtrace.Info(ctx, "initial decode failed; retrieving all symbols (single block)", nil) - symbols, err = task.P2PClient.BatchRetrieve(ctx, blk.Symbols, total, actionID) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to retrieve all symbols", fields) - return adaptors.DecodeResponse{}, fmt.Errorf("failed to retrieve symbols: %w", err) - } - return task.RQ.Decode(ctx, adaptors.DecodeRequest{ - ActionID: actionID, - Symbols: symbols, - Layout: layout, - }) - } - - // Multi-block layouts are not supported by current policy - return adaptors.DecodeResponse{}, fmt.Errorf("unsupported layout: expected 1 block, found %d", len(layout.Blocks)) -} diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go index 403d0428..dd6e1e77 100644 --- a/supernode/services/cascade/register.go +++ b/supernode/services/cascade/register.go @@ -157,18 +157,13 @@ func (task *CascadeRegistrationTask) Register( task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) /* 11. Persist artefacts -------------------------------------------------------- */ - // Persist artefacts to the P2P network. - // Aggregation model (context): - // - Each underlying StoreBatch returns (ratePct, requests) where requests is - // the number of node RPCs. The aggregated success rate can be computed as a - // weighted average by requests across metadata and symbol batches, yielding - // an overall network success view for the action. 
- metrics, err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields) - if err != nil { + // Persist artefacts to the P2P network. P2P interfaces return error only; + // metrics are summarized at the cascade layer and emitted via event. + if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { return err } - // Emit single-line metrics via helper to keep Register clean - task.emitArtefactsStored(ctx, metrics, fields, send) + // Emit compact analytics payload from centralized metrics collector + task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) if err != nil { diff --git a/supernode/services/cascade/register_test.go b/supernode/services/cascade/register_test.go index 82af32fe..c73b96b7 100644 --- a/supernode/services/cascade/register_test.go +++ b/supernode/services/cascade/register_test.go @@ -104,19 +104,10 @@ func TestCascadeRegistrationTask_Register(t *testing.T) { Metadata: codecpkg.Layout{Blocks: []codecpkg.Block{{BlockID: 1, Hash: "abc"}}}, }, nil) - // 8. Store artefacts (returns detailed metrics) - p2p.EXPECT(). - StoreArtefacts(gomock.Any(), gomock.Any(), gomock.Any()). - Return(adaptors.StoreArtefactsMetrics{ - MetaRate: 96.0, - MetaRequests: 20, - MetaCount: 2, - SymRate: 94.0, - SymRequests: 100, - SymCount: 1000, - AggregatedRate: 95.0, - TotalRequests: 120, - }, nil) + // 8. Store artefacts (no metrics returned; recorded centrally) + p2p.EXPECT(). + StoreArtefacts(gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return(nil) }, expectedError: "", expectedEvents: 12, diff --git a/supernode/services/common/storage/handler.go b/supernode/services/common/storage/handler.go index ae80615b..210dab0f 100644 --- a/supernode/services/common/storage/handler.go +++ b/supernode/services/common/storage/handler.go @@ -14,6 +14,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/p2p" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/v2/pkg/utils" @@ -66,10 +67,6 @@ func (h *StorageHandler) StoreBytesIntoP2P(ctx context.Context, data []byte, typ } // StoreBatch stores into P2P an array of byte slices. -// -// Note: The underlying P2P client returns (successRatePct, requests, err). -// This handler intentionally ignores the metrics and only propagates error, -// as callers of this common storage path historically consumed only errors. 
func (h *StorageHandler) StoreBatch(ctx context.Context, list [][]byte, typ int) error { val := ctx.Value(logtrace.CorrelationIDKey) taskID := "" @@ -78,9 +75,9 @@ func (h *StorageHandler) StoreBatch(ctx context.Context, list [][]byte, typ int) } logtrace.Info(ctx, "task_id in storeList", logtrace.Fields{logtrace.FieldTaskID: taskID}) - - _, _, err := h.P2PClient.StoreBatch(ctx, list, typ, taskID) - return err + // Add taskID to context for metrics + ctx = p2pmetrics.WithTaskID(ctx, taskID) + return h.P2PClient.StoreBatch(ctx, list, typ, taskID) } // StoreRaptorQSymbolsIntoP2P stores RaptorQ symbols into P2P @@ -170,7 +167,9 @@ func (h *StorageHandler) storeSymbolsInP2P(ctx context.Context, taskID, root str return fmt.Errorf("load symbols: %w", err) } - if _, _, err := h.P2PClient.StoreBatch(ctx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { + // Add taskID to context for metrics + ctx = p2pmetrics.WithTaskID(ctx, taskID) + if err := h.P2PClient.StoreBatch(ctx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { return fmt.Errorf("p2p store batch: %w", err) } diff --git a/supernode/services/common/storage/handler_test.go b/supernode/services/common/storage/handler_test.go index e1be29f1..fd4e0d8e 100644 --- a/supernode/services/common/storage/handler_test.go +++ b/supernode/services/common/storage/handler_test.go @@ -49,7 +49,8 @@ func TestStoreBatch(t *testing.T) { ctx := context.WithValue(context.Background(), "task_id", "123") list := [][]byte{[]byte("a"), []byte("b")} - p2pClient.On("StoreBatch", mock.Anything, list, 3, "").Return(0.0, 0, nil) + // StoreBatch now returns error only + p2pClient.On("StoreBatch", mock.Anything, list, 3, "").Return(nil) err := handler.StoreBatch(ctx, list, 3) assert.NoError(t, err) diff --git a/tests/integration/p2p/p2p_integration_test.go b/tests/integration/p2p/p2p_integration_test.go index a689b75d..bce71f58 100644 --- a/tests/integration/p2p/p2p_integration_test.go +++ b/tests/integration/p2p/p2p_integration_test.go @@ 
-108,7 +108,7 @@ func TestP2PBasicIntegration(t *testing.T) { // Add debug logging log.Printf("Storing batch with keys: %v", expectedKeys) - _, _, err := services[0].StoreBatch(ctx, batchData, 0, taskID) + err := services[0].StoreBatch(ctx, batchData, 0, taskID) require.NoError(t, err) // Add immediate verification From 2a595325990e03c56151023c948c29b89b53078e Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Sat, 13 Sep 2025 16:41:09 +0500 Subject: [PATCH 3/7] refactor: Remove legacy cascade storage metrics keys for cleaner event data --- sdk/adapters/supernodeservice/adapter.go | 19 ------------------- sdk/event/keys.go | 17 +++-------------- 2 files changed, 3 insertions(+), 33 deletions(-) diff --git a/sdk/adapters/supernodeservice/adapter.go b/sdk/adapters/supernodeservice/adapter.go index 6068147b..969ace44 100644 --- a/sdk/adapters/supernodeservice/adapter.go +++ b/sdk/adapters/supernodeservice/adapter.go @@ -365,23 +365,6 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca if v, ok := store["calls_by_ip"]; ok { edata[event.KeyStoreCallsByIP] = v } - } else { - // Legacy fallbacks (previous SDK behavior) - if v, ok := payload["success_rate"].(float64); ok { - edata[event.KeySuccessRate] = v - } - if v, ok := payload["meta_duration_ms"].(float64); ok { - edata[event.KeyMetaDurationMS] = int64(v) - } - if v, ok := payload["sym_duration_ms"].(float64); ok { - edata[event.KeySymDurationMS] = int64(v) - } - if v, ok := payload["sym_nodes"]; ok { - edata[event.KeySymNodes] = v - } - if v, ok := payload["meta_nodes"]; ok { - edata[event.KeyMetaNodes] = v - } } } } @@ -497,8 +480,6 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( } if v, ok := retrieve["retrieve_ms"].(float64); ok { edata[event.KeyRetrieveMS] = int64(v) - // Maintain old key for rough duration if consumers expect it - edata[event.KeyDHTDurationMS] = int64(v) } if v, ok := retrieve["decode_ms"].(float64); ok { edata[event.KeyDecodeMS] = int64(v) diff --git 
a/sdk/event/keys.go b/sdk/event/keys.go index 5089a30c..1e012677 100644 --- a/sdk/event/keys.go +++ b/sdk/event/keys.go @@ -15,7 +15,6 @@ const ( KeyProgress EventDataKey = "progress" KeyEventType EventDataKey = "event_type" KeyOutputPath EventDataKey = "output_path" - KeySuccessRate EventDataKey = "success_rate" // Upload/download metrics keys (no progress events; start/complete metrics only) KeyBytesTotal EventDataKey = "bytes_total" @@ -31,27 +30,17 @@ const ( KeyTaskID EventDataKey = "task_id" KeyActionID EventDataKey = "action_id" - // Cascade storage metrics keys - KeyMetaDurationMS EventDataKey = "meta_duration_ms" - KeySymDurationMS EventDataKey = "sym_duration_ms" - KeyMetaNodes EventDataKey = "meta_nodes" - KeySymNodes EventDataKey = "sym_nodes" + // Removed legacy cascade storage metrics keys (meta/sym timings and nodes) - // Combined store metrics (metadata + symbols) + // Combined store metrics (metadata + symbols) — new minimal only KeyStoreDurationMS EventDataKey = "store_duration_ms" - KeyStoreRequests EventDataKey = "store_requests" - KeyStoreCalls EventDataKey = "store_calls" // New minimal store metrics KeyStoreSymbolsFirstPass EventDataKey = "store_symbols_first_pass" KeyStoreSymbolsTotal EventDataKey = "store_symbols_total" KeyStoreIDFilesCount EventDataKey = "store_id_files_count" KeyStoreCallsByIP EventDataKey = "store_calls_by_ip" - // Download (retrieve) detailed metrics - KeyDHTNodes EventDataKey = "dht_nodes" - KeyDHTCalls EventDataKey = "dht_calls" - KeyDHTDurationMS EventDataKey = "dht_duration_ms" - // New minimal download metrics + // Download (retrieve) detailed metrics — new minimal only KeyRetrieveFoundLocal EventDataKey = "retrieve_found_local" KeyRetrieveMS EventDataKey = "retrieve_ms" KeyDecodeMS EventDataKey = "decode_ms" From fa21b3847396146dd96cb8f9a4da7fdc802401af Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Sat, 13 Sep 2025 20:16:50 +0500 Subject: [PATCH 4/7] feat: Pass send function to downloadArtifacts and 
restoreFileFromLayout for event streaming --- supernode/services/cascade/download.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go index f88d3941..bfcc25a9 100644 --- a/supernode/services/cascade/download.go +++ b/supernode/services/cascade/download.go @@ -76,7 +76,7 @@ func (task *CascadeRegistrationTask) Download( // Notify: network retrieval phase begins task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) - filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields) + filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send) if err != nil { fields[logtrace.FieldError] = err.Error() return task.wrapErr(ctx, "failed to download artifacts", err, fields) @@ -88,7 +88,7 @@ func (task *CascadeRegistrationTask) Download( return nil } -func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields) (string, string, error) { +func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields, send func(resp *DownloadResponse) error) (string, string, error) { logtrace.Info(ctx, "started downloading the artifacts", fields) var ( @@ -134,7 +134,7 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti fields["layout_fetch_ms"] = layoutFetchMS fields["layout_decode_ms"] = layoutDecodeMS fields["layout_attempts"] = layoutAttempts - return task.restoreFileFromLayout(ctx, layout, metadata.DataHash, actionID) + return task.restoreFileFromLayout(ctx, layout, metadata.DataHash, actionID, send) } func (task *CascadeRegistrationTask) restoreFileFromLayout( @@ -142,6 +142,7 @@ func (task *CascadeRegistrationTask) 
restoreFileFromLayout( layout codec.Layout, dataHash string, actionID string, + send func(resp *DownloadResponse) error, ) (string, string, error) { fields := logtrace.Fields{ @@ -190,7 +191,7 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout( // Set minimal retrieve summary and emit event strictly from internal collector cm.SetRetrieveSummary(actionID, retrieveMS, decodeMS) if b, err := json.MarshalIndent(cm.BuildDownloadEventPayloadFromCollector(actionID), "", " "); err == nil { - task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", func(resp *DownloadResponse) error { return nil }) + task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", send) } fileHash, err := crypto.HashFileIncrementally(decodeInfo.FilePath, 0) From e26fd6622c374db29f9f600f12ee5e448dbfaf56 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Mon, 15 Sep 2025 12:46:53 +0500 Subject: [PATCH 5/7] feat: Add success rate percentage and noop metrics for store and retrieve operations --- docs/p2p-metrics-capture.md | 38 ++++++++++++++++++++++--------------- p2p/kademlia/dht.go | 31 +++++++++++++++++++++++++----- pkg/p2pmetrics/metrics.go | 18 ++++++++++++++++++ 3 files changed, 67 insertions(+), 20 deletions(-) diff --git a/docs/p2p-metrics-capture.md b/docs/p2p-metrics-capture.md index d272a7c7..6cbafebf 100644 --- a/docs/p2p-metrics-capture.md +++ b/docs/p2p-metrics-capture.md @@ -20,6 +20,7 @@ Event payload shape "symbols_first_pass": 220, "symbols_total": 1200, "id_files_count": 14, + "success_rate_pct": 82.5, "calls_by_ip": { "10.0.0.5": [ {"ip": "10.0.0.5", "address": "A:4445", "keys": 100, "success": true, "duration_ms": 120}, @@ -33,6 +34,9 @@ Event payload shape ### Fields - `store.duration_ms` + - Meaning: End‑to‑end elapsed time of the first‑pass store phase (Register’s storage section only). 
+ - Where captured: `supernode/services/cascade/adaptors/p2p.go` + - A `time.Now()` timestamp is taken just before the first‑pass store function and measured on return. - `store.symbols_first_pass` - Meaning: Number of symbols sent during the Register first pass (across the combined first batch and any immediate first‑pass symbol batches). @@ -45,9 +49,6 @@ Event payload shape - `store.id_files_count` - Meaning: Number of redundant metadata files (ID files) sent in the first combined batch. - Where captured: `len(req.IDFiles)` in `StoreArtefacts`, passed to `SetStoreSummary`. - - Meaning: End‑to‑end elapsed time of the first‑pass store phase (Register’s storage section only). - - Where captured: `supernode/services/cascade/adaptors/p2p.go` - - A `time.Now()` timestamp is taken just before the first‑pass store function and measured on return. - `store.calls_by_ip` - Meaning: All raw network store RPC attempts grouped by the node IP. @@ -55,21 +56,26 @@ Event payload shape - `ip` — Node IP (fallback to `address` if missing). - `address` — Node string `IP:port`. - `keys` — Number of items in that RPC attempt (metadata + first symbols for the first combined batch, symbols for subsequent batches within the first pass). - - `success` — True if the node acknowledged the store successfully. + - `success` — True if there was no transport error and no error message returned by the node response. Note: this flag does not explicitly check the `ResultOk` status; in rare cases, a non‑OK response with an empty error message may appear as `success` in metrics. (Internal success‑rate enforcement still uses explicit response status.) - `error` — Any error string captured; omitted when success. - `duration_ms` — RPC duration in milliseconds. + - `noop` — Present and `true` when no store payload was sent to the node (empty batch for that node). Such entries are recorded as `success=true`, `keys=0`, with no `error`. 
- Where captured: - Emission point (P2P): `p2p/kademlia/dht.go::IterateBatchStore(...)` - - After each node RPC returns, we call `p2pmetrics.RecordStore(taskID, Call{...})`. + - After each node RPC returns, we call `p2pmetrics.RecordStore(taskID, Call{...})`. For nodes with no payload, a `noop: true` entry is emitted without sending a wire RPC. - `taskID` is read from the context via `p2pmetrics.TaskIDFromContext(ctx)`. - Grouping: `pkg/p2pmetrics/metrics.go` - `StartStoreCapture(taskID)` enables capture; `StopStoreCapture(taskID)` disables it. - Calls are grouped by `ip` (fallback to `address`) without further aggregation. + +- `store.success_rate_pct` + - Meaning: First‑pass store success rate computed from captured per‑RPC outcomes: successful responses divided by total recorded store RPC attempts, expressed as a percentage. + - Where captured: Computed in `pkg/p2pmetrics/metrics.go::BuildStoreEventPayloadFromCollector` from `calls_by_ip` data. ### First‑Pass Success Threshold - Internal enforcement only: if DHT first‑pass success rate is below 75%, `IterateBatchStore` returns an error. -- No success rate is emitted in events; only error flow is affected. +- We also emit `store.success_rate_pct` for analytics; the threshold only affects control flow (errors), not the emitted metric. - Code: `p2p/kademlia/dht.go::IterateBatchStore`. ### Scope Limits @@ -78,7 +84,7 @@ Event payload shape --- -## Download (Retrieve) Event +## Download Event Event payload shape @@ -120,12 +126,13 @@ Event payload shape - Each array entry is a single RPC attempt with: - `ip`, `address` — Identifiers as available. - `keys` — Number of symbols returned by that node in that call. - - `success` — True if `keys > 0`. + - `success` — True if the RPC completed without error (even if `keys == 0`). Transport/status errors remain `success=false` with an `error` message. - `error` — Error string when the RPC failed; omitted otherwise. - `duration_ms` — RPC duration in milliseconds. 
+ - `noop` — Present and `true` when no network request was actually sent to the node (e.g., all requested keys were already satisfied or deduped before issuing the call). Such entries are recorded as `success=true`, `keys=0`, with no `error`. - Where captured: - Emission point (P2P): `p2p/kademlia/dht.go::iterateBatchGetValues(...)` - - Each node RPC records a `p2pmetrics.RecordRetrieve(taskID, Call{...})`. + - Each node attempt records a `p2pmetrics.RecordRetrieve(taskID, Call{...})`. For attempts where no network RPC is sent, a `noop: true` entry is emitted. - `taskID` is extracted from context using `p2pmetrics.TaskIDFromContext(ctx)`. - Grouping: `pkg/p2pmetrics/metrics.go` (same grouping/fallback as store). @@ -151,6 +158,7 @@ Event payload shape - Store - `supernode/services/cascade/helper.go::emitArtefactsStored(...)` - Builds `store` payload via `p2pmetrics.BuildStoreEventPayloadFromCollector(taskID)`. + - Includes `success_rate_pct` (first‑pass store success rate computed from captured per‑RPC outcomes) in addition to the minimal fields. - Emits the event. 
- Download @@ -162,12 +170,12 @@ Event payload shape ## Quick File Map -- Capture + grouping: `pkg/p2pmetrics/metrics.go` -- Store adaptor: `supernode/services/cascade/adaptors/p2p.go` -- Store event: `supernode/services/cascade/helper.go` -- Download flow: `supernode/services/cascade/download.go` -- DHT store calls: `p2p/kademlia/dht.go::IterateBatchStore` -- DHT retrieve calls: `p2p/kademlia/dht.go::BatchRetrieve` and `iterateBatchGetValues` +- Capture + grouping: `supernode/pkg/p2pmetrics/metrics.go` +- Store adaptor: `supernode/supernode/services/cascade/adaptors/p2p.go` +- Store event: `supernode/supernode/services/cascade/helper.go` +- Download flow: `supernode/supernode/services/cascade/download.go` +- DHT store calls: `supernode/p2p/kademlia/dht.go::IterateBatchStore` +- DHT retrieve calls: `supernode/p2p/kademlia/dht.go::BatchRetrieve` and `iterateBatchGetValues` --- diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 81fb8ecc..c7c63c18 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -918,13 +918,16 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, } if len(requestKeys) == 0 { + // No keys to request from this node (e.g., all keys already satisfied elsewhere). + // Treat as a successful, no-op call for metrics when there is no error. p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ IP: node.IP, Address: node.String(), Keys: 0, - Success: false, + Success: true, Error: "", DurationMS: time.Since(callStart).Milliseconds(), + Noop: true, }) return } @@ -966,12 +969,12 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, } } - // record successful RPC per-node (returned may be 0) + // record successful RPC per-node (returned may be 0). Success is true when no error. 
p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ IP: node.IP, Address: node.String(), Keys: returned, - Success: returned > 0, + Success: true, Error: "", DurationMS: time.Since(callStart).Milliseconds(), }) @@ -1778,6 +1781,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ responses <- &MessageWithError{Error: ctx.Err(), Receiver: receiver} return default: + callStart := time.Now() keysToStore := storageMap[key] toStore := make([][]byte, len(keysToStore)) totalBytes := 0 @@ -1792,11 +1796,28 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ "size_before_compress": utils.BytesIntToMB(totalBytes), }) + // Skip empty payloads: avoid sending empty store RPCs, but record a noop metric for visibility. + if len(toStore) == 0 { + logtrace.Info(ctx, "Skipping store RPC with empty payload", logtrace.Fields{ + logtrace.FieldModule: "dht", + "node": receiver.String(), + }) + p2pmetrics.RecordStore(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ + IP: receiver.IP, + Address: receiver.String(), + Keys: 0, + Success: true, + Error: "", + DurationMS: time.Since(callStart).Milliseconds(), + Noop: true, + }) + return + } + data := &BatchStoreDataRequest{Data: toStore, Type: typ} request := s.newMessage(BatchStoreData, receiver, data) - start := time.Now() response, err := s.network.Call(ctx, request, false) - dur := time.Since(start).Milliseconds() + dur := time.Since(callStart).Milliseconds() if err != nil { if !isLocalCancel(err) { s.ignorelist.IncrementCount(receiver) diff --git a/pkg/p2pmetrics/metrics.go b/pkg/p2pmetrics/metrics.go index 1c1b0088..b483bb1d 100644 --- a/pkg/p2pmetrics/metrics.go +++ b/pkg/p2pmetrics/metrics.go @@ -13,6 +13,7 @@ type Call struct { Success bool `json:"success"` Error string `json:"error,omitempty"` DurationMS int64 `json:"duration_ms"` + Noop bool `json:"noop,omitempty"` } // -------- Lightweight hooks ------------------------- @@ -162,16 
+163,33 @@ func BuildStoreEventPayloadFromCollector(taskID string) map[string]any { "symbols_first_pass": 0, "symbols_total": 0, "id_files_count": 0, + "success_rate_pct": float64(0), "calls_by_ip": map[string][]Call{}, }, } } + // Compute per-call success rate across first-pass store RPC attempts + totalCalls := 0 + successCalls := 0 + for _, calls := range s.CallsByIP { + for _, c := range calls { + totalCalls++ + if c.Success { + successCalls++ + } + } + } + var successRate float64 + if totalCalls > 0 { + successRate = float64(successCalls) / float64(totalCalls) * 100.0 + } return map[string]any{ "store": map[string]any{ "duration_ms": s.DurationMS, "symbols_first_pass": s.SymbolsFirstPass, "symbols_total": s.SymbolsTotal, "id_files_count": s.IDFilesCount, + "success_rate_pct": successRate, "calls_by_ip": s.CallsByIP, }, } From 79242d03d9da367911331061d412e56c730c0d5f Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Mon, 15 Sep 2025 16:55:09 +0500 Subject: [PATCH 6/7] Increase TCP buffer size --- p2p/p2p.go | 7 ++++++- supernode/node/supernode/server/server.go | 6 ++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/p2p/p2p.go b/p2p/p2p.go index 5cb9c505..e3d6b40a 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -176,7 +176,12 @@ func (s *p2p) Stats(ctx context.Context) (map[string]interface{}, error) { retStats["disk-info"] = &diskUse retStats["ban-list"] = s.dht.BanListSnapshot() retStats["conn-pool"] = s.dht.ConnPoolSnapshot() - dhtStats["dht_metrics"] = s.dht.MetricsSnapshot() + + // Expose DHT rolling metrics snapshot both under the top-level key (as expected by + // the status service) and also within the DHT map for backward compatibility. 
+ snapshot := s.dht.MetricsSnapshot() + retStats["dht_metrics"] = snapshot + dhtStats["dht_metrics"] = snapshot return retStats, nil } diff --git a/supernode/node/supernode/server/server.go b/supernode/node/supernode/server/server.go index 7ded8eb7..f17a4f10 100644 --- a/supernode/node/supernode/server/server.go +++ b/supernode/node/supernode/server/server.go @@ -66,8 +66,10 @@ func (server *Server) Run(ctx context.Context) error { opts.InitialWindowSize = (4 * 1024 * 1024) // 4MB per-stream window ~ chunk size opts.InitialConnWindowSize = (64 * 1024 * 1024) // 64MB per-connection window opts.MaxConcurrentStreams = 20 // Prevent resource exhaustion - opts.ReadBufferSize = (1 * 1024 * 1024) // 1MB TCP buffer - opts.WriteBufferSize = (1 * 1024 * 1024) // 1MB TCP buffer + // Use larger socket buffers to reduce risk of flow-control stalls during long-running + // retrieve/stream phases while keeping overall memory limits reasonable. + opts.ReadBufferSize = (8 * 1024 * 1024) // 8MB TCP buffer + opts.WriteBufferSize = (8 * 1024 * 1024) // 8MB TCP buffer for _, address := range addresses { addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.config.Port)) From d17c904c4be86ac292fd297f7aebb327840f3237 Mon Sep 17 00:00:00 2001 From: Matee Ullah Malik Date: Mon, 15 Sep 2025 17:26:36 +0500 Subject: [PATCH 7/7] Add handler request tracking per IP --- gen/supernode/supernode.pb.go | 674 ++++++++++++++---- gen/supernode/supernode.swagger.json | 112 +++ p2p/kademlia/dht.go | 11 + p2p/kademlia/network.go | 93 +++ p2p/kademlia/recent.go | 90 +++ proto/supernode/supernode.proto | 31 + .../node/supernode/server/status_server.go | 63 ++ .../services/common/supernode/service.go | 367 ++++++---- supernode/services/common/supernode/types.go | 124 ++-- 9 files changed, 1250 insertions(+), 315 deletions(-) create mode 100644 p2p/kademlia/recent.go diff --git a/gen/supernode/supernode.pb.go b/gen/supernode/supernode.pb.go index 5410f5c6..431bc8b5 100644 --- 
a/gen/supernode/supernode.pb.go +++ b/gen/supernode/supernode.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.35.1 // protoc v3.21.12 // source: supernode/supernode.proto @@ -513,12 +513,16 @@ type StatusResponse_P2PMetrics struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - DhtMetrics *StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"` - NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"` - Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` - Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"` + DhtMetrics *StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"` + NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" 
protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"` + Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` + Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"` + RecentBatchStore []*StatusResponse_P2PMetrics_RecentBatchStoreEntry `protobuf:"bytes,7,rep,name=recent_batch_store,json=recentBatchStore,proto3" json:"recent_batch_store,omitempty"` + RecentBatchRetrieve []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry `protobuf:"bytes,8,rep,name=recent_batch_retrieve,json=recentBatchRetrieve,proto3" json:"recent_batch_retrieve,omitempty"` + RecentBatchStoreByIp map[string]*StatusResponse_P2PMetrics_RecentBatchStoreList `protobuf:"bytes,9,rep,name=recent_batch_store_by_ip,json=recentBatchStoreByIp,proto3" json:"recent_batch_store_by_ip,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + RecentBatchRetrieveByIp map[string]*StatusResponse_P2PMetrics_RecentBatchRetrieveList `protobuf:"bytes,10,rep,name=recent_batch_retrieve_by_ip,json=recentBatchRetrieveByIp,proto3" json:"recent_batch_retrieve_by_ip,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *StatusResponse_P2PMetrics) Reset() { @@ -593,6 +597,34 @@ func (x *StatusResponse_P2PMetrics) GetDisk() *StatusResponse_P2PMetrics_DiskSta return nil } +func (x *StatusResponse_P2PMetrics) GetRecentBatchStore() []*StatusResponse_P2PMetrics_RecentBatchStoreEntry { + if x != nil { + return x.RecentBatchStore + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetRecentBatchRetrieve() []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry { + if x != nil { + return x.RecentBatchRetrieve + } + return nil +} + +func (x 
*StatusResponse_P2PMetrics) GetRecentBatchStoreByIp() map[string]*StatusResponse_P2PMetrics_RecentBatchStoreList { + if x != nil { + return x.RecentBatchStoreByIp + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetRecentBatchRetrieveByIp() map[string]*StatusResponse_P2PMetrics_RecentBatchRetrieveList { + if x != nil { + return x.RecentBatchRetrieveByIp + } + return nil +} + type StatusResponse_Resources_CPU struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1134,6 +1166,285 @@ func (x *StatusResponse_P2PMetrics_DiskStatus) GetFreeMb() float64 { return 0 } +// Last handled BatchStoreData requests (most recent first) +type StatusResponse_P2PMetrics_RecentBatchStoreEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` + SenderId string `protobuf:"bytes,2,opt,name=sender_id,json=senderId,proto3" json:"sender_id,omitempty"` + SenderIp string `protobuf:"bytes,3,opt,name=sender_ip,json=senderIp,proto3" json:"sender_ip,omitempty"` + Keys int32 `protobuf:"varint,4,opt,name=keys,proto3" json:"keys,omitempty"` + DurationMs int64 `protobuf:"varint,5,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` + Ok bool `protobuf:"varint,6,opt,name=ok,proto3" json:"ok,omitempty"` + Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) Reset() { + *x = StatusResponse_P2PMetrics_RecentBatchStoreEntry{} + mi := &file_supernode_supernode_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_RecentBatchStoreEntry) ProtoMessage() {} + +func (x 
*StatusResponse_P2PMetrics_RecentBatchStoreEntry) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchStoreEntry.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_RecentBatchStoreEntry) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 7} +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetTimeUnix() int64 { + if x != nil { + return x.TimeUnix + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetSenderId() string { + if x != nil { + return x.SenderId + } + return "" +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetSenderIp() string { + if x != nil { + return x.SenderIp + } + return "" +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetKeys() int32 { + if x != nil { + return x.Keys + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetDurationMs() int64 { + if x != nil { + return x.DurationMs + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetOk() bool { + if x != nil { + return x.Ok + } + return false +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +// Last handled BatchGetValues requests (most recent first) +type StatusResponse_P2PMetrics_RecentBatchRetrieveEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` + SenderId string `protobuf:"bytes,2,opt,name=sender_id,json=senderId,proto3" json:"sender_id,omitempty"` + SenderIp string 
`protobuf:"bytes,3,opt,name=sender_ip,json=senderIp,proto3" json:"sender_ip,omitempty"` + Requested int32 `protobuf:"varint,4,opt,name=requested,proto3" json:"requested,omitempty"` + Found int32 `protobuf:"varint,5,opt,name=found,proto3" json:"found,omitempty"` + DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` + Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) Reset() { + *x = StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{} + mi := &file_supernode_supernode_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchRetrieveEntry.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 8} +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetTimeUnix() int64 { + if x != nil { + return x.TimeUnix + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetSenderId() string { + if x != nil { + return x.SenderId + } + return "" +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetSenderIp() string { + if x != nil { + return x.SenderIp + } + return "" +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetRequested() int32 { + if x != nil { + return x.Requested + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetFound() int32 { + if x != nil { + return x.Found + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetDurationMs() int64 { + if x != nil { + return x.DurationMs + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +// Per-IP buckets: last 10 per sender IP +type StatusResponse_P2PMetrics_RecentBatchStoreList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*StatusResponse_P2PMetrics_RecentBatchStoreEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) Reset() { + *x = StatusResponse_P2PMetrics_RecentBatchStoreList{} + mi := &file_supernode_supernode_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_RecentBatchStoreList) ProtoMessage() {} + +func (x 
*StatusResponse_P2PMetrics_RecentBatchStoreList) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchStoreList.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_RecentBatchStoreList) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 9} +} + +func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) GetEntries() []*StatusResponse_P2PMetrics_RecentBatchStoreEntry { + if x != nil { + return x.Entries + } + return nil +} + +type StatusResponse_P2PMetrics_RecentBatchRetrieveList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) Reset() { + *x = StatusResponse_P2PMetrics_RecentBatchRetrieveList{} + mi := &file_supernode_supernode_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_RecentBatchRetrieveList) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) ProtoReflect() protoreflect.Message { + mi := &file_supernode_supernode_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchRetrieveList.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_P2PMetrics_RecentBatchRetrieveList) Descriptor() ([]byte, []int) { + return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 10} +} + +func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) GetEntries() []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry { + if x != nil { + return x.Entries + } + return nil +} + type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1147,7 +1458,7 @@ type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() { *x = StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{} - mi := &file_supernode_supernode_proto_msgTypes[19] + mi := &file_supernode_supernode_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1159,7 +1470,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoMessage() {} func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[19] + mi := &file_supernode_supernode_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1218,7 +1529,7 @@ type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct { func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() { *x = StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{} - mi := &file_supernode_supernode_proto_msgTypes[20] + mi := &file_supernode_supernode_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1230,7 +1541,7 @@ func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() strin func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoMessage() {} func 
(x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[20] + mi := &file_supernode_supernode_proto_msgTypes[26] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1310,7 +1621,7 @@ var file_supernode_supernode_proto_rawDesc = []byte{ 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x22, 0x84, 0x19, 0x0a, 0x0e, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x22, 0xf7, 0x23, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x75, 0x70, 0x74, 0x69, @@ -1391,7 +1702,7 @@ var file_supernode_supernode_proto_rawDesc = []byte{ 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x70, - 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xf3, 0x0e, 0x0a, + 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xe6, 0x19, 0x0a, 0x0a, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x68, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 
0x61, @@ -1425,110 +1736,197 @@ var file_supernode_supernode_proto_rawDesc = []byte{ 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x1a, 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, - 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x73, 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x12, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, - 0x15, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, - 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, + 0x74, 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x12, 0x68, 0x0a, 0x12, 0x72, 0x65, 0x63, + 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, + 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 
0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, + 0x6f, 0x72, 0x65, 0x12, 0x71, 0x0a, 0x15, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, + 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, + 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x13, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, + 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x12, 0x76, 0x0a, 0x18, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, + 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x79, 0x5f, + 0x69, 0x70, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, + 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, + 0x79, 0x49, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x79, 0x49, 0x70, 0x12, 0x7f, + 0x0a, 0x1b, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, + 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 
0x50, + 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x42, 0x79, 0x49, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x17, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, + 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x42, 0x79, 0x49, 0x70, 0x1a, + 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x73, + 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, - 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, - 0x52, 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, - 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, - 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, - 0x6e, 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, - 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x68, 0x6f, 0x74, 0x50, 0x61, - 0x74, 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, - 0x8f, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 
0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, - 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, - 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, - 0x0a, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, - 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, - 0x65, 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, - 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, - 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, - 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, - 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, - 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, - 0x6f, 0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 
0x1a, 0x74, 0x0a, 0x0e, - 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, - 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, - 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, - 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, - 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, - 0x64, 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, - 0x44, 0x62, 
0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x2f, 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, - 0x64, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x70, 0x32, 0x70, 0x44, 0x62, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x55, 0x0a, 0x0a, 0x44, 0x69, 0x73, - 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, - 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, - 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x06, 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, - 0x6d, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, - 0x1a, 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x49, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, - 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, - 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x36, 0x5a, 0x34, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, - 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x53, 0x74, + 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, + 0x12, 0x73, 
0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x63, + 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, 0x15, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, + 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, + 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, + 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, + 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x68, + 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, + 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, 0x6f, 0x74, 0x50, + 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, 0x73, 0x12, 0x35, + 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x5f, 0x69, + 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x14, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, 0x63, 0x72, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x8f, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 
0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, + 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6b, + 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x23, 0x0a, 0x0d, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x73, 0x1a, 0x74, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 
0x75, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, 0x42, 0x61, 0x6e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x75, 0x6e, + 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, + 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x32, 0x70, + 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, 0x44, 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x2f, + 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x70, 0x32, + 0x70, 0x44, 0x62, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, + 0x55, 0x0a, 0x0a, 0x44, 
0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a, + 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x61, + 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x6d, 0x62, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, 0x12, 0x17, 0x0a, + 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, + 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, 0x1a, 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xc9, 0x01, 0x0a, 0x15, 0x52, 0x65, 0x63, + 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 
0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x0e, + 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x1a, 0xdc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1b, + 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, + 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0b, + 
0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x1a, 0x6c, 0x0a, 0x14, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x54, 0x0a, 0x07, 0x65, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, + 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x1a, 0x72, 0x0a, 0x17, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, + 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, + 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x82, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x79, 0x49, 0x70, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 
0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, + 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x88, 0x01, 0x0a, 0x1c, 0x52, + 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, + 0x76, 0x65, 0x42, 0x79, 0x49, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, + 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 
0x31, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, + 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, + 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, + 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1543,7 +1941,7 @@ func file_supernode_supernode_proto_rawDescGZIP() []byte { return file_supernode_supernode_proto_rawDescData } -var file_supernode_supernode_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_supernode_supernode_proto_msgTypes = make([]protoimpl.MessageInfo, 27) var file_supernode_supernode_proto_goTypes = []any{ (*StatusRequest)(nil), // 0: supernode.StatusRequest (*ListServicesRequest)(nil), // 1: supernode.ListServicesRequest @@ -1564,8 +1962,14 @@ var file_supernode_supernode_proto_goTypes = []any{ (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 16: supernode.StatusResponse.P2PMetrics.DiskStatus nil, // 17: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry nil, // 18: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry - (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 19: 
supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 20: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + (*StatusResponse_P2PMetrics_RecentBatchStoreEntry)(nil), // 19: supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry + (*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry)(nil), // 20: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry + (*StatusResponse_P2PMetrics_RecentBatchStoreList)(nil), // 21: supernode.StatusResponse.P2PMetrics.RecentBatchStoreList + (*StatusResponse_P2PMetrics_RecentBatchRetrieveList)(nil), // 22: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList + nil, // 23: supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry + nil, // 24: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry + (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 25: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 26: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint } var file_supernode_supernode_proto_depIdxs = []int32{ 3, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo @@ -1582,18 +1986,26 @@ var file_supernode_supernode_proto_depIdxs = []int32{ 14, // 11: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry 15, // 12: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats 16, // 13: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus - 19, // 14: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - 20, // 15: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> 
supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint - 13, // 16: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters - 0, // 17: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest - 1, // 18: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest - 4, // 19: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse - 2, // 20: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse - 19, // [19:21] is the sub-list for method output_type - 17, // [17:19] is the sub-list for method input_type - 17, // [17:17] is the sub-list for extension type_name - 17, // [17:17] is the sub-list for extension extendee - 0, // [0:17] is the sub-list for field type_name + 19, // 14: supernode.StatusResponse.P2PMetrics.recent_batch_store:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry + 20, // 15: supernode.StatusResponse.P2PMetrics.recent_batch_retrieve:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry + 23, // 16: supernode.StatusResponse.P2PMetrics.recent_batch_store_by_ip:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry + 24, // 17: supernode.StatusResponse.P2PMetrics.recent_batch_retrieve_by_ip:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry + 25, // 18: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + 26, // 19: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + 13, // 20: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters + 19, // 21: supernode.StatusResponse.P2PMetrics.RecentBatchStoreList.entries:type_name -> 
supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry + 20, // 22: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList.entries:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry + 21, // 23: supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreList + 22, // 24: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList + 0, // 25: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest + 1, // 26: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest + 4, // 27: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse + 2, // 28: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse + 27, // [27:29] is the sub-list for method output_type + 25, // [25:27] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name } func init() { file_supernode_supernode_proto_init() } @@ -1607,7 +2019,7 @@ func file_supernode_supernode_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_supernode_supernode_proto_rawDesc, NumEnums: 0, - NumMessages: 21, + NumMessages: 27, NumExtensions: 0, NumServices: 1, }, diff --git a/gen/supernode/supernode.swagger.json b/gen/supernode/supernode.swagger.json index e29dcbae..00a47bb8 100644 --- a/gen/supernode/supernode.swagger.json +++ b/gen/supernode/supernode.swagger.json @@ -249,6 +249,92 @@ }, "title": "Per-handler counters from network layer" }, + "P2PMetricsRecentBatchRetrieveEntry": { + "type": "object", + "properties": { + "timeUnix": { + "type": "string", + "format": "int64" + }, + "senderId": { + "type": "string" + }, + "senderIp": { + "type": "string" + }, + "requested": 
{ + "type": "integer", + "format": "int32" + }, + "found": { + "type": "integer", + "format": "int32" + }, + "durationMs": { + "type": "string", + "format": "int64" + }, + "error": { + "type": "string" + } + }, + "title": "Last handled BatchGetValues requests (most recent first)" + }, + "P2PMetricsRecentBatchRetrieveList": { + "type": "object", + "properties": { + "entries": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveEntry" + } + } + } + }, + "P2PMetricsRecentBatchStoreEntry": { + "type": "object", + "properties": { + "timeUnix": { + "type": "string", + "format": "int64" + }, + "senderId": { + "type": "string" + }, + "senderIp": { + "type": "string" + }, + "keys": { + "type": "integer", + "format": "int32" + }, + "durationMs": { + "type": "string", + "format": "int64" + }, + "ok": { + "type": "boolean" + }, + "error": { + "type": "string" + } + }, + "title": "Last handled BatchStoreData requests (most recent first)" + }, + "P2PMetricsRecentBatchStoreList": { + "type": "object", + "properties": { + "entries": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/P2PMetricsRecentBatchStoreEntry" + } + } + }, + "title": "Per-IP buckets: last 10 per sender IP" + }, "ResourcesCPU": { "type": "object", "properties": { @@ -364,6 +450,32 @@ }, "disk": { "$ref": "#/definitions/P2PMetricsDiskStatus" + }, + "recentBatchStore": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/P2PMetricsRecentBatchStoreEntry" + } + }, + "recentBatchRetrieve": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveEntry" + } + }, + "recentBatchStoreByIp": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/P2PMetricsRecentBatchStoreList" + } + }, + "recentBatchRetrieveByIp": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveList" + } } }, "title": 
"P2P metrics and diagnostics (additive field)" diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index c7c63c18..22e2cef3 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -464,6 +464,17 @@ func (s *DHT) Stats(ctx context.Context) (map[string]interface{}, error) { dhtStats["peers_count"] = len(s.ht.nodes()) dhtStats["peers"] = s.ht.nodes() dhtStats["network"] = s.network.HandleMetricsSnapshot() + // Include recent request snapshots for observability + if s.network != nil { + if overall, byIP := s.network.RecentBatchStoreSnapshot(); len(overall) > 0 || len(byIP) > 0 { + dhtStats["recent_batch_store_overall"] = overall + dhtStats["recent_batch_store_by_ip"] = byIP + } + if overall, byIP := s.network.RecentBatchRetrieveSnapshot(); len(overall) > 0 || len(byIP) > 0 { + dhtStats["recent_batch_retrieve_overall"] = overall + dhtStats["recent_batch_retrieve_by_ip"] = byIP + } + } dhtStats["database"] = dbStats return dhtStats, nil diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index 56e7ac55..e3059c02 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -69,6 +69,13 @@ type Network struct { sem *semaphore.Weighted metrics sync.Map + + // recent request tracking (last 10 entries overall and per IP) + recentMu sync.Mutex + recentStoreOverall []RecentBatchStoreEntry + recentStoreByIP map[string][]RecentBatchStoreEntry + recentRetrieveOverall []RecentBatchRetrieveEntry + recentRetrieveByIP map[string][]RecentBatchRetrieveEntry } // NewNetwork returns a network service @@ -895,15 +902,38 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r } func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, reqID string) (res []byte, err error) { + start := time.Now() + appended := false defer func() { if response, err := s.handlePanic(ctx, message.Sender, BatchGetValues); response != nil || err != nil { res = response + if !appended { + s.appendRetrieveEntry(message.Sender.IP, 
RecentBatchRetrieveEntry{ + TimeUnix: time.Now().UTC().Unix(), + SenderID: string(message.Sender.ID), + SenderIP: message.Sender.IP, + Requested: 0, + Found: 0, + DurationMS: time.Since(start).Milliseconds(), + Error: "panic/recovered", + }) + } } }() request, ok := message.Data.(*BatchGetValuesRequest) if !ok { err := errors.New("invalid BatchGetValuesRequest") + s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ + TimeUnix: time.Now().UTC().Unix(), + SenderID: string(message.Sender.ID), + SenderIP: message.Sender.IP, + Requested: 0, + Found: 0, + DurationMS: time.Since(start).Milliseconds(), + Error: err.Error(), + }) + appended = true return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) } @@ -924,6 +954,16 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, true) if err != nil { err = errors.Errorf("batch find values: %w", err) + s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ + TimeUnix: time.Now().UTC().Unix(), + SenderID: string(message.Sender.ID), + SenderIP: message.Sender.IP, + Requested: len(keys), + Found: count, + DurationMS: time.Since(start).Milliseconds(), + Error: err.Error(), + }) + appended = true return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) } @@ -961,6 +1001,16 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, // new a response message resMsg := s.dht.newMessage(BatchGetValues, message.Sender, response) + s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ + TimeUnix: time.Now().UTC().Unix(), + SenderID: string(message.Sender.ID), + SenderIP: message.Sender.IP, + Requested: len(keys), + Found: count, + DurationMS: time.Since(start).Milliseconds(), + Error: "", + }) + appended = true return s.encodeMesage(resMsg) } @@ -1132,15 +1182,38 @@ func findTopHeaviestKeys(dataMap 
map[string][]byte, size int) (int, []string) { } func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (res []byte, err error) { + start := time.Now() + appended := false defer func() { if response, err := s.handlePanic(ctx, message.Sender, BatchStoreData); response != nil || err != nil { res = response + if !appended { + s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ + TimeUnix: time.Now().UTC().Unix(), + SenderID: string(message.Sender.ID), + SenderIP: message.Sender.IP, + Keys: 0, + DurationMS: time.Since(start).Milliseconds(), + OK: false, + Error: "panic/recovered", + }) + } } }() request, ok := message.Data.(*BatchStoreDataRequest) if !ok { err := errors.New("invalid BatchStoreDataRequest") + s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ + TimeUnix: time.Now().UTC().Unix(), + SenderID: string(message.Sender.ID), + SenderIP: message.Sender.IP, + Keys: 0, + DurationMS: time.Since(start).Milliseconds(), + OK: false, + Error: err.Error(), + }) + appended = true return s.generateResponseMessage(BatchStoreData, message.Sender, ResultFailed, err.Error()) } @@ -1156,6 +1229,16 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r if err := s.dht.store.StoreBatch(ctx, request.Data, 1, false); err != nil { err = errors.Errorf("batch store the data: %w", err) + s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ + TimeUnix: time.Now().UTC().Unix(), + SenderID: string(message.Sender.ID), + SenderIP: message.Sender.IP, + Keys: len(request.Data), + DurationMS: time.Since(start).Milliseconds(), + OK: false, + Error: err.Error(), + }) + appended = true return s.generateResponseMessage(BatchStoreData, message.Sender, ResultFailed, err.Error()) } @@ -1173,6 +1256,16 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r // new a response message resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response) + s.appendStoreEntry(message.Sender.IP, 
RecentBatchStoreEntry{ + TimeUnix: time.Now().UTC().Unix(), + SenderID: string(message.Sender.ID), + SenderIP: message.Sender.IP, + Keys: len(request.Data), + DurationMS: time.Since(start).Milliseconds(), + OK: true, + Error: "", + }) + appended = true return s.encodeMesage(resMsg) } diff --git a/p2p/kademlia/recent.go b/p2p/kademlia/recent.go new file mode 100644 index 00000000..2467cf02 --- /dev/null +++ b/p2p/kademlia/recent.go @@ -0,0 +1,90 @@ +package kademlia + +import ( + "sync" + "time" +) + +// RecentBatchStoreEntry captures a handled BatchStoreData request outcome +type RecentBatchStoreEntry struct { + TimeUnix int64 `json:"time_unix"` + SenderID string `json:"sender_id"` + SenderIP string `json:"sender_ip"` + Keys int `json:"keys"` + DurationMS int64 `json:"duration_ms"` + OK bool `json:"ok"` + Error string `json:"error,omitempty"` +} + +// RecentBatchRetrieveEntry captures a handled BatchGetValues request outcome +type RecentBatchRetrieveEntry struct { + TimeUnix int64 `json:"time_unix"` + SenderID string `json:"sender_id"` + SenderIP string `json:"sender_ip"` + Requested int `json:"requested"` + Found int `json:"found"` + DurationMS int64 `json:"duration_ms"` + Error string `json:"error,omitempty"` +} + +func (s *Network) appendStoreEntry(ip string, e RecentBatchStoreEntry) { + s.recentMu.Lock() + defer s.recentMu.Unlock() + if s.recentStoreByIP == nil { + s.recentStoreByIP = make(map[string][]RecentBatchStoreEntry) + } + s.recentStoreOverall = append([]RecentBatchStoreEntry{e}, s.recentStoreOverall...) + if len(s.recentStoreOverall) > 10 { + s.recentStoreOverall = s.recentStoreOverall[:10] + } + lst := append([]RecentBatchStoreEntry{e}, s.recentStoreByIP[ip]...) 
+ if len(lst) > 10 { + lst = lst[:10] + } + s.recentStoreByIP[ip] = lst +} + +func (s *Network) appendRetrieveEntry(ip string, e RecentBatchRetrieveEntry) { + s.recentMu.Lock() + defer s.recentMu.Unlock() + if s.recentRetrieveByIP == nil { + s.recentRetrieveByIP = make(map[string][]RecentBatchRetrieveEntry) + } + s.recentRetrieveOverall = append([]RecentBatchRetrieveEntry{e}, s.recentRetrieveOverall...) + if len(s.recentRetrieveOverall) > 10 { + s.recentRetrieveOverall = s.recentRetrieveOverall[:10] + } + lst := append([]RecentBatchRetrieveEntry{e}, s.recentRetrieveByIP[ip]...) + if len(lst) > 10 { + lst = lst[:10] + } + s.recentRetrieveByIP[ip] = lst +} + +// RecentBatchStoreSnapshot returns copies of recent store entries (overall and by IP) +func (s *Network) RecentBatchStoreSnapshot() (overall []RecentBatchStoreEntry, byIP map[string][]RecentBatchStoreEntry) { + s.recentMu.Lock() + defer s.recentMu.Unlock() + overall = append([]RecentBatchStoreEntry(nil), s.recentStoreOverall...) + byIP = make(map[string][]RecentBatchStoreEntry, len(s.recentStoreByIP)) + for k, v := range s.recentStoreByIP { + byIP[k] = append([]RecentBatchStoreEntry(nil), v...) + } + return +} + +// RecentBatchRetrieveSnapshot returns copies of recent retrieve entries (overall and by IP) +func (s *Network) RecentBatchRetrieveSnapshot() (overall []RecentBatchRetrieveEntry, byIP map[string][]RecentBatchRetrieveEntry) { + s.recentMu.Lock() + defer s.recentMu.Unlock() + overall = append([]RecentBatchRetrieveEntry(nil), s.recentRetrieveOverall...) + byIP = make(map[string][]RecentBatchRetrieveEntry, len(s.recentRetrieveByIP)) + for k, v := range s.recentRetrieveByIP { + byIP[k] = append([]RecentBatchRetrieveEntry(nil), v...) 
+ } + return +} + +// helper to avoid unused import warning if needed +var _ = time.Now +var _ = sync.Mutex{} diff --git a/proto/supernode/supernode.proto b/proto/supernode/supernode.proto index edbff3b0..50597e90 100644 --- a/proto/supernode/supernode.proto +++ b/proto/supernode/supernode.proto @@ -154,6 +154,37 @@ message StatusResponse { repeated BanEntry ban_list = 4; DatabaseStats database = 5; DiskStatus disk = 6; + + // Last handled BatchStoreData requests (most recent first) + message RecentBatchStoreEntry { + int64 time_unix = 1; + string sender_id = 2; + string sender_ip = 3; + int32 keys = 4; + int64 duration_ms = 5; + bool ok = 6; + string error = 7; + } + + // Last handled BatchGetValues requests (most recent first) + message RecentBatchRetrieveEntry { + int64 time_unix = 1; + string sender_id = 2; + string sender_ip = 3; + int32 requested = 4; + int32 found = 5; + int64 duration_ms = 6; + string error = 7; + } + + repeated RecentBatchStoreEntry recent_batch_store = 7; + repeated RecentBatchRetrieveEntry recent_batch_retrieve = 8; + + // Per-IP buckets: last 10 per sender IP + message RecentBatchStoreList { repeated RecentBatchStoreEntry entries = 1; } + message RecentBatchRetrieveList { repeated RecentBatchRetrieveEntry entries = 1; } + map recent_batch_store_by_ip = 9; + map recent_batch_retrieve_by_ip = 10; } P2PMetrics p2p_metrics = 9; diff --git a/supernode/node/supernode/server/status_server.go b/supernode/node/supernode/server/status_server.go index 5a8cc156..d90b1e3e 100644 --- a/supernode/node/supernode/server/status_server.go +++ b/supernode/node/supernode/server/status_server.go @@ -174,6 +174,69 @@ func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) pbpm.Disk.UsedMb = pm.Disk.UsedMB pbpm.Disk.FreeMb = pm.Disk.FreeMB + // Recent batch store + for _, e := range pm.RecentBatchStore { + pbpm.RecentBatchStore = append(pbpm.RecentBatchStore, &pb.StatusResponse_P2PMetrics_RecentBatchStoreEntry{ + TimeUnix: e.TimeUnix, + 
SenderId: e.SenderID, + SenderIp: e.SenderIP, + Keys: int32(e.Keys), + DurationMs: e.DurationMS, + Ok: e.OK, + Error: e.Error, + }) + } + // Recent batch retrieve + for _, e := range pm.RecentBatchRetrieve { + pbpm.RecentBatchRetrieve = append(pbpm.RecentBatchRetrieve, &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{ + TimeUnix: e.TimeUnix, + SenderId: e.SenderID, + SenderIp: e.SenderIP, + Requested: int32(e.Requested), + Found: int32(e.Found), + DurationMs: e.DurationMS, + Error: e.Error, + }) + } + + // Per-IP buckets + if pm.RecentBatchStoreByIP != nil { + pbpm.RecentBatchStoreByIp = map[string]*pb.StatusResponse_P2PMetrics_RecentBatchStoreList{} + for ip, list := range pm.RecentBatchStoreByIP { + pbList := &pb.StatusResponse_P2PMetrics_RecentBatchStoreList{} + for _, e := range list { + pbList.Entries = append(pbList.Entries, &pb.StatusResponse_P2PMetrics_RecentBatchStoreEntry{ + TimeUnix: e.TimeUnix, + SenderId: e.SenderID, + SenderIp: e.SenderIP, + Keys: int32(e.Keys), + DurationMs: e.DurationMS, + Ok: e.OK, + Error: e.Error, + }) + } + pbpm.RecentBatchStoreByIp[ip] = pbList + } + } + if pm.RecentBatchRetrieveByIP != nil { + pbpm.RecentBatchRetrieveByIp = map[string]*pb.StatusResponse_P2PMetrics_RecentBatchRetrieveList{} + for ip, list := range pm.RecentBatchRetrieveByIP { + pbList := &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveList{} + for _, e := range list { + pbList.Entries = append(pbList.Entries, &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{ + TimeUnix: e.TimeUnix, + SenderId: e.SenderID, + SenderIp: e.SenderIP, + Requested: int32(e.Requested), + Found: int32(e.Found), + DurationMs: e.DurationMS, + Error: e.Error, + }) + } + pbpm.RecentBatchRetrieveByIp[ip] = pbList + } + } + response.P2PMetrics = pbpm } diff --git a/supernode/services/common/supernode/service.go b/supernode/services/common/supernode/service.go index c167eec2..13d5efe4 100644 --- a/supernode/services/common/supernode/service.go +++ 
b/supernode/services/common/supernode/service.go @@ -1,16 +1,16 @@ package supernode import ( - "context" - "fmt" - "time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/config" + "context" + "fmt" + "time" + + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/config" ) // Version is the supernode version, set by the main application @@ -125,130 +125,229 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric PeerAddresses: []string{}, } - // Prepare P2P metrics container (always present in response) - metrics := P2PMetrics{ - NetworkHandleMetrics: map[string]HandleCounters{}, - ConnPoolMetrics: map[string]int64{}, - BanList: []BanEntry{}, - } - - // Collect P2P network information and metrics (fill when available and requested) - if includeP2PMetrics && s.p2pService != nil { - p2pStats, err := s.p2pService.Stats(ctx) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get p2p stats", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { - if peersCount, ok := dhtStats["peers_count"].(int); ok { - resp.Network.PeersCount = int32(peersCount) - } - - // Extract peer addresses - if peers, ok := dhtStats["peers"].([]*kademlia.Node); ok { - resp.Network.PeerAddresses = make([]string, 0, len(peers)) - for _, peer := range peers { - // Format peer address as "ID@IP:Port" - peerAddr := 
fmt.Sprintf("%s@%s:%d", string(peer.ID), peer.IP, peer.Port) - resp.Network.PeerAddresses = append(resp.Network.PeerAddresses, peerAddr) - } - } else { - resp.Network.PeerAddresses = []string{} - } - } - - // Disk info - if du, ok := p2pStats["disk-info"].(utils.DiskStatus); ok { - metrics.Disk = DiskStatus{AllMB: du.All, UsedMB: du.Used, FreeMB: du.Free} - } else if duPtr, ok := p2pStats["disk-info"].(*utils.DiskStatus); ok && duPtr != nil { - metrics.Disk = DiskStatus{AllMB: duPtr.All, UsedMB: duPtr.Used, FreeMB: duPtr.Free} - } - - // Ban list - if bans, ok := p2pStats["ban-list"].([]kademlia.BanSnapshot); ok { - for _, b := range bans { - metrics.BanList = append(metrics.BanList, BanEntry{ - ID: b.ID, - IP: b.IP, - Port: uint32(b.Port), - Count: int32(b.Count), - CreatedAtUnix: b.CreatedAt.Unix(), - AgeSeconds: int64(b.Age.Seconds()), - }) - } - } - - // Conn pool metrics - if pool, ok := p2pStats["conn-pool"].(map[string]int64); ok { - for k, v := range pool { - metrics.ConnPoolMetrics[k] = v - } - } - - // DHT metrics and database/network counters live inside dht map - if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { - // Database - if db, ok := dhtStats["database"].(map[string]interface{}); ok { - var sizeMB float64 - if v, ok := db["p2p_db_size"].(float64); ok { - sizeMB = v - } - var recs int64 - switch v := db["p2p_db_records_count"].(type) { - case int: - recs = int64(v) - case int64: - recs = v - case float64: - recs = int64(v) - } - metrics.Database = DatabaseStats{P2PDBSizeMB: sizeMB, P2PDBRecordsCount: recs} - } - - // Network handle metrics - if nhm, ok := dhtStats["network"].(map[string]kademlia.HandleCounters); ok { - for k, c := range nhm { - metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} - } - } else if nhmI, ok := dhtStats["network"].(map[string]interface{}); ok { - for k, vi := range nhmI { - if c, ok := vi.(kademlia.HandleCounters); ok { - 
metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} - } - } - } - } - - // DHT rolling metrics snapshot is attached at top-level under dht_metrics - if snap, ok := p2pStats["dht_metrics"].(kademlia.DHTMetricsSnapshot); ok { - // Store success - for _, p := range snap.StoreSuccessRecent { - metrics.DhtMetrics.StoreSuccessRecent = append(metrics.DhtMetrics.StoreSuccessRecent, StoreSuccessPoint{ - TimeUnix: p.Time.Unix(), - Requests: int32(p.Requests), - Successful: int32(p.Successful), - SuccessRate: p.SuccessRate, - }) - } - // Batch retrieve - for _, p := range snap.BatchRetrieveRecent { - metrics.DhtMetrics.BatchRetrieveRecent = append(metrics.DhtMetrics.BatchRetrieveRecent, BatchRetrievePoint{ - TimeUnix: p.Time.Unix(), - Keys: int32(p.Keys), - Required: int32(p.Required), - FoundLocal: int32(p.FoundLocal), - FoundNetwork: int32(p.FoundNet), - DurationMS: p.Duration.Milliseconds(), - }) - } - metrics.DhtMetrics.HotPathBannedSkips = snap.HotPathBannedSkips - metrics.DhtMetrics.HotPathBanIncrements = snap.HotPathBanIncrements - } - } - } - - // Always include metrics (may be empty if not available) - resp.P2PMetrics = metrics + // Prepare P2P metrics container (always present in response) + metrics := P2PMetrics{ + NetworkHandleMetrics: map[string]HandleCounters{}, + ConnPoolMetrics: map[string]int64{}, + BanList: []BanEntry{}, + } + + // Collect P2P network information and metrics (fill when available and requested) + if includeP2PMetrics && s.p2pService != nil { + p2pStats, err := s.p2pService.Stats(ctx) + if err != nil { + // Log error but continue - non-critical + logtrace.Error(ctx, "failed to get p2p stats", logtrace.Fields{logtrace.FieldError: err.Error()}) + } else { + if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { + if peersCount, ok := dhtStats["peers_count"].(int); ok { + resp.Network.PeersCount = int32(peersCount) + } + + // Extract peer addresses + if peers, ok 
:= dhtStats["peers"].([]*kademlia.Node); ok { + resp.Network.PeerAddresses = make([]string, 0, len(peers)) + for _, peer := range peers { + // Format peer address as "ID@IP:Port" + peerAddr := fmt.Sprintf("%s@%s:%d", string(peer.ID), peer.IP, peer.Port) + resp.Network.PeerAddresses = append(resp.Network.PeerAddresses, peerAddr) + } + } else { + resp.Network.PeerAddresses = []string{} + } + } + + // Disk info + if du, ok := p2pStats["disk-info"].(utils.DiskStatus); ok { + metrics.Disk = DiskStatus{AllMB: du.All, UsedMB: du.Used, FreeMB: du.Free} + } else if duPtr, ok := p2pStats["disk-info"].(*utils.DiskStatus); ok && duPtr != nil { + metrics.Disk = DiskStatus{AllMB: duPtr.All, UsedMB: duPtr.Used, FreeMB: duPtr.Free} + } + + // Ban list + if bans, ok := p2pStats["ban-list"].([]kademlia.BanSnapshot); ok { + for _, b := range bans { + metrics.BanList = append(metrics.BanList, BanEntry{ + ID: b.ID, + IP: b.IP, + Port: uint32(b.Port), + Count: int32(b.Count), + CreatedAtUnix: b.CreatedAt.Unix(), + AgeSeconds: int64(b.Age.Seconds()), + }) + } + } + + // Conn pool metrics + if pool, ok := p2pStats["conn-pool"].(map[string]int64); ok { + for k, v := range pool { + metrics.ConnPoolMetrics[k] = v + } + } + + // DHT metrics and database/network counters live inside dht map + if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { + // Database + if db, ok := dhtStats["database"].(map[string]interface{}); ok { + var sizeMB float64 + if v, ok := db["p2p_db_size"].(float64); ok { + sizeMB = v + } + var recs int64 + switch v := db["p2p_db_records_count"].(type) { + case int: + recs = int64(v) + case int64: + recs = v + case float64: + recs = int64(v) + } + metrics.Database = DatabaseStats{P2PDBSizeMB: sizeMB, P2PDBRecordsCount: recs} + } + + // Network handle metrics + if nhm, ok := dhtStats["network"].(map[string]kademlia.HandleCounters); ok { + for k, c := range nhm { + metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, Success: c.Success, Failure: 
c.Failure, Timeout: c.Timeout} + } + } else if nhmI, ok := dhtStats["network"].(map[string]interface{}); ok { + for k, vi := range nhmI { + if c, ok := vi.(kademlia.HandleCounters); ok { + metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} + } + } + } + + // Recent batch store/retrieve (overall lists) + if rbs, ok := dhtStats["recent_batch_store_overall"].([]kademlia.RecentBatchStoreEntry); ok { + for _, e := range rbs { + metrics.RecentBatchStore = append(metrics.RecentBatchStore, RecentBatchStoreEntry{ + TimeUnix: e.TimeUnix, + SenderID: e.SenderID, + SenderIP: e.SenderIP, + Keys: e.Keys, + DurationMS: e.DurationMS, + OK: e.OK, + Error: e.Error, + }) + } + } else if anyList, ok := dhtStats["recent_batch_store_overall"].([]interface{}); ok { + for _, vi := range anyList { + if e, ok := vi.(kademlia.RecentBatchStoreEntry); ok { + metrics.RecentBatchStore = append(metrics.RecentBatchStore, RecentBatchStoreEntry{ + TimeUnix: e.TimeUnix, + SenderID: e.SenderID, + SenderIP: e.SenderIP, + Keys: e.Keys, + DurationMS: e.DurationMS, + OK: e.OK, + Error: e.Error, + }) + } + } + } + if rbr, ok := dhtStats["recent_batch_retrieve_overall"].([]kademlia.RecentBatchRetrieveEntry); ok { + for _, e := range rbr { + metrics.RecentBatchRetrieve = append(metrics.RecentBatchRetrieve, RecentBatchRetrieveEntry{ + TimeUnix: e.TimeUnix, + SenderID: e.SenderID, + SenderIP: e.SenderIP, + Requested: e.Requested, + Found: e.Found, + DurationMS: e.DurationMS, + Error: e.Error, + }) + } + } else if anyList, ok := dhtStats["recent_batch_retrieve_overall"].([]interface{}); ok { + for _, vi := range anyList { + if e, ok := vi.(kademlia.RecentBatchRetrieveEntry); ok { + metrics.RecentBatchRetrieve = append(metrics.RecentBatchRetrieve, RecentBatchRetrieveEntry{ + TimeUnix: e.TimeUnix, + SenderID: e.SenderID, + SenderIP: e.SenderIP, + Requested: e.Requested, + Found: e.Found, + DurationMS: e.DurationMS, + Error: e.Error, + }) + 
} + } + } + + // Per-IP buckets + if byip, ok := dhtStats["recent_batch_store_by_ip"].(map[string][]kademlia.RecentBatchStoreEntry); ok { + for ip, list := range byip { + bucket := make([]RecentBatchStoreEntry, 0, len(list)) + for _, e := range list { + bucket = append(bucket, RecentBatchStoreEntry{ + TimeUnix: e.TimeUnix, + SenderID: e.SenderID, + SenderIP: e.SenderIP, + Keys: e.Keys, + DurationMS: e.DurationMS, + OK: e.OK, + Error: e.Error, + }) + } + // initialize map if needed + if metrics.RecentBatchStoreByIP == nil { + metrics.RecentBatchStoreByIP = map[string][]RecentBatchStoreEntry{} + } + metrics.RecentBatchStoreByIP[ip] = bucket + } + } + if byip, ok := dhtStats["recent_batch_retrieve_by_ip"].(map[string][]kademlia.RecentBatchRetrieveEntry); ok { + for ip, list := range byip { + bucket := make([]RecentBatchRetrieveEntry, 0, len(list)) + for _, e := range list { + bucket = append(bucket, RecentBatchRetrieveEntry{ + TimeUnix: e.TimeUnix, + SenderID: e.SenderID, + SenderIP: e.SenderIP, + Requested: e.Requested, + Found: e.Found, + DurationMS: e.DurationMS, + Error: e.Error, + }) + } + if metrics.RecentBatchRetrieveByIP == nil { + metrics.RecentBatchRetrieveByIP = map[string][]RecentBatchRetrieveEntry{} + } + metrics.RecentBatchRetrieveByIP[ip] = bucket + } + } + } + + // DHT rolling metrics snapshot is attached at top-level under dht_metrics + if snap, ok := p2pStats["dht_metrics"].(kademlia.DHTMetricsSnapshot); ok { + // Store success + for _, p := range snap.StoreSuccessRecent { + metrics.DhtMetrics.StoreSuccessRecent = append(metrics.DhtMetrics.StoreSuccessRecent, StoreSuccessPoint{ + TimeUnix: p.Time.Unix(), + Requests: int32(p.Requests), + Successful: int32(p.Successful), + SuccessRate: p.SuccessRate, + }) + } + // Batch retrieve + for _, p := range snap.BatchRetrieveRecent { + metrics.DhtMetrics.BatchRetrieveRecent = append(metrics.DhtMetrics.BatchRetrieveRecent, BatchRetrievePoint{ + TimeUnix: p.Time.Unix(), + Keys: int32(p.Keys), + Required: 
int32(p.Required), + FoundLocal: int32(p.FoundLocal), + FoundNetwork: int32(p.FoundNet), + DurationMS: p.Duration.Milliseconds(), + }) + } + metrics.DhtMetrics.HotPathBannedSkips = snap.HotPathBannedSkips + metrics.DhtMetrics.HotPathBanIncrements = snap.HotPathBanIncrements + } + } + } + + // Always include metrics (may be empty if not available) + resp.P2PMetrics = metrics // Calculate rank from top supernodes if s.lumeraClient != nil && s.config != nil { diff --git a/supernode/services/common/supernode/types.go b/supernode/services/common/supernode/types.go index 032aa0ee..9a6f0953 100644 --- a/supernode/services/common/supernode/types.go +++ b/supernode/services/common/supernode/types.go @@ -3,23 +3,23 @@ package supernode // StatusResponse represents the complete system status information // with clear organization of resources and services type StatusResponse struct { - Version string // Supernode version - UptimeSeconds uint64 // Uptime in seconds - Resources Resources // System resource information - RunningTasks []ServiceTasks // Services with currently running tasks - RegisteredServices []string // All registered/available services - Network NetworkInfo // P2P network information - Rank int32 // Rank in the top supernodes list (0 if not in top list) - IPAddress string // Supernode IP address with port (e.g., "192.168.1.1:4445") - P2PMetrics P2PMetrics // Detailed P2P metrics snapshot + Version string // Supernode version + UptimeSeconds uint64 // Uptime in seconds + Resources Resources // System resource information + RunningTasks []ServiceTasks // Services with currently running tasks + RegisteredServices []string // All registered/available services + Network NetworkInfo // P2P network information + Rank int32 // Rank in the top supernodes list (0 if not in top list) + IPAddress string // Supernode IP address with port (e.g., "192.168.1.1:4445") + P2PMetrics P2PMetrics // Detailed P2P metrics snapshot } // Resources contains system resource metrics type 
Resources struct { - CPU CPUInfo // CPU usage information - Memory MemoryInfo // Memory usage information - Storage []StorageInfo // Storage volumes information - HardwareSummary string // Formatted hardware summary (e.g., "8 cores / 32GB RAM") + CPU CPUInfo // CPU usage information + Memory MemoryInfo // Memory usage information + Storage []StorageInfo // Storage volumes information + HardwareSummary string // Formatted hardware summary (e.g., "8 cores / 32GB RAM") } // CPUInfo contains CPU usage metrics @@ -54,68 +54,92 @@ type ServiceTasks struct { // NetworkInfo contains P2P network information type NetworkInfo struct { - PeersCount int32 // Number of connected peers in P2P network - PeerAddresses []string // List of connected peer addresses (optional, may be empty for privacy) + PeersCount int32 // Number of connected peers in P2P network + PeerAddresses []string // List of connected peer addresses (optional, may be empty for privacy) } // P2PMetrics mirrors the proto P2P metrics for status API type P2PMetrics struct { - DhtMetrics DhtMetrics - NetworkHandleMetrics map[string]HandleCounters - ConnPoolMetrics map[string]int64 - BanList []BanEntry - Database DatabaseStats - Disk DiskStatus + DhtMetrics DhtMetrics + NetworkHandleMetrics map[string]HandleCounters + ConnPoolMetrics map[string]int64 + BanList []BanEntry + Database DatabaseStats + Disk DiskStatus + RecentBatchStore []RecentBatchStoreEntry + RecentBatchRetrieve []RecentBatchRetrieveEntry + RecentBatchStoreByIP map[string][]RecentBatchStoreEntry + RecentBatchRetrieveByIP map[string][]RecentBatchRetrieveEntry } type StoreSuccessPoint struct { - TimeUnix int64 - Requests int32 - Successful int32 - SuccessRate float64 + TimeUnix int64 + Requests int32 + Successful int32 + SuccessRate float64 } type BatchRetrievePoint struct { - TimeUnix int64 - Keys int32 - Required int32 - FoundLocal int32 - FoundNetwork int32 - DurationMS int64 + TimeUnix int64 + Keys int32 + Required int32 + FoundLocal int32 + 
FoundNetwork int32 + DurationMS int64 } type DhtMetrics struct { - StoreSuccessRecent []StoreSuccessPoint - BatchRetrieveRecent []BatchRetrievePoint - HotPathBannedSkips int64 - HotPathBanIncrements int64 + StoreSuccessRecent []StoreSuccessPoint + BatchRetrieveRecent []BatchRetrievePoint + HotPathBannedSkips int64 + HotPathBanIncrements int64 } type HandleCounters struct { - Total int64 - Success int64 - Failure int64 - Timeout int64 + Total int64 + Success int64 + Failure int64 + Timeout int64 } type BanEntry struct { - ID string - IP string - Port uint32 - Count int32 - CreatedAtUnix int64 - AgeSeconds int64 + ID string + IP string + Port uint32 + Count int32 + CreatedAtUnix int64 + AgeSeconds int64 } type DatabaseStats struct { - P2PDBSizeMB float64 - P2PDBRecordsCount int64 + P2PDBSizeMB float64 + P2PDBRecordsCount int64 } type DiskStatus struct { - AllMB float64 - UsedMB float64 - FreeMB float64 + AllMB float64 + UsedMB float64 + FreeMB float64 +} + +type RecentBatchStoreEntry struct { + TimeUnix int64 + SenderID string + SenderIP string + Keys int + DurationMS int64 + OK bool + Error string +} + +type RecentBatchRetrieveEntry struct { + TimeUnix int64 + SenderID string + SenderIP string + Requested int + Found int + DurationMS int64 + Error string } // TaskProvider interface defines the contract for services to provide