diff --git a/.github/workflows/build&release.yml b/.github/workflows/build&release.yml
index ead3e013..3dbf21bf 100644
--- a/.github/workflows/build&release.yml
+++ b/.github/workflows/build&release.yml
@@ -82,6 +82,9 @@ jobs:
echo "binary_name=supernode-linux-amd64" >> $GITHUB_OUTPUT
- name: Build Release Version
+ env:
+ DD_API_KEY: ${{ secrets.DD_API_KEY }}
+ DD_SITE: ${{ secrets.DD_SITE }}
run: |
mkdir -p release
@@ -94,7 +97,9 @@ jobs:
-ldflags="-s -w \
-X github.com/LumeraProtocol/supernode/v2/supernode/cmd.Version=${{ steps.vars.outputs.version }} \
-X github.com/LumeraProtocol/supernode/v2/supernode/cmd.GitCommit=${{ steps.vars.outputs.git_commit }} \
- -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=${{ steps.vars.outputs.build_time }}" \
+ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=${{ steps.vars.outputs.build_time }} \
+ -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDAPIKey=${DD_API_KEY} \
+ -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDSite=${DD_SITE}" \
-o release/supernode \
./supernode
diff --git a/Makefile b/Makefile
index fe5a9852..fd9dfebf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
.PHONY: build build-release build-sncli build-sn-manager
-.PHONY: install-lumera setup-supernodes system-test-setup
+.PHONY: install-lumera setup-supernodes system-test-setup install-deps
.PHONY: gen-cascade gen-supernode
.PHONY: test-e2e test-unit test-integration test-system
@@ -11,7 +11,9 @@ BUILD_TIME ?= $(shell date -u '+%Y-%m-%d_%H:%M:%S')
# Linker flags for version information
LDFLAGS = -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.Version=$(VERSION) \
-X github.com/LumeraProtocol/supernode/v2/supernode/cmd.GitCommit=$(GIT_COMMIT) \
- -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=$(BUILD_TIME)
+ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=$(BUILD_TIME) \
+ -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDAPIKey=$(DD_API_KEY) \
+ -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDSite=$(DD_SITE)
# Linker flags for sn-manager
SN_MANAGER_LDFLAGS = -X main.Version=$(VERSION) \
@@ -96,7 +98,7 @@ gen-supernode:
--grpc-gateway_out=gen \
--grpc-gateway_opt=paths=source_relative \
--openapiv2_out=gen \
- proto/supernode/supernode.proto
+ proto/supernode/service.proto proto/supernode/status.proto
# Define the paths
SUPERNODE_SRC=supernode/main.go
@@ -140,9 +142,9 @@ test-e2e:
# Run cascade e2e tests only
test-cascade:
@echo "Running cascade e2e tests..."
- @cd tests/system && go test -tags=system_test -v -run TestCascadeE2E .
+ @cd tests/system && go mod tidy && go test -tags=system_test -v -run TestCascadeE2E .
# Run sn-manager e2e tests only
test-sn-manager:
@echo "Running sn-manager e2e tests..."
- @cd tests/system && go test -tags=system_test -v -run '^TestSNManager' .
\ No newline at end of file
+ @cd tests/system && go test -tags=system_test -v -run '^TestSNManager' .
diff --git a/docs/cascade-store-artifacts.md b/docs/cascade-store-artifacts.md
index 880f5418..c2cf4892 100644
--- a/docs/cascade-store-artifacts.md
+++ b/docs/cascade-store-artifacts.md
@@ -1,6 +1,6 @@
# Cascade Artefacts Storage Flow
-This document explains, in depth, how Cascade artefacts (ID files + RaptorQ symbols) are persisted to the P2P network, the control flow from the API to the P2P layer, what metrics are collected, and which background workers continue the process after the API call returns.
+This document explains how Cascade artefacts (ID files + RaptorQ symbols) are persisted to the P2P network, the control flow from the API to the P2P layer, and which background workers continue the process after the API call returns.
## Scope & Terminology
@@ -50,14 +50,13 @@ Function: `supernode/services/cascade/helper.go::storeArtefacts`
- `SymbolsDir string`: filesystem directory where symbols were written.
- `TaskID string` and `ActionID string`: identifiers for logging and DB association.
-Returns `StoreArtefactsMetrics` with separate metrics for metadata and symbols plus an aggregated view.
+It does not return metrics; logs provide visibility.
## P2P Adaptor: StoreArtefacts
Implementation: `supernode/services/cascade/adaptors/p2p.go`
-1) Store metadata (ID files) using `p2p.Client.StoreBatch(...)`:
- - Returns `metaRatePct` and `metaRequests` (count of per‑node RPCs attempted during this batch store).
+1) Store metadata (ID files) using `p2p.Client.StoreBatch(...)`.
2) Store symbols using `storeCascadeSymbols(...)`:
- Records the symbol directory in a small SQLite store: `rqStore.StoreSymbolDirectory(taskID, symbolsDir)`.
@@ -65,12 +64,10 @@ Implementation: `supernode/services/cascade/adaptors/p2p.go`
- Streams symbols in fixed‑size batches of 2,500 files:
- Each batch loads files, calls `p2p.Client.StoreBatch(...)` with a 5‑minute timeout, and deletes successfully uploaded files.
- Marks “first batch stored” for this action: `rqStore.UpdateIsFirstBatchStored(actionID)`.
- - Returns `(symRatePct, symCount, symRequests)`.
+ - Logs counts and timings; no metrics are returned.
-3) Aggregation and return:
- - Computes item‑weighted aggregate success rate across metadata and symbols: `aggRate = (metaRate*metaCount + symRate*symCount) / (metaCount + symCount)`.
- - Total requests = `metaRequests + symRequests`.
- - Returns `StoreArtefactsMetrics` with all fields populated.
+3) Return:
+ - No metrics aggregation; return indicates success/failure only.
Notes:
- This adaptor only performs a first pass of symbol storage. For large directories it may downsample; the background worker completes the remaining symbols later (see Background Worker section).
@@ -83,9 +80,7 @@ Notes:
- Network store: `DHT.IterateBatchStore(ctx, values, typ, taskID)`:
- For each value, compute its Blake3 hash; compute the top‑K closest nodes from the routing table.
- Build a node→items map and invoke `batchStoreNetwork(...)` with bounded concurrency (a goroutine per node, limited via a semaphore; all joined before returning).
- - Tally per‑node RPC attempts (requests) and successes to compute `successRatePct`.
- - If the measured rate is below `minimumDataStoreSuccessRate` (75%), return an error along with `(ratePct, requests)`.
- - Otherwise, return `(ratePct, requests, nil)`.
+ - If the measured success rate is below an internal threshold (`minimumDataStoreSuccessRate`, currently 75%), DHT returns an error.
Important distinctions:
- `requests` is the number of per‑node RPCs attempted; it is not the number of items in the batch.
@@ -93,13 +88,7 @@ Important distinctions:
## Metrics & Events
-Returned metrics (from `StoreArtefacts`):
-
-- Metadata: `MetaRate` (%), `MetaRequests`, `MetaCount`.
-- Symbols: `SymRate` (%), `SymRequests`, `SymCount`.
-- Aggregate: `AggregatedRate` (item‑weighted), `TotalRequests`.
-
-`Register` logs and emits a single event line summarizing these metrics via `emitArtefactsStored(...)`, then proceeds to finalize the action on chain.
+`Register` logs and emits an informational event (Artefacts stored), then proceeds to finalize the action on chain.
## Background Worker (Symbols Continuation)
@@ -161,4 +150,3 @@ These values can be tuned in:
- First pass deletes uploaded symbol files per batch (`utils.DeleteSymbols`) after a successful store batch.
- Background worker also deletes files after each batch store.
- The uploaded raw input file is removed by `Register` in a `defer` block regardless of outcome.
-
diff --git a/docs/p2p-metrics-capture.md b/docs/p2p-metrics-capture.md
index 6cbafebf..b13bc393 100644
--- a/docs/p2p-metrics-capture.md
+++ b/docs/p2p-metrics-capture.md
@@ -1,186 +1,23 @@
-# P2P Metrics Capture — What Each Field Means and Where It’s Collected
+# P2P Metrics — Current Behavior
-This guide explains every field we emit in Cascade events, how it is measured, and exactly where it is captured in the code.
+We removed the custom per‑RPC metrics capture and the `pkg/p2pmetrics` package. Logs are the source of truth for store/retrieve visibility, and the Status API provides a rolling DHT snapshot for high‑level metrics.
-The design is minimal by intent:
-- Metrics are collected only for the first pass of Register (store) and for the active Download operation.
-- P2P APIs return errors only; per‑RPC details are captured via a small metrics package (`pkg/p2pmetrics`).
-- No aggregation; we only group raw RPC attempts by IP.
+## What remains
+- Status API metrics: DHT rolling windows (store success, batch retrieve), network handle counters, ban list, DB/disk stats, and connection pool metrics.
+- Logs: detailed send/ok/fail lines for RPCs at both client and server.
----
+## What was removed
+- Per‑RPC metrics capture and grouping by IP for events.
+- Metrics collectors and context tagging helpers.
+- Recent per‑request lists from the Status API.
-## Store (Register) Event
+## Events
+- The supernode emits minimal events (e.g., artefacts stored, downloaded). These events no longer include metrics payloads. Use logs for detailed troubleshooting.
-Event payload shape
+## Status API
+- To include P2P metrics and peer info, clients set `include_p2p_metrics=true` on `StatusRequest`.
+- The SDK adapter already includes this flag by default to populate peer count for eligibility checks.
-```json
-{
- "store": {
- "duration_ms": 9876,
- "symbols_first_pass": 220,
- "symbols_total": 1200,
- "id_files_count": 14,
- "success_rate_pct": 82.5,
- "calls_by_ip": {
- "10.0.0.5": [
- {"ip": "10.0.0.5", "address": "A:4445", "keys": 100, "success": true, "duration_ms": 120},
- {"ip": "10.0.0.5", "address": "A:4445", "keys": 120, "success": false, "error": "timeout", "duration_ms": 300}
- ]
- }
- }
-}
-```
-
-### Fields
-
-- `store.duration_ms`
- - Meaning: End‑to‑end elapsed time of the first‑pass store phase (Register’s storage section only).
- - Where captured: `supernode/services/cascade/adaptors/p2p.go`
- - A `time.Now()` timestamp is taken just before the first‑pass store function and measured on return.
-
-- `store.symbols_first_pass`
- - Meaning: Number of symbols sent during the Register first pass (across the combined first batch and any immediate first‑pass symbol batches).
- - Where captured: `supernode/services/cascade/adaptors/p2p.go` via `p2pmetrics.SetStoreSummary(...)` using the value returned by `storeCascadeSymbolsAndData`.
-
-- `store.symbols_total`
- - Meaning: Total symbols available in the symbol directory (before sampling). Used to contextualize the first‑pass coverage.
- - Where captured: Computed in `storeCascadeSymbolsAndData` and included in `SetStoreSummary`.
-
-- `store.id_files_count`
- - Meaning: Number of redundant metadata files (ID files) sent in the first combined batch.
- - Where captured: `len(req.IDFiles)` in `StoreArtefacts`, passed to `SetStoreSummary`.
-
-- `store.calls_by_ip`
- - Meaning: All raw network store RPC attempts grouped by the node IP.
- - Each array entry is a single RPC attempt with:
- - `ip` — Node IP (fallback to `address` if missing).
- - `address` — Node string `IP:port`.
- - `keys` — Number of items in that RPC attempt (metadata + first symbols for the first combined batch, symbols for subsequent batches within the first pass).
- - `success` — True if there was no transport error and no error message returned by the node response. Note: this flag does not explicitly check the `ResultOk` status; in rare cases, a non‑OK response with an empty error message may appear as `success` in metrics. (Internal success‑rate enforcement still uses explicit response status.)
- - `error` — Any error string captured; omitted when success.
- - `duration_ms` — RPC duration in milliseconds.
- - `noop` — Present and `true` when no store payload was sent to the node (empty batch for that node). Such entries are recorded as `success=true`, `keys=0`, with no `error`.
- - Where captured:
- - Emission point (P2P): `p2p/kademlia/dht.go::IterateBatchStore(...)`
- - After each node RPC returns, we call `p2pmetrics.RecordStore(taskID, Call{...})`. For nodes with no payload, a `noop: true` entry is emitted without sending a wire RPC.
- - `taskID` is read from the context via `p2pmetrics.TaskIDFromContext(ctx)`.
- - Grouping: `pkg/p2pmetrics/metrics.go`
- - `StartStoreCapture(taskID)` enables capture; `StopStoreCapture(taskID)` disables it.
- - Calls are grouped by `ip` (fallback to `address`) without further aggregation.
-
-- `store.success_rate_pct`
- - Meaning: First‑pass store success rate computed from captured per‑RPC outcomes: successful responses divided by total recorded store RPC attempts, expressed as a percentage.
- - Where captured: Computed in `pkg/p2pmetrics/metrics.go::BuildStoreEventPayloadFromCollector` from `calls_by_ip` data.
-
-### First‑Pass Success Threshold
-
-- Internal enforcement only: if DHT first‑pass success rate is below 75%, `IterateBatchStore` returns an error.
-- We also emit `store.success_rate_pct` for analytics; the threshold only affects control flow (errors), not the emitted metric.
-- Code: `p2p/kademlia/dht.go::IterateBatchStore`.
-
-### Scope Limits
-
-- Background worker (which continues storing remaining symbols) is NOT captured — we don’t set a metrics task ID on those paths.
-
----
-
-## Download Event
-
-Event payload shape
-
-```json
-{
- "retrieve": {
- "found_local": 42,
- "retrieve_ms": 2000,
- "decode_ms": 8000,
- "calls_by_ip": {
- "10.0.0.7": [
- {"ip": "10.0.0.7", "address": "B:4445", "keys": 13, "success": true, "duration_ms": 90}
- ]
- }
- }
-}
-```
-
-### Fields
-
-- `retrieve.found_local`
- - Meaning: Number of items retrieved from local storage before any network calls.
- - Where captured: `p2p/kademlia/dht.go::BatchRetrieve(...)`
- - After `fetchAndAddLocalKeys`, we call `p2pmetrics.ReportFoundLocal(taskID, int(foundLocalCount))`.
- - `taskID` is read from context with `p2pmetrics.TaskIDFromContext(ctx)`.
-
-- `retrieve.retrieve_ms`
- - Meaning: Time spent in network batch‑retrieve.
- - Where captured: `supernode/services/cascade/download.go`
- - Timestamp before `BatchRetrieve`, measured after it returns.
-
-- `retrieve.decode_ms`
- - Meaning: Time spent decoding symbols and reconstructing the file.
- - Where captured: `supernode/services/cascade/download.go`
- - Timestamp before decode, measured after it returns.
-
-- `retrieve.calls_by_ip`
- - Meaning: All raw per‑RPC retrieve attempts grouped by node IP.
- - Each array entry is a single RPC attempt with:
- - `ip`, `address` — Identifiers as available.
- - `keys` — Number of symbols returned by that node in that call.
- - `success` — True if the RPC completed without error (even if `keys == 0`). Transport/status errors remain `success=false` with an `error` message.
- - `error` — Error string when the RPC failed; omitted otherwise.
- - `duration_ms` — RPC duration in milliseconds.
- - `noop` — Present and `true` when no network request was actually sent to the node (e.g., all requested keys were already satisfied or deduped before issuing the call). Such entries are recorded as `success=true`, `keys=0`, with no `error`.
- - Where captured:
- - Emission point (P2P): `p2p/kademlia/dht.go::iterateBatchGetValues(...)`
- - Each node attempt records a `p2pmetrics.RecordRetrieve(taskID, Call{...})`. For attempts where no network RPC is sent, a `noop: true` entry is emitted.
- - `taskID` is extracted from context using `p2pmetrics.TaskIDFromContext(ctx)`.
- - Grouping: `pkg/p2pmetrics/metrics.go` (same grouping/fallback as store).
-
-### Scope Limits
-
-- Metrics are captured only for the active Download call (context is tagged in `download.go`).
-
----
-
-## Context Tagging (Task ID)
-
-- We use an explicit, metrics‑only context key defined in `pkg/p2pmetrics` to tag P2P calls with a task ID.
- - Setters: `p2pmetrics.WithTaskID(ctx, id)`.
- - Getters: `p2pmetrics.TaskIDFromContext(ctx)`.
-- Where it is set:
- - Store (first pass): `supernode/services/cascade/adaptors/p2p.go` wraps `StoreBatch` calls.
- - Download: `supernode/services/cascade/download.go` wraps `BatchRetrieve` call.
-
----
-
-## Building and Emitting Events
-
-- Store
- - `supernode/services/cascade/helper.go::emitArtefactsStored(...)`
- - Builds `store` payload via `p2pmetrics.BuildStoreEventPayloadFromCollector(taskID)`.
- - Includes `success_rate_pct` (first‑pass store success rate computed from captured per‑RPC outcomes) in addition to the minimal fields.
- - Emits the event.
-
-- Download
- - `supernode/services/cascade/download.go`
- - Builds `retrieve` payload via `p2pmetrics.BuildDownloadEventPayloadFromCollector(actionID)`.
- - Emits the event.
-
----
-
-## Quick File Map
-
-- Capture + grouping: `supernode/pkg/p2pmetrics/metrics.go`
-- Store adaptor: `supernode/supernode/services/cascade/adaptors/p2p.go`
-- Store event: `supernode/supernode/services/cascade/helper.go`
-- Download flow: `supernode/supernode/services/cascade/download.go`
-- DHT store calls: `supernode/p2p/kademlia/dht.go::IterateBatchStore`
-- DHT retrieve calls: `supernode/p2p/kademlia/dht.go::BatchRetrieve` and `iterateBatchGetValues`
-
----
-
-## Notes
-
-- No P2P stats/snapshots are used to build events.
-- No aggregation is performed; we only group raw RPC attempts by IP.
-- First‑pass success rate is enforced internally (75% threshold) but not emitted as a metric.
+## References
+- Status proto: `proto/supernode/status.proto`
+- Service proto: `proto/supernode/service.proto`
diff --git a/gen/supernode/service.pb.go b/gen/supernode/service.pb.go
new file mode 100644
index 00000000..b8399095
--- /dev/null
+++ b/gen/supernode/service.pb.go
@@ -0,0 +1,263 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v3.21.12
+// source: supernode/service.proto
+
+package supernode
+
+import (
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ListServicesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *ListServicesRequest) Reset() {
+ *x = ListServicesRequest{}
+ mi := &file_supernode_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServicesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServicesRequest) ProtoMessage() {}
+
+func (x *ListServicesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead.
+func (*ListServicesRequest) Descriptor() ([]byte, []int) {
+ return file_supernode_service_proto_rawDescGZIP(), []int{0}
+}
+
+type ListServicesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"`
+ Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (x *ListServicesResponse) Reset() {
+ *x = ListServicesResponse{}
+ mi := &file_supernode_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServicesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServicesResponse) ProtoMessage() {}
+
+func (x *ListServicesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead.
+func (*ListServicesResponse) Descriptor() ([]byte, []int) {
+ return file_supernode_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListServicesResponse) GetServices() []*ServiceInfo {
+ if x != nil {
+ return x.Services
+ }
+ return nil
+}
+
+func (x *ListServicesResponse) GetCount() int32 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+type ServiceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"`
+}
+
+func (x *ServiceInfo) Reset() {
+ *x = ServiceInfo{}
+ mi := &file_supernode_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServiceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceInfo) ProtoMessage() {}
+
+func (x *ServiceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceInfo.ProtoReflect.Descriptor instead.
+func (*ServiceInfo) Descriptor() ([]byte, []int) {
+ return file_supernode_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ServiceInfo) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ServiceInfo) GetMethods() []string {
+ if x != nil {
+ return x.Methods
+ }
+ return nil
+}
+
+var File_supernode_service_proto protoreflect.FileDescriptor
+
+var file_supernode_service_proto_rawDesc = []byte{
+ 0x0a, 0x17, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72,
+ 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x16, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x69,
+ 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x22, 0x60, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x75,
+ 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49,
+ 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x14, 0x0a,
+ 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x22, 0x3b, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e,
+ 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73,
+ 0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73,
+ 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12,
+ 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12,
+ 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
+ 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64,
+ 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f,
+ 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_supernode_service_proto_rawDescOnce sync.Once
+ file_supernode_service_proto_rawDescData = file_supernode_service_proto_rawDesc
+)
+
+func file_supernode_service_proto_rawDescGZIP() []byte {
+ file_supernode_service_proto_rawDescOnce.Do(func() {
+ file_supernode_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_service_proto_rawDescData)
+ })
+ return file_supernode_service_proto_rawDescData
+}
+
+var file_supernode_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_supernode_service_proto_goTypes = []any{
+ (*ListServicesRequest)(nil), // 0: supernode.ListServicesRequest
+ (*ListServicesResponse)(nil), // 1: supernode.ListServicesResponse
+ (*ServiceInfo)(nil), // 2: supernode.ServiceInfo
+ (*StatusRequest)(nil), // 3: supernode.StatusRequest
+ (*StatusResponse)(nil), // 4: supernode.StatusResponse
+}
+var file_supernode_service_proto_depIdxs = []int32{
+ 2, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo
+ 3, // 1: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest
+ 0, // 2: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest
+ 4, // 3: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse
+ 1, // 4: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse
+ 3, // [3:5] is the sub-list for method output_type
+ 1, // [1:3] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_supernode_service_proto_init() }
+func file_supernode_service_proto_init() {
+ if File_supernode_service_proto != nil {
+ return
+ }
+ file_supernode_status_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_supernode_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_supernode_service_proto_goTypes,
+ DependencyIndexes: file_supernode_service_proto_depIdxs,
+ MessageInfos: file_supernode_service_proto_msgTypes,
+ }.Build()
+ File_supernode_service_proto = out.File
+ file_supernode_service_proto_rawDesc = nil
+ file_supernode_service_proto_goTypes = nil
+ file_supernode_service_proto_depIdxs = nil
+}
diff --git a/gen/supernode/supernode.pb.gw.go b/gen/supernode/service.pb.gw.go
similarity index 99%
rename from gen/supernode/supernode.pb.gw.go
rename to gen/supernode/service.pb.gw.go
index 0976b8b7..326bccf3 100644
--- a/gen/supernode/supernode.pb.gw.go
+++ b/gen/supernode/service.pb.gw.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: supernode/supernode.proto
+// source: supernode/service.proto
/*
Package supernode is a reverse proxy.
diff --git a/gen/supernode/supernode.swagger.json b/gen/supernode/service.swagger.json
similarity index 81%
rename from gen/supernode/supernode.swagger.json
rename to gen/supernode/service.swagger.json
index 00a47bb8..08140033 100644
--- a/gen/supernode/supernode.swagger.json
+++ b/gen/supernode/service.swagger.json
@@ -1,7 +1,7 @@
{
"swagger": "2.0",
"info": {
- "title": "supernode/supernode.proto",
+ "title": "supernode/service.proto",
"version": "version not set"
},
"tags": [
@@ -249,92 +249,6 @@
},
"title": "Per-handler counters from network layer"
},
- "P2PMetricsRecentBatchRetrieveEntry": {
- "type": "object",
- "properties": {
- "timeUnix": {
- "type": "string",
- "format": "int64"
- },
- "senderId": {
- "type": "string"
- },
- "senderIp": {
- "type": "string"
- },
- "requested": {
- "type": "integer",
- "format": "int32"
- },
- "found": {
- "type": "integer",
- "format": "int32"
- },
- "durationMs": {
- "type": "string",
- "format": "int64"
- },
- "error": {
- "type": "string"
- }
- },
- "title": "Last handled BatchGetValues requests (most recent first)"
- },
- "P2PMetricsRecentBatchRetrieveList": {
- "type": "object",
- "properties": {
- "entries": {
- "type": "array",
- "items": {
- "type": "object",
- "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveEntry"
- }
- }
- }
- },
- "P2PMetricsRecentBatchStoreEntry": {
- "type": "object",
- "properties": {
- "timeUnix": {
- "type": "string",
- "format": "int64"
- },
- "senderId": {
- "type": "string"
- },
- "senderIp": {
- "type": "string"
- },
- "keys": {
- "type": "integer",
- "format": "int32"
- },
- "durationMs": {
- "type": "string",
- "format": "int64"
- },
- "ok": {
- "type": "boolean"
- },
- "error": {
- "type": "string"
- }
- },
- "title": "Last handled BatchStoreData requests (most recent first)"
- },
- "P2PMetricsRecentBatchStoreList": {
- "type": "object",
- "properties": {
- "entries": {
- "type": "array",
- "items": {
- "type": "object",
- "$ref": "#/definitions/P2PMetricsRecentBatchStoreEntry"
- }
- }
- },
- "title": "Per-IP buckets: last 10 per sender IP"
- },
"ResourcesCPU": {
"type": "object",
"properties": {
@@ -450,32 +364,6 @@
},
"disk": {
"$ref": "#/definitions/P2PMetricsDiskStatus"
- },
- "recentBatchStore": {
- "type": "array",
- "items": {
- "type": "object",
- "$ref": "#/definitions/P2PMetricsRecentBatchStoreEntry"
- }
- },
- "recentBatchRetrieve": {
- "type": "array",
- "items": {
- "type": "object",
- "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveEntry"
- }
- },
- "recentBatchStoreByIp": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/P2PMetricsRecentBatchStoreList"
- }
- },
- "recentBatchRetrieveByIp": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveList"
- }
}
},
"title": "P2P metrics and diagnostics (additive field)"
diff --git a/gen/supernode/supernode_grpc.pb.go b/gen/supernode/service_grpc.pb.go
similarity index 98%
rename from gen/supernode/supernode_grpc.pb.go
rename to gen/supernode/service_grpc.pb.go
index 97eb3a0a..acb2e4c9 100644
--- a/gen/supernode/supernode_grpc.pb.go
+++ b/gen/supernode/service_grpc.pb.go
@@ -2,7 +2,7 @@
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v3.21.12
-// source: supernode/supernode.proto
+// source: supernode/service.proto
package supernode
@@ -159,5 +159,5 @@ var SupernodeService_ServiceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "supernode/supernode.proto",
+ Metadata: "supernode/service.proto",
}
diff --git a/gen/supernode/status.pb.go b/gen/supernode/status.pb.go
new file mode 100644
index 00000000..52045726
--- /dev/null
+++ b/gen/supernode/status.pb.go
@@ -0,0 +1,1444 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v3.21.12
+// source: supernode/status.proto
+
+package supernode
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// StatusRequest controls optional metrics in the status response
+type StatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional: include detailed P2P metrics in the response
+ // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true
+ IncludeP2PMetrics bool `protobuf:"varint,1,opt,name=include_p2p_metrics,json=includeP2pMetrics,proto3" json:"include_p2p_metrics,omitempty"`
+}
+
+func (x *StatusRequest) Reset() {
+ *x = StatusRequest{}
+ mi := &file_supernode_status_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusRequest) ProtoMessage() {}
+
+func (x *StatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead.
+func (*StatusRequest) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *StatusRequest) GetIncludeP2PMetrics() bool {
+ if x != nil {
+ return x.IncludeP2PMetrics
+ }
+ return false
+}
+
+// The StatusResponse represents system status with clear organization
+type StatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version
+ UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds
+ Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"`
+ RunningTasks []*StatusResponse_ServiceTasks `protobuf:"bytes,4,rep,name=running_tasks,json=runningTasks,proto3" json:"running_tasks,omitempty"` // Services with currently running tasks
+ RegisteredServices []string `protobuf:"bytes,5,rep,name=registered_services,json=registeredServices,proto3" json:"registered_services,omitempty"` // All registered/available services
+ Network *StatusResponse_Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` // P2P network information
+ Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list)
+ IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445")
+ P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"`
+}
+
+func (x *StatusResponse) Reset() {
+ *x = StatusResponse{}
+ mi := &file_supernode_status_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse) ProtoMessage() {}
+
+func (x *StatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead.
+func (*StatusResponse) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *StatusResponse) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *StatusResponse) GetUptimeSeconds() uint64 {
+ if x != nil {
+ return x.UptimeSeconds
+ }
+ return 0
+}
+
+func (x *StatusResponse) GetResources() *StatusResponse_Resources {
+ if x != nil {
+ return x.Resources
+ }
+ return nil
+}
+
+func (x *StatusResponse) GetRunningTasks() []*StatusResponse_ServiceTasks {
+ if x != nil {
+ return x.RunningTasks
+ }
+ return nil
+}
+
+func (x *StatusResponse) GetRegisteredServices() []string {
+ if x != nil {
+ return x.RegisteredServices
+ }
+ return nil
+}
+
+func (x *StatusResponse) GetNetwork() *StatusResponse_Network {
+ if x != nil {
+ return x.Network
+ }
+ return nil
+}
+
+func (x *StatusResponse) GetRank() int32 {
+ if x != nil {
+ return x.Rank
+ }
+ return 0
+}
+
+func (x *StatusResponse) GetIpAddress() string {
+ if x != nil {
+ return x.IpAddress
+ }
+ return ""
+}
+
+func (x *StatusResponse) GetP2PMetrics() *StatusResponse_P2PMetrics {
+ if x != nil {
+ return x.P2PMetrics
+ }
+ return nil
+}
+
+// System resource information
+type StatusResponse_Resources struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Cpu *StatusResponse_Resources_CPU `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"`
+ Memory *StatusResponse_Resources_Memory `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"`
+ StorageVolumes []*StatusResponse_Resources_Storage `protobuf:"bytes,3,rep,name=storage_volumes,json=storageVolumes,proto3" json:"storage_volumes,omitempty"`
+ HardwareSummary string `protobuf:"bytes,4,opt,name=hardware_summary,json=hardwareSummary,proto3" json:"hardware_summary,omitempty"` // Formatted hardware summary (e.g., "8 cores / 32GB RAM")
+}
+
+func (x *StatusResponse_Resources) Reset() {
+ *x = StatusResponse_Resources{}
+ mi := &file_supernode_status_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_Resources) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_Resources) ProtoMessage() {}
+
+func (x *StatusResponse_Resources) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_Resources.ProtoReflect.Descriptor instead.
+func (*StatusResponse_Resources) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *StatusResponse_Resources) GetCpu() *StatusResponse_Resources_CPU {
+ if x != nil {
+ return x.Cpu
+ }
+ return nil
+}
+
+func (x *StatusResponse_Resources) GetMemory() *StatusResponse_Resources_Memory {
+ if x != nil {
+ return x.Memory
+ }
+ return nil
+}
+
+func (x *StatusResponse_Resources) GetStorageVolumes() []*StatusResponse_Resources_Storage {
+ if x != nil {
+ return x.StorageVolumes
+ }
+ return nil
+}
+
+func (x *StatusResponse_Resources) GetHardwareSummary() string {
+ if x != nil {
+ return x.HardwareSummary
+ }
+ return ""
+}
+
+// ServiceTasks contains task information for a specific service
+type StatusResponse_ServiceTasks struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+ TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"`
+ TaskCount int32 `protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"`
+}
+
+func (x *StatusResponse_ServiceTasks) Reset() {
+ *x = StatusResponse_ServiceTasks{}
+ mi := &file_supernode_status_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_ServiceTasks) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_ServiceTasks) ProtoMessage() {}
+
+func (x *StatusResponse_ServiceTasks) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_ServiceTasks.ProtoReflect.Descriptor instead.
+func (*StatusResponse_ServiceTasks) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *StatusResponse_ServiceTasks) GetServiceName() string {
+ if x != nil {
+ return x.ServiceName
+ }
+ return ""
+}
+
+func (x *StatusResponse_ServiceTasks) GetTaskIds() []string {
+ if x != nil {
+ return x.TaskIds
+ }
+ return nil
+}
+
+func (x *StatusResponse_ServiceTasks) GetTaskCount() int32 {
+ if x != nil {
+ return x.TaskCount
+ }
+ return 0
+}
+
+// Network information
+type StatusResponse_Network struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" json:"peers_count,omitempty"` // Number of connected peers in P2P network
+ PeerAddresses []string `protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy)
+}
+
+func (x *StatusResponse_Network) Reset() {
+ *x = StatusResponse_Network{}
+ mi := &file_supernode_status_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_Network) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_Network) ProtoMessage() {}
+
+func (x *StatusResponse_Network) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_Network.ProtoReflect.Descriptor instead.
+func (*StatusResponse_Network) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *StatusResponse_Network) GetPeersCount() int32 {
+ if x != nil {
+ return x.PeersCount
+ }
+ return 0
+}
+
+func (x *StatusResponse_Network) GetPeerAddresses() []string {
+ if x != nil {
+ return x.PeerAddresses
+ }
+ return nil
+}
+
+// P2P metrics and diagnostics (additive field)
+type StatusResponse_P2PMetrics struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DhtMetrics *StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"`
+ NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"`
+ Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"`
+ Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"`
+}
+
+func (x *StatusResponse_P2PMetrics) Reset() {
+ *x = StatusResponse_P2PMetrics{}
+ mi := &file_supernode_status_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_P2PMetrics) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_P2PMetrics) ProtoMessage() {}
+
+func (x *StatusResponse_P2PMetrics) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_P2PMetrics.ProtoReflect.Descriptor instead.
+func (*StatusResponse_P2PMetrics) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *StatusResponse_P2PMetrics) GetDhtMetrics() *StatusResponse_P2PMetrics_DhtMetrics {
+ if x != nil {
+ return x.DhtMetrics
+ }
+ return nil
+}
+
+func (x *StatusResponse_P2PMetrics) GetNetworkHandleMetrics() map[string]*StatusResponse_P2PMetrics_HandleCounters {
+ if x != nil {
+ return x.NetworkHandleMetrics
+ }
+ return nil
+}
+
+func (x *StatusResponse_P2PMetrics) GetConnPoolMetrics() map[string]int64 {
+ if x != nil {
+ return x.ConnPoolMetrics
+ }
+ return nil
+}
+
+func (x *StatusResponse_P2PMetrics) GetBanList() []*StatusResponse_P2PMetrics_BanEntry {
+ if x != nil {
+ return x.BanList
+ }
+ return nil
+}
+
+func (x *StatusResponse_P2PMetrics) GetDatabase() *StatusResponse_P2PMetrics_DatabaseStats {
+ if x != nil {
+ return x.Database
+ }
+ return nil
+}
+
+func (x *StatusResponse_P2PMetrics) GetDisk() *StatusResponse_P2PMetrics_DiskStatus {
+ if x != nil {
+ return x.Disk
+ }
+ return nil
+}
+
+type StatusResponse_Resources_CPU struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ UsagePercent float64 `protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // CPU usage percentage (0-100)
+ Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores
+}
+
+func (x *StatusResponse_Resources_CPU) Reset() {
+ *x = StatusResponse_Resources_CPU{}
+ mi := &file_supernode_status_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_Resources_CPU) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_Resources_CPU) ProtoMessage() {}
+
+func (x *StatusResponse_Resources_CPU) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_Resources_CPU.ProtoReflect.Descriptor instead.
+func (*StatusResponse_Resources_CPU) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 0, 0}
+}
+
+func (x *StatusResponse_Resources_CPU) GetUsagePercent() float64 {
+ if x != nil {
+ return x.UsagePercent
+ }
+ return 0
+}
+
+func (x *StatusResponse_Resources_CPU) GetCores() int32 {
+ if x != nil {
+ return x.Cores
+ }
+ return 0
+}
+
+type StatusResponse_Resources_Memory struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB
+ UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB
+ AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" json:"available_gb,omitempty"` // Available memory in GB
+ UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100)
+}
+
+func (x *StatusResponse_Resources_Memory) Reset() {
+ *x = StatusResponse_Resources_Memory{}
+ mi := &file_supernode_status_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_Resources_Memory) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_Resources_Memory) ProtoMessage() {}
+
+func (x *StatusResponse_Resources_Memory) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_Resources_Memory.ProtoReflect.Descriptor instead.
+func (*StatusResponse_Resources_Memory) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 0, 1}
+}
+
+func (x *StatusResponse_Resources_Memory) GetTotalGb() float64 {
+ if x != nil {
+ return x.TotalGb
+ }
+ return 0
+}
+
+func (x *StatusResponse_Resources_Memory) GetUsedGb() float64 {
+ if x != nil {
+ return x.UsedGb
+ }
+ return 0
+}
+
+func (x *StatusResponse_Resources_Memory) GetAvailableGb() float64 {
+ if x != nil {
+ return x.AvailableGb
+ }
+ return 0
+}
+
+func (x *StatusResponse_Resources_Memory) GetUsagePercent() float64 {
+ if x != nil {
+ return x.UsagePercent
+ }
+ return 0
+}
+
+type StatusResponse_Resources_Storage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored
+ TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"`
+ UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"`
+ AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"`
+ UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100)
+}
+
+func (x *StatusResponse_Resources_Storage) Reset() {
+ *x = StatusResponse_Resources_Storage{}
+ mi := &file_supernode_status_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_Resources_Storage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_Resources_Storage) ProtoMessage() {}
+
+func (x *StatusResponse_Resources_Storage) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_Resources_Storage.ProtoReflect.Descriptor instead.
+func (*StatusResponse_Resources_Storage) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 0, 2}
+}
+
+func (x *StatusResponse_Resources_Storage) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *StatusResponse_Resources_Storage) GetTotalBytes() uint64 {
+ if x != nil {
+ return x.TotalBytes
+ }
+ return 0
+}
+
+func (x *StatusResponse_Resources_Storage) GetUsedBytes() uint64 {
+ if x != nil {
+ return x.UsedBytes
+ }
+ return 0
+}
+
+func (x *StatusResponse_Resources_Storage) GetAvailableBytes() uint64 {
+ if x != nil {
+ return x.AvailableBytes
+ }
+ return 0
+}
+
+func (x *StatusResponse_Resources_Storage) GetUsagePercent() float64 {
+ if x != nil {
+ return x.UsagePercent
+ }
+ return 0
+}
+
+// Rolling DHT metrics snapshot
+type StatusResponse_P2PMetrics_DhtMetrics struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ StoreSuccessRecent []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint `protobuf:"bytes,1,rep,name=store_success_recent,json=storeSuccessRecent,proto3" json:"store_success_recent,omitempty"`
+ BatchRetrieveRecent []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint `protobuf:"bytes,2,rep,name=batch_retrieve_recent,json=batchRetrieveRecent,proto3" json:"batch_retrieve_recent,omitempty"`
+ HotPathBannedSkips int64 `protobuf:"varint,3,opt,name=hot_path_banned_skips,json=hotPathBannedSkips,proto3" json:"hot_path_banned_skips,omitempty"` // counter
+ HotPathBanIncrements int64 `protobuf:"varint,4,opt,name=hot_path_ban_increments,json=hotPathBanIncrements,proto3" json:"hot_path_ban_increments,omitempty"` // counter
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics) Reset() {
+ *x = StatusResponse_P2PMetrics_DhtMetrics{}
+ mi := &file_supernode_status_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_P2PMetrics_DhtMetrics) ProtoMessage() {}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics.ProtoReflect.Descriptor instead.
+func (*StatusResponse_P2PMetrics_DhtMetrics) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0}
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics) GetStoreSuccessRecent() []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint {
+ if x != nil {
+ return x.StoreSuccessRecent
+ }
+ return nil
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics) GetBatchRetrieveRecent() []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint {
+ if x != nil {
+ return x.BatchRetrieveRecent
+ }
+ return nil
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBannedSkips() int64 {
+ if x != nil {
+ return x.HotPathBannedSkips
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBanIncrements() int64 {
+ if x != nil {
+ return x.HotPathBanIncrements
+ }
+ return 0
+}
+
+// Per-handler counters from network layer
+type StatusResponse_P2PMetrics_HandleCounters struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
+ Success int64 `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"`
+ Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"`
+ Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+}
+
+func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() {
+ *x = StatusResponse_P2PMetrics_HandleCounters{}
+ mi := &file_supernode_status_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_P2PMetrics_HandleCounters) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_P2PMetrics_HandleCounters) ProtoMessage() {}
+
+func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_P2PMetrics_HandleCounters.ProtoReflect.Descriptor instead.
+func (*StatusResponse_P2PMetrics_HandleCounters) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 1}
+}
+
+func (x *StatusResponse_P2PMetrics_HandleCounters) GetTotal() int64 {
+ if x != nil {
+ return x.Total
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_HandleCounters) GetSuccess() int64 {
+ if x != nil {
+ return x.Success
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_HandleCounters) GetFailure() int64 {
+ if x != nil {
+ return x.Failure
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_HandleCounters) GetTimeout() int64 {
+ if x != nil {
+ return x.Timeout
+ }
+ return 0
+}
+
+// Ban list entry
+type StatusResponse_P2PMetrics_BanEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID
+ Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP
+ Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port
+ Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count
+ CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds)
+ AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds
+}
+
+func (x *StatusResponse_P2PMetrics_BanEntry) Reset() {
+ *x = StatusResponse_P2PMetrics_BanEntry{}
+ mi := &file_supernode_status_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_P2PMetrics_BanEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_P2PMetrics_BanEntry) ProtoMessage() {}
+
+func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_P2PMetrics_BanEntry.ProtoReflect.Descriptor instead.
+func (*StatusResponse_P2PMetrics_BanEntry) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 2}
+}
+
+func (x *StatusResponse_P2PMetrics_BanEntry) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *StatusResponse_P2PMetrics_BanEntry) GetIp() string {
+ if x != nil {
+ return x.Ip
+ }
+ return ""
+}
+
+func (x *StatusResponse_P2PMetrics_BanEntry) GetPort() uint32 {
+ if x != nil {
+ return x.Port
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_BanEntry) GetCount() int32 {
+ if x != nil {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_BanEntry) GetCreatedAtUnix() int64 {
+ if x != nil {
+ return x.CreatedAtUnix
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_BanEntry) GetAgeSeconds() int64 {
+ if x != nil {
+ return x.AgeSeconds
+ }
+ return 0
+}
+
+// DB stats
+type StatusResponse_P2PMetrics_DatabaseStats struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"`
+ P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"`
+}
+
+func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() {
+ *x = StatusResponse_P2PMetrics_DatabaseStats{}
+ mi := &file_supernode_status_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_P2PMetrics_DatabaseStats) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_P2PMetrics_DatabaseStats) ProtoMessage() {}
+
+func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_P2PMetrics_DatabaseStats.ProtoReflect.Descriptor instead.
+func (*StatusResponse_P2PMetrics_DatabaseStats) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 3}
+}
+
+func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbSizeMb() float64 {
+ if x != nil {
+ return x.P2PDbSizeMb
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbRecordsCount() int64 {
+ if x != nil {
+ return x.P2PDbRecordsCount
+ }
+ return 0
+}
+
+// Disk status
+type StatusResponse_P2PMetrics_DiskStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"`
+ UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"`
+ FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"`
+}
+
+func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() {
+ *x = StatusResponse_P2PMetrics_DiskStatus{}
+ mi := &file_supernode_status_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_P2PMetrics_DiskStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_P2PMetrics_DiskStatus) ProtoMessage() {}
+
+func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_P2PMetrics_DiskStatus.ProtoReflect.Descriptor instead.
+func (*StatusResponse_P2PMetrics_DiskStatus) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 4}
+}
+
+func (x *StatusResponse_P2PMetrics_DiskStatus) GetAllMb() float64 {
+ if x != nil {
+ return x.AllMb
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DiskStatus) GetUsedMb() float64 {
+ if x != nil {
+ return x.UsedMb
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DiskStatus) GetFreeMb() float64 {
+ if x != nil {
+ return x.FreeMb
+ }
+ return 0
+}
+
+type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds)
+ Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted
+ Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs
+ SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100)
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() {
+ *x = StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{}
+ mi := &file_supernode_status_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoMessage() {}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint.ProtoReflect.Descriptor instead.
+func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0, 0}
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetTimeUnix() int64 {
+ if x != nil {
+ return x.TimeUnix
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetRequests() int32 {
+ if x != nil {
+ return x.Requests
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessful() int32 {
+ if x != nil {
+ return x.Successful
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessRate() float64 {
+ if x != nil {
+ return x.SuccessRate
+ }
+ return 0
+}
+
+type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds)
+ Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested
+ Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // required count
+ FoundLocal int32 `protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally
+ FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network
+ DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() {
+ *x = StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{}
+ mi := &file_supernode_status_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoMessage() {}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() protoreflect.Message {
+ mi := &file_supernode_status_proto_msgTypes[17]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint.ProtoReflect.Descriptor instead.
+func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Descriptor() ([]byte, []int) {
+ return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0, 1}
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetTimeUnix() int64 {
+ if x != nil {
+ return x.TimeUnix
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetKeys() int32 {
+ if x != nil {
+ return x.Keys
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetRequired() int32 {
+ if x != nil {
+ return x.Required
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundLocal() int32 {
+ if x != nil {
+ return x.FoundLocal
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundNetwork() int32 {
+ if x != nil {
+ return x.FoundNetwork
+ }
+ return 0
+}
+
+func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetDurationMs() int64 {
+ if x != nil {
+ return x.DurationMs
+ }
+ return 0
+}
+
+var File_supernode_status_proto protoreflect.FileDescriptor
+
+var file_supernode_status_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e,
+ 0x6f, 0x64, 0x65, 0x22, 0x3f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f,
+ 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x32, 0x70, 0x4d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x22, 0x84, 0x19, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f,
+ 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x75, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x75,
+ 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x72,
+ 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x0c, 0x72, 0x75, 0x6e, 0x6e,
+ 0x69, 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x72, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18,
+ 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65,
+ 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x6e, 0x65, 0x74,
+ 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x75, 0x70,
+ 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x07, 0x6e,
+ 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x70,
+ 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x70, 0x32, 0x70,
+ 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24,
+ 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x70, 0x32, 0x70, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39,
+ 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x75,
+ 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x42, 0x0a, 0x06, 0x6d, 0x65, 0x6d,
+ 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, 0x75, 0x70, 0x65,
+ 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x4d,
+ 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x54, 0x0a,
+ 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f,
+ 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75,
+ 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x5f,
+ 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x68,
+ 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x1a, 0x40,
+ 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70,
+ 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73,
+ 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f,
+ 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73,
+ 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74,
+ 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74,
+ 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x67,
+ 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x47, 0x62, 0x12,
+ 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x67, 0x62, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65,
+ 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63,
+ 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65,
+ 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0xab, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+ 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f,
+ 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64,
+ 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x75, 0x73,
+ 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x76, 0x61, 0x69, 0x6c,
+ 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73,
+ 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e,
+ 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65,
+ 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x6b, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x61, 0x73, 0x6b,
+ 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x73, 0x6b,
+ 0x49, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25,
+ 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xf3, 0x0e, 0x0a, 0x0a, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x68, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65,
+ 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e,
+ 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x64, 0x68, 0x74, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x74, 0x0a, 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72,
+ 0x6b, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f,
+ 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x4e, 0x65, 0x74,
+ 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x65, 0x0a, 0x11,
+ 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e,
+ 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x12, 0x48, 0x0a, 0x08, 0x62, 0x61, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64,
+ 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x6e, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x61, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4e, 0x0a,
+ 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x32, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74,
+ 0x61, 0x74, 0x73, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x43, 0x0a,
+ 0x04, 0x64, 0x69, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75,
+ 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x04, 0x64, 0x69,
+ 0x73, 0x6b, 0x1a, 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x12, 0x73, 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69,
+ 0x6e, 0x74, 0x52, 0x12, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, 0x15, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f,
+ 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64,
+ 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72,
+ 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x13, 0x62, 0x61, 0x74, 0x63, 0x68,
+ 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31,
+ 0x0a, 0x15, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65,
+ 0x64, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x68,
+ 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70,
+ 0x73, 0x12, 0x35, 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61,
+ 0x6e, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x14, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e,
+ 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x8f, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f,
+ 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x66, 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73,
+ 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42,
+ 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e,
+ 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12,
+ 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65,
+ 0x79, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12,
+ 0x23, 0x0a, 0x0d, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74,
+ 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x1a, 0x74, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a,
+ 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
+ 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75,
+ 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72,
+ 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08,
+ 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74,
+ 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67,
+ 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x0a, 0x61, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e,
+ 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, 0x44, 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d,
+ 0x62, 0x12, 0x2f, 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f,
+ 0x72, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x11, 0x70, 0x32, 0x70, 0x44, 0x62, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x1a, 0x55, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x15, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01,
+ 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f,
+ 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62,
+ 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x06, 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, 0x1a, 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74,
+ 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e,
+ 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61,
+ 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50,
+ 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f,
+ 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e,
+ 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_supernode_status_proto_rawDescOnce sync.Once
+ file_supernode_status_proto_rawDescData = file_supernode_status_proto_rawDesc
+)
+
+func file_supernode_status_proto_rawDescGZIP() []byte {
+ file_supernode_status_proto_rawDescOnce.Do(func() {
+ file_supernode_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_status_proto_rawDescData)
+ })
+ return file_supernode_status_proto_rawDescData
+}
+
+var file_supernode_status_proto_msgTypes = make([]protoimpl.MessageInfo, 18)
+var file_supernode_status_proto_goTypes = []any{
+ (*StatusRequest)(nil), // 0: supernode.StatusRequest
+ (*StatusResponse)(nil), // 1: supernode.StatusResponse
+ (*StatusResponse_Resources)(nil), // 2: supernode.StatusResponse.Resources
+ (*StatusResponse_ServiceTasks)(nil), // 3: supernode.StatusResponse.ServiceTasks
+ (*StatusResponse_Network)(nil), // 4: supernode.StatusResponse.Network
+ (*StatusResponse_P2PMetrics)(nil), // 5: supernode.StatusResponse.P2PMetrics
+ (*StatusResponse_Resources_CPU)(nil), // 6: supernode.StatusResponse.Resources.CPU
+ (*StatusResponse_Resources_Memory)(nil), // 7: supernode.StatusResponse.Resources.Memory
+ (*StatusResponse_Resources_Storage)(nil), // 8: supernode.StatusResponse.Resources.Storage
+ (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 9: supernode.StatusResponse.P2PMetrics.DhtMetrics
+ (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 10: supernode.StatusResponse.P2PMetrics.HandleCounters
+ (*StatusResponse_P2PMetrics_BanEntry)(nil), // 11: supernode.StatusResponse.P2PMetrics.BanEntry
+ (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 12: supernode.StatusResponse.P2PMetrics.DatabaseStats
+ (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 13: supernode.StatusResponse.P2PMetrics.DiskStatus
+ nil, // 14: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry
+ nil, // 15: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry
+ (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 16: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint
+ (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 17: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint
+}
+var file_supernode_status_proto_depIdxs = []int32{
+ 2, // 0: supernode.StatusResponse.resources:type_name -> supernode.StatusResponse.Resources
+ 3, // 1: supernode.StatusResponse.running_tasks:type_name -> supernode.StatusResponse.ServiceTasks
+ 4, // 2: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network
+ 5, // 3: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics
+ 6, // 4: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU
+ 7, // 5: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory
+ 8, // 6: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage
+ 9, // 7: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics
+ 14, // 8: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry
+ 15, // 9: supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry
+ 11, // 10: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry
+ 12, // 11: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats
+ 13, // 12: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus
+ 16, // 13: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint
+ 17, // 14: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint
+ 10, // 15: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters
+ 16, // [16:16] is the sub-list for method output_type
+ 16, // [16:16] is the sub-list for method input_type
+ 16, // [16:16] is the sub-list for extension type_name
+ 16, // [16:16] is the sub-list for extension extendee
+ 0, // [0:16] is the sub-list for field type_name
+}
+
+func init() { file_supernode_status_proto_init() }
+func file_supernode_status_proto_init() {
+ if File_supernode_status_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_supernode_status_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 18,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_supernode_status_proto_goTypes,
+ DependencyIndexes: file_supernode_status_proto_depIdxs,
+ MessageInfos: file_supernode_status_proto_msgTypes,
+ }.Build()
+ File_supernode_status_proto = out.File
+ file_supernode_status_proto_rawDesc = nil
+ file_supernode_status_proto_goTypes = nil
+ file_supernode_status_proto_depIdxs = nil
+}
diff --git a/gen/supernode/status.swagger.json b/gen/supernode/status.swagger.json
new file mode 100644
index 00000000..5b014db1
--- /dev/null
+++ b/gen/supernode/status.swagger.json
@@ -0,0 +1,44 @@
+{
+ "swagger": "2.0",
+ "info": {
+ "title": "supernode/status.proto",
+ "version": "version not set"
+ },
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "paths": {},
+ "definitions": {
+ "protobufAny": {
+ "type": "object",
+ "properties": {
+ "@type": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": {}
+ },
+ "rpcStatus": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "message": {
+ "type": "string"
+ },
+ "details": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "$ref": "#/definitions/protobufAny"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/gen/supernode/supernode.pb.go b/gen/supernode/supernode.pb.go
deleted file mode 100644
index 431bc8b5..00000000
--- a/gen/supernode/supernode.pb.go
+++ /dev/null
@@ -1,2034 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.35.1
-// protoc v3.21.12
-// source: supernode/supernode.proto
-
-package supernode
-
-import (
- reflect "reflect"
- sync "sync"
-
- _ "google.golang.org/genproto/googleapis/api/annotations"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type StatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Optional: include detailed P2P metrics in the response
- // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true
- IncludeP2PMetrics bool `protobuf:"varint,1,opt,name=include_p2p_metrics,json=includeP2pMetrics,proto3" json:"include_p2p_metrics,omitempty"`
-}
-
-func (x *StatusRequest) Reset() {
- *x = StatusRequest{}
- mi := &file_supernode_supernode_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusRequest) ProtoMessage() {}
-
-func (x *StatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[0]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead.
-func (*StatusRequest) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *StatusRequest) GetIncludeP2PMetrics() bool {
- if x != nil {
- return x.IncludeP2PMetrics
- }
- return false
-}
-
-type ListServicesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ListServicesRequest) Reset() {
- *x = ListServicesRequest{}
- mi := &file_supernode_supernode_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *ListServicesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListServicesRequest) ProtoMessage() {}
-
-func (x *ListServicesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[1]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead.
-func (*ListServicesRequest) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{1}
-}
-
-type ListServicesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"`
- Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
-}
-
-func (x *ListServicesResponse) Reset() {
- *x = ListServicesResponse{}
- mi := &file_supernode_supernode_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *ListServicesResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListServicesResponse) ProtoMessage() {}
-
-func (x *ListServicesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[2]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead.
-func (*ListServicesResponse) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *ListServicesResponse) GetServices() []*ServiceInfo {
- if x != nil {
- return x.Services
- }
- return nil
-}
-
-func (x *ListServicesResponse) GetCount() int32 {
- if x != nil {
- return x.Count
- }
- return 0
-}
-
-type ServiceInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"`
-}
-
-func (x *ServiceInfo) Reset() {
- *x = ServiceInfo{}
- mi := &file_supernode_supernode_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *ServiceInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ServiceInfo) ProtoMessage() {}
-
-func (x *ServiceInfo) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[3]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ServiceInfo.ProtoReflect.Descriptor instead.
-func (*ServiceInfo) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *ServiceInfo) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *ServiceInfo) GetMethods() []string {
- if x != nil {
- return x.Methods
- }
- return nil
-}
-
-// The StatusResponse represents system status with clear organization
-type StatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version
- UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds
- Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"`
- RunningTasks []*StatusResponse_ServiceTasks `protobuf:"bytes,4,rep,name=running_tasks,json=runningTasks,proto3" json:"running_tasks,omitempty"` // Services with currently running tasks
- RegisteredServices []string `protobuf:"bytes,5,rep,name=registered_services,json=registeredServices,proto3" json:"registered_services,omitempty"` // All registered/available services
- Network *StatusResponse_Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` // P2P network information
- Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list)
- IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445")
- P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"`
-}
-
-func (x *StatusResponse) Reset() {
- *x = StatusResponse{}
- mi := &file_supernode_supernode_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse) ProtoMessage() {}
-
-func (x *StatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[4]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead.
-func (*StatusResponse) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *StatusResponse) GetVersion() string {
- if x != nil {
- return x.Version
- }
- return ""
-}
-
-func (x *StatusResponse) GetUptimeSeconds() uint64 {
- if x != nil {
- return x.UptimeSeconds
- }
- return 0
-}
-
-func (x *StatusResponse) GetResources() *StatusResponse_Resources {
- if x != nil {
- return x.Resources
- }
- return nil
-}
-
-func (x *StatusResponse) GetRunningTasks() []*StatusResponse_ServiceTasks {
- if x != nil {
- return x.RunningTasks
- }
- return nil
-}
-
-func (x *StatusResponse) GetRegisteredServices() []string {
- if x != nil {
- return x.RegisteredServices
- }
- return nil
-}
-
-func (x *StatusResponse) GetNetwork() *StatusResponse_Network {
- if x != nil {
- return x.Network
- }
- return nil
-}
-
-func (x *StatusResponse) GetRank() int32 {
- if x != nil {
- return x.Rank
- }
- return 0
-}
-
-func (x *StatusResponse) GetIpAddress() string {
- if x != nil {
- return x.IpAddress
- }
- return ""
-}
-
-func (x *StatusResponse) GetP2PMetrics() *StatusResponse_P2PMetrics {
- if x != nil {
- return x.P2PMetrics
- }
- return nil
-}
-
-// System resource information
-type StatusResponse_Resources struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Cpu *StatusResponse_Resources_CPU `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"`
- Memory *StatusResponse_Resources_Memory `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"`
- StorageVolumes []*StatusResponse_Resources_Storage `protobuf:"bytes,3,rep,name=storage_volumes,json=storageVolumes,proto3" json:"storage_volumes,omitempty"`
- HardwareSummary string `protobuf:"bytes,4,opt,name=hardware_summary,json=hardwareSummary,proto3" json:"hardware_summary,omitempty"` // Formatted hardware summary (e.g., "8 cores / 32GB RAM")
-}
-
-func (x *StatusResponse_Resources) Reset() {
- *x = StatusResponse_Resources{}
- mi := &file_supernode_supernode_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_Resources) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_Resources) ProtoMessage() {}
-
-func (x *StatusResponse_Resources) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[5]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_Resources.ProtoReflect.Descriptor instead.
-func (*StatusResponse_Resources) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0}
-}
-
-func (x *StatusResponse_Resources) GetCpu() *StatusResponse_Resources_CPU {
- if x != nil {
- return x.Cpu
- }
- return nil
-}
-
-func (x *StatusResponse_Resources) GetMemory() *StatusResponse_Resources_Memory {
- if x != nil {
- return x.Memory
- }
- return nil
-}
-
-func (x *StatusResponse_Resources) GetStorageVolumes() []*StatusResponse_Resources_Storage {
- if x != nil {
- return x.StorageVolumes
- }
- return nil
-}
-
-func (x *StatusResponse_Resources) GetHardwareSummary() string {
- if x != nil {
- return x.HardwareSummary
- }
- return ""
-}
-
-// ServiceTasks contains task information for a specific service
-type StatusResponse_ServiceTasks struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
- TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"`
- TaskCount int32 `protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"`
-}
-
-func (x *StatusResponse_ServiceTasks) Reset() {
- *x = StatusResponse_ServiceTasks{}
- mi := &file_supernode_supernode_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_ServiceTasks) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_ServiceTasks) ProtoMessage() {}
-
-func (x *StatusResponse_ServiceTasks) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[6]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_ServiceTasks.ProtoReflect.Descriptor instead.
-func (*StatusResponse_ServiceTasks) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 1}
-}
-
-func (x *StatusResponse_ServiceTasks) GetServiceName() string {
- if x != nil {
- return x.ServiceName
- }
- return ""
-}
-
-func (x *StatusResponse_ServiceTasks) GetTaskIds() []string {
- if x != nil {
- return x.TaskIds
- }
- return nil
-}
-
-func (x *StatusResponse_ServiceTasks) GetTaskCount() int32 {
- if x != nil {
- return x.TaskCount
- }
- return 0
-}
-
-// Network information
-type StatusResponse_Network struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" json:"peers_count,omitempty"` // Number of connected peers in P2P network
- PeerAddresses []string `protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy)
-}
-
-func (x *StatusResponse_Network) Reset() {
- *x = StatusResponse_Network{}
- mi := &file_supernode_supernode_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_Network) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_Network) ProtoMessage() {}
-
-func (x *StatusResponse_Network) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[7]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_Network.ProtoReflect.Descriptor instead.
-func (*StatusResponse_Network) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 2}
-}
-
-func (x *StatusResponse_Network) GetPeersCount() int32 {
- if x != nil {
- return x.PeersCount
- }
- return 0
-}
-
-func (x *StatusResponse_Network) GetPeerAddresses() []string {
- if x != nil {
- return x.PeerAddresses
- }
- return nil
-}
-
-// P2P metrics and diagnostics (additive field)
-type StatusResponse_P2PMetrics struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- DhtMetrics *StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"`
- NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
- BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"`
- Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"`
- Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"`
- RecentBatchStore []*StatusResponse_P2PMetrics_RecentBatchStoreEntry `protobuf:"bytes,7,rep,name=recent_batch_store,json=recentBatchStore,proto3" json:"recent_batch_store,omitempty"`
- RecentBatchRetrieve []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry `protobuf:"bytes,8,rep,name=recent_batch_retrieve,json=recentBatchRetrieve,proto3" json:"recent_batch_retrieve,omitempty"`
- RecentBatchStoreByIp map[string]*StatusResponse_P2PMetrics_RecentBatchStoreList `protobuf:"bytes,9,rep,name=recent_batch_store_by_ip,json=recentBatchStoreByIp,proto3" json:"recent_batch_store_by_ip,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- RecentBatchRetrieveByIp map[string]*StatusResponse_P2PMetrics_RecentBatchRetrieveList `protobuf:"bytes,10,rep,name=recent_batch_retrieve_by_ip,json=recentBatchRetrieveByIp,proto3" json:"recent_batch_retrieve_by_ip,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-}
-
-func (x *StatusResponse_P2PMetrics) Reset() {
- *x = StatusResponse_P2PMetrics{}
- mi := &file_supernode_supernode_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[8]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3}
-}
-
-func (x *StatusResponse_P2PMetrics) GetDhtMetrics() *StatusResponse_P2PMetrics_DhtMetrics {
- if x != nil {
- return x.DhtMetrics
- }
- return nil
-}
-
-func (x *StatusResponse_P2PMetrics) GetNetworkHandleMetrics() map[string]*StatusResponse_P2PMetrics_HandleCounters {
- if x != nil {
- return x.NetworkHandleMetrics
- }
- return nil
-}
-
-func (x *StatusResponse_P2PMetrics) GetConnPoolMetrics() map[string]int64 {
- if x != nil {
- return x.ConnPoolMetrics
- }
- return nil
-}
-
-func (x *StatusResponse_P2PMetrics) GetBanList() []*StatusResponse_P2PMetrics_BanEntry {
- if x != nil {
- return x.BanList
- }
- return nil
-}
-
-func (x *StatusResponse_P2PMetrics) GetDatabase() *StatusResponse_P2PMetrics_DatabaseStats {
- if x != nil {
- return x.Database
- }
- return nil
-}
-
-func (x *StatusResponse_P2PMetrics) GetDisk() *StatusResponse_P2PMetrics_DiskStatus {
- if x != nil {
- return x.Disk
- }
- return nil
-}
-
-func (x *StatusResponse_P2PMetrics) GetRecentBatchStore() []*StatusResponse_P2PMetrics_RecentBatchStoreEntry {
- if x != nil {
- return x.RecentBatchStore
- }
- return nil
-}
-
-func (x *StatusResponse_P2PMetrics) GetRecentBatchRetrieve() []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry {
- if x != nil {
- return x.RecentBatchRetrieve
- }
- return nil
-}
-
-func (x *StatusResponse_P2PMetrics) GetRecentBatchStoreByIp() map[string]*StatusResponse_P2PMetrics_RecentBatchStoreList {
- if x != nil {
- return x.RecentBatchStoreByIp
- }
- return nil
-}
-
-func (x *StatusResponse_P2PMetrics) GetRecentBatchRetrieveByIp() map[string]*StatusResponse_P2PMetrics_RecentBatchRetrieveList {
- if x != nil {
- return x.RecentBatchRetrieveByIp
- }
- return nil
-}
-
-type StatusResponse_Resources_CPU struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- UsagePercent float64 `protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // CPU usage percentage (0-100)
- Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores
-}
-
-func (x *StatusResponse_Resources_CPU) Reset() {
- *x = StatusResponse_Resources_CPU{}
- mi := &file_supernode_supernode_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_Resources_CPU) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_Resources_CPU) ProtoMessage() {}
-
-func (x *StatusResponse_Resources_CPU) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[9]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_Resources_CPU.ProtoReflect.Descriptor instead.
-func (*StatusResponse_Resources_CPU) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0, 0}
-}
-
-func (x *StatusResponse_Resources_CPU) GetUsagePercent() float64 {
- if x != nil {
- return x.UsagePercent
- }
- return 0
-}
-
-func (x *StatusResponse_Resources_CPU) GetCores() int32 {
- if x != nil {
- return x.Cores
- }
- return 0
-}
-
-type StatusResponse_Resources_Memory struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB
- UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB
- AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" json:"available_gb,omitempty"` // Available memory in GB
- UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100)
-}
-
-func (x *StatusResponse_Resources_Memory) Reset() {
- *x = StatusResponse_Resources_Memory{}
- mi := &file_supernode_supernode_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_Resources_Memory) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_Resources_Memory) ProtoMessage() {}
-
-func (x *StatusResponse_Resources_Memory) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[10]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_Resources_Memory.ProtoReflect.Descriptor instead.
-func (*StatusResponse_Resources_Memory) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0, 1}
-}
-
-func (x *StatusResponse_Resources_Memory) GetTotalGb() float64 {
- if x != nil {
- return x.TotalGb
- }
- return 0
-}
-
-func (x *StatusResponse_Resources_Memory) GetUsedGb() float64 {
- if x != nil {
- return x.UsedGb
- }
- return 0
-}
-
-func (x *StatusResponse_Resources_Memory) GetAvailableGb() float64 {
- if x != nil {
- return x.AvailableGb
- }
- return 0
-}
-
-func (x *StatusResponse_Resources_Memory) GetUsagePercent() float64 {
- if x != nil {
- return x.UsagePercent
- }
- return 0
-}
-
-type StatusResponse_Resources_Storage struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored
- TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"`
- UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"`
- AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"`
- UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100)
-}
-
-func (x *StatusResponse_Resources_Storage) Reset() {
- *x = StatusResponse_Resources_Storage{}
- mi := &file_supernode_supernode_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_Resources_Storage) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_Resources_Storage) ProtoMessage() {}
-
-func (x *StatusResponse_Resources_Storage) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[11]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_Resources_Storage.ProtoReflect.Descriptor instead.
-func (*StatusResponse_Resources_Storage) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0, 2}
-}
-
-func (x *StatusResponse_Resources_Storage) GetPath() string {
- if x != nil {
- return x.Path
- }
- return ""
-}
-
-func (x *StatusResponse_Resources_Storage) GetTotalBytes() uint64 {
- if x != nil {
- return x.TotalBytes
- }
- return 0
-}
-
-func (x *StatusResponse_Resources_Storage) GetUsedBytes() uint64 {
- if x != nil {
- return x.UsedBytes
- }
- return 0
-}
-
-func (x *StatusResponse_Resources_Storage) GetAvailableBytes() uint64 {
- if x != nil {
- return x.AvailableBytes
- }
- return 0
-}
-
-func (x *StatusResponse_Resources_Storage) GetUsagePercent() float64 {
- if x != nil {
- return x.UsagePercent
- }
- return 0
-}
-
-// Rolling DHT metrics snapshot
-type StatusResponse_P2PMetrics_DhtMetrics struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- StoreSuccessRecent []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint `protobuf:"bytes,1,rep,name=store_success_recent,json=storeSuccessRecent,proto3" json:"store_success_recent,omitempty"`
- BatchRetrieveRecent []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint `protobuf:"bytes,2,rep,name=batch_retrieve_recent,json=batchRetrieveRecent,proto3" json:"batch_retrieve_recent,omitempty"`
- HotPathBannedSkips int64 `protobuf:"varint,3,opt,name=hot_path_banned_skips,json=hotPathBannedSkips,proto3" json:"hot_path_banned_skips,omitempty"` // counter
- HotPathBanIncrements int64 `protobuf:"varint,4,opt,name=hot_path_ban_increments,json=hotPathBanIncrements,proto3" json:"hot_path_ban_increments,omitempty"` // counter
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics) Reset() {
- *x = StatusResponse_P2PMetrics_DhtMetrics{}
- mi := &file_supernode_supernode_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics_DhtMetrics) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[12]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics_DhtMetrics) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0}
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics) GetStoreSuccessRecent() []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint {
- if x != nil {
- return x.StoreSuccessRecent
- }
- return nil
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics) GetBatchRetrieveRecent() []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint {
- if x != nil {
- return x.BatchRetrieveRecent
- }
- return nil
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBannedSkips() int64 {
- if x != nil {
- return x.HotPathBannedSkips
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBanIncrements() int64 {
- if x != nil {
- return x.HotPathBanIncrements
- }
- return 0
-}
-
-// Per-handler counters from network layer
-type StatusResponse_P2PMetrics_HandleCounters struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
- Success int64 `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"`
- Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"`
- Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
-}
-
-func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() {
- *x = StatusResponse_P2PMetrics_HandleCounters{}
- mi := &file_supernode_supernode_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics_HandleCounters) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics_HandleCounters) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[13]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics_HandleCounters.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics_HandleCounters) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 1}
-}
-
-func (x *StatusResponse_P2PMetrics_HandleCounters) GetTotal() int64 {
- if x != nil {
- return x.Total
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_HandleCounters) GetSuccess() int64 {
- if x != nil {
- return x.Success
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_HandleCounters) GetFailure() int64 {
- if x != nil {
- return x.Failure
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_HandleCounters) GetTimeout() int64 {
- if x != nil {
- return x.Timeout
- }
- return 0
-}
-
-// Ban list entry
-type StatusResponse_P2PMetrics_BanEntry struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID
- Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP
- Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port
- Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count
- CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds)
- AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds
-}
-
-func (x *StatusResponse_P2PMetrics_BanEntry) Reset() {
- *x = StatusResponse_P2PMetrics_BanEntry{}
- mi := &file_supernode_supernode_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics_BanEntry) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics_BanEntry) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[14]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics_BanEntry.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics_BanEntry) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 2}
-}
-
-func (x *StatusResponse_P2PMetrics_BanEntry) GetId() string {
- if x != nil {
- return x.Id
- }
- return ""
-}
-
-func (x *StatusResponse_P2PMetrics_BanEntry) GetIp() string {
- if x != nil {
- return x.Ip
- }
- return ""
-}
-
-func (x *StatusResponse_P2PMetrics_BanEntry) GetPort() uint32 {
- if x != nil {
- return x.Port
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_BanEntry) GetCount() int32 {
- if x != nil {
- return x.Count
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_BanEntry) GetCreatedAtUnix() int64 {
- if x != nil {
- return x.CreatedAtUnix
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_BanEntry) GetAgeSeconds() int64 {
- if x != nil {
- return x.AgeSeconds
- }
- return 0
-}
-
-// DB stats
-type StatusResponse_P2PMetrics_DatabaseStats struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"`
- P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"`
-}
-
-func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() {
- *x = StatusResponse_P2PMetrics_DatabaseStats{}
- mi := &file_supernode_supernode_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics_DatabaseStats) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics_DatabaseStats) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[15]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics_DatabaseStats.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics_DatabaseStats) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 3}
-}
-
-func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbSizeMb() float64 {
- if x != nil {
- return x.P2PDbSizeMb
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbRecordsCount() int64 {
- if x != nil {
- return x.P2PDbRecordsCount
- }
- return 0
-}
-
-// Disk status
-type StatusResponse_P2PMetrics_DiskStatus struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"`
- UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"`
- FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"`
-}
-
-func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() {
- *x = StatusResponse_P2PMetrics_DiskStatus{}
- mi := &file_supernode_supernode_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics_DiskStatus) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics_DiskStatus) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[16]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics_DiskStatus.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics_DiskStatus) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 4}
-}
-
-func (x *StatusResponse_P2PMetrics_DiskStatus) GetAllMb() float64 {
- if x != nil {
- return x.AllMb
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DiskStatus) GetUsedMb() float64 {
- if x != nil {
- return x.UsedMb
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DiskStatus) GetFreeMb() float64 {
- if x != nil {
- return x.FreeMb
- }
- return 0
-}
-
-// Last handled BatchStoreData requests (most recent first)
-type StatusResponse_P2PMetrics_RecentBatchStoreEntry struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"`
- SenderId string `protobuf:"bytes,2,opt,name=sender_id,json=senderId,proto3" json:"sender_id,omitempty"`
- SenderIp string `protobuf:"bytes,3,opt,name=sender_ip,json=senderIp,proto3" json:"sender_ip,omitempty"`
- Keys int32 `protobuf:"varint,4,opt,name=keys,proto3" json:"keys,omitempty"`
- DurationMs int64 `protobuf:"varint,5,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"`
- Ok bool `protobuf:"varint,6,opt,name=ok,proto3" json:"ok,omitempty"`
- Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"`
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) Reset() {
- *x = StatusResponse_P2PMetrics_RecentBatchStoreEntry{}
- mi := &file_supernode_supernode_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics_RecentBatchStoreEntry) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[19]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchStoreEntry.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics_RecentBatchStoreEntry) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 7}
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetTimeUnix() int64 {
- if x != nil {
- return x.TimeUnix
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetSenderId() string {
- if x != nil {
- return x.SenderId
- }
- return ""
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetSenderIp() string {
- if x != nil {
- return x.SenderIp
- }
- return ""
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetKeys() int32 {
- if x != nil {
- return x.Keys
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetDurationMs() int64 {
- if x != nil {
- return x.DurationMs
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetOk() bool {
- if x != nil {
- return x.Ok
- }
- return false
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetError() string {
- if x != nil {
- return x.Error
- }
- return ""
-}
-
-// Last handled BatchGetValues requests (most recent first)
-type StatusResponse_P2PMetrics_RecentBatchRetrieveEntry struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"`
- SenderId string `protobuf:"bytes,2,opt,name=sender_id,json=senderId,proto3" json:"sender_id,omitempty"`
- SenderIp string `protobuf:"bytes,3,opt,name=sender_ip,json=senderIp,proto3" json:"sender_ip,omitempty"`
- Requested int32 `protobuf:"varint,4,opt,name=requested,proto3" json:"requested,omitempty"`
- Found int32 `protobuf:"varint,5,opt,name=found,proto3" json:"found,omitempty"`
- DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"`
- Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"`
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) Reset() {
- *x = StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{}
- mi := &file_supernode_supernode_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[20]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchRetrieveEntry.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 8}
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetTimeUnix() int64 {
- if x != nil {
- return x.TimeUnix
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetSenderId() string {
- if x != nil {
- return x.SenderId
- }
- return ""
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetSenderIp() string {
- if x != nil {
- return x.SenderIp
- }
- return ""
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetRequested() int32 {
- if x != nil {
- return x.Requested
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetFound() int32 {
- if x != nil {
- return x.Found
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetDurationMs() int64 {
- if x != nil {
- return x.DurationMs
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetError() string {
- if x != nil {
- return x.Error
- }
- return ""
-}
-
-// Per-IP buckets: last 10 per sender IP
-type StatusResponse_P2PMetrics_RecentBatchStoreList struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Entries []*StatusResponse_P2PMetrics_RecentBatchStoreEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) Reset() {
- *x = StatusResponse_P2PMetrics_RecentBatchStoreList{}
- mi := &file_supernode_supernode_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics_RecentBatchStoreList) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[21]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchStoreList.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics_RecentBatchStoreList) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 9}
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) GetEntries() []*StatusResponse_P2PMetrics_RecentBatchStoreEntry {
- if x != nil {
- return x.Entries
- }
- return nil
-}
-
-type StatusResponse_P2PMetrics_RecentBatchRetrieveList struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Entries []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) Reset() {
- *x = StatusResponse_P2PMetrics_RecentBatchRetrieveList{}
- mi := &file_supernode_supernode_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics_RecentBatchRetrieveList) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[22]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchRetrieveList.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics_RecentBatchRetrieveList) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 10}
-}
-
-func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) GetEntries() []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry {
- if x != nil {
- return x.Entries
- }
- return nil
-}
-
-type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds)
- Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted
- Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs
- SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100)
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() {
- *x = StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{}
- mi := &file_supernode_supernode_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[25]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0, 0}
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetTimeUnix() int64 {
- if x != nil {
- return x.TimeUnix
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetRequests() int32 {
- if x != nil {
- return x.Requests
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessful() int32 {
- if x != nil {
- return x.Successful
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessRate() float64 {
- if x != nil {
- return x.SuccessRate
- }
- return 0
-}
-
-type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds)
- Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested
- Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // required count
- FoundLocal int32 `protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally
- FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network
- DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() {
- *x = StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{}
- mi := &file_supernode_supernode_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoMessage() {}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() protoreflect.Message {
- mi := &file_supernode_supernode_proto_msgTypes[26]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint.ProtoReflect.Descriptor instead.
-func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Descriptor() ([]byte, []int) {
- return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0, 1}
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetTimeUnix() int64 {
- if x != nil {
- return x.TimeUnix
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetKeys() int32 {
- if x != nil {
- return x.Keys
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetRequired() int32 {
- if x != nil {
- return x.Required
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundLocal() int32 {
- if x != nil {
- return x.FoundLocal
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundNetwork() int32 {
- if x != nil {
- return x.FoundNetwork
- }
- return 0
-}
-
-func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetDurationMs() int64 {
- if x != nil {
- return x.DurationMs
- }
- return 0
-}
-
-var File_supernode_supernode_proto protoreflect.FileDescriptor
-
-var file_supernode_supernode_proto_rawDesc = []byte{
- 0x0a, 0x19, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x75, 0x70, 0x65,
- 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70,
- 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61,
- 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65,
- 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x32, 0x70, 0x4d, 0x65,
- 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x60, 0x0a, 0x14,
- 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f,
- 0x64, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x3b,
- 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x22, 0xf7, 0x23, 0x0a, 0x0e,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x75, 0x70, 0x74, 0x69,
- 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x0d, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12,
- 0x41, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61,
- 0x73, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x75, 0x70, 0x65,
- 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b,
- 0x73, 0x52, 0x0c, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12,
- 0x2f, 0x0a, 0x13, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65,
- 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
- 0x12, 0x3b, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x65, 0x74,
- 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a,
- 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x61, 0x6e,
- 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
- 0x12, 0x45, 0x0a, 0x0b, 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18,
- 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64,
- 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x70, 0x32, 0x70,
- 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75,
- 0x12, 0x42, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x2a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65,
- 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x54, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f,
- 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e,
- 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x68, 0x61,
- 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x53, 0x75,
- 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x1a, 0x40, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x23, 0x0a, 0x0d,
- 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e,
- 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f,
- 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, 0x62, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, 0x12, 0x17, 0x0a,
- 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x67, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06,
- 0x75, 0x73, 0x65, 0x64, 0x47, 0x62, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
- 0x62, 0x6c, 0x65, 0x5f, 0x67, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x61, 0x76,
- 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61,
- 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01,
- 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0xab,
- 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61,
- 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f,
- 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12,
- 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x09, 0x75, 0x73, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27,
- 0x0a, 0x0f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62,
- 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65,
- 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c,
- 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x6b, 0x0a, 0x0c,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x21, 0x0a, 0x0c,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12,
- 0x19, 0x0a, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61,
- 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09,
- 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, 0x4e, 0x65, 0x74,
- 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73,
- 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64,
- 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x70,
- 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xe6, 0x19, 0x0a,
- 0x0a, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x64,
- 0x68, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d,
- 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
- 0x73, 0x52, 0x0a, 0x64, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x74, 0x0a,
- 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f,
- 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e,
- 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72,
- 0x69, 0x63, 0x73, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c,
- 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x6e,
- 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72,
- 0x69, 0x63, 0x73, 0x12, 0x65, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c,
- 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39,
- 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74,
- 0x72, 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74,
- 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x50,
- 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x48, 0x0a, 0x08, 0x62, 0x61,
- 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73,
- 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69,
- 0x63, 0x73, 0x2e, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x61, 0x6e,
- 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f,
- 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32,
- 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x12, 0x68, 0x0a, 0x12, 0x72, 0x65, 0x63,
- 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18,
- 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64,
- 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65,
- 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74,
- 0x6f, 0x72, 0x65, 0x12, 0x71, 0x0a, 0x15, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61,
- 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32,
- 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42,
- 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x13, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65,
- 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x12, 0x76, 0x0a, 0x18, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74,
- 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x79, 0x5f,
- 0x69, 0x70, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72,
- 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52,
- 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42,
- 0x79, 0x49, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74,
- 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x79, 0x49, 0x70, 0x12, 0x7f,
- 0x0a, 0x1b, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72,
- 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50,
- 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74,
- 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x42, 0x79, 0x49,
- 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x17, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61,
- 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x42, 0x79, 0x49, 0x70, 0x1a,
- 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x73,
- 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f,
- 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73,
- 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69,
- 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x53, 0x74,
- 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52,
- 0x12, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x63,
- 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, 0x15, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74,
- 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32,
- 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72,
- 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76,
- 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74,
- 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x68,
- 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73,
- 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, 0x6f, 0x74, 0x50,
- 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, 0x73, 0x12, 0x35,
- 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x5f, 0x69,
- 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x14, 0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, 0x63, 0x72, 0x65,
- 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x8f, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53,
- 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74,
- 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08,
- 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66,
- 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
- 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f,
- 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, 0x75, 0x63, 0x63,
- 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63,
- 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b,
- 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6b,
- 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12,
- 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66,
- 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x23, 0x0a, 0x0d,
- 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72,
- 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4d, 0x73, 0x1a, 0x74, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e,
- 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75,
- 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x75, 0x63,
- 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, 0x42, 0x61, 0x6e,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12,
- 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x75, 0x6e,
- 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x5f, 0x73,
- 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x67,
- 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x32, 0x70,
- 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, 0x44, 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x2f,
- 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73,
- 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x70, 0x32,
- 0x70, 0x44, 0x62, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a,
- 0x55, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a,
- 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x61,
- 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x6d, 0x62, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, 0x12, 0x17, 0x0a,
- 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06,
- 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, 0x1a, 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72,
- 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65,
- 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
- 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c,
- 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c,
- 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xc9, 0x01, 0x0a, 0x15, 0x52, 0x65, 0x63,
- 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12,
- 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09,
- 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x1f, 0x0a,
- 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x0e,
- 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x14,
- 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x1a, 0xdc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42,
- 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1b,
- 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73,
- 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0b,
- 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x14, 0x0a,
- 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72,
- 0x72, 0x6f, 0x72, 0x1a, 0x6c, 0x0a, 0x14, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74,
- 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x54, 0x0a, 0x07, 0x65,
- 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73,
- 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69,
- 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74,
- 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65,
- 0x73, 0x1a, 0x72, 0x0a, 0x17, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68,
- 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x07,
- 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e,
- 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72,
- 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52,
- 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e,
- 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x82, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74,
- 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x79, 0x49, 0x70, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65,
- 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
- 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e,
- 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x88, 0x01, 0x0a, 0x1c, 0x52,
- 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65,
- 0x76, 0x65, 0x42, 0x79, 0x49, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
- 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73,
- 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69,
- 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65,
- 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e,
- 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65,
- 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e,
- 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3,
- 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65,
- 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65,
- 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f,
- 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42,
- 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75,
- 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70,
- 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75,
- 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_supernode_supernode_proto_rawDescOnce sync.Once
- file_supernode_supernode_proto_rawDescData = file_supernode_supernode_proto_rawDesc
-)
-
-func file_supernode_supernode_proto_rawDescGZIP() []byte {
- file_supernode_supernode_proto_rawDescOnce.Do(func() {
- file_supernode_supernode_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_supernode_proto_rawDescData)
- })
- return file_supernode_supernode_proto_rawDescData
-}
-
-var file_supernode_supernode_proto_msgTypes = make([]protoimpl.MessageInfo, 27)
-var file_supernode_supernode_proto_goTypes = []any{
- (*StatusRequest)(nil), // 0: supernode.StatusRequest
- (*ListServicesRequest)(nil), // 1: supernode.ListServicesRequest
- (*ListServicesResponse)(nil), // 2: supernode.ListServicesResponse
- (*ServiceInfo)(nil), // 3: supernode.ServiceInfo
- (*StatusResponse)(nil), // 4: supernode.StatusResponse
- (*StatusResponse_Resources)(nil), // 5: supernode.StatusResponse.Resources
- (*StatusResponse_ServiceTasks)(nil), // 6: supernode.StatusResponse.ServiceTasks
- (*StatusResponse_Network)(nil), // 7: supernode.StatusResponse.Network
- (*StatusResponse_P2PMetrics)(nil), // 8: supernode.StatusResponse.P2PMetrics
- (*StatusResponse_Resources_CPU)(nil), // 9: supernode.StatusResponse.Resources.CPU
- (*StatusResponse_Resources_Memory)(nil), // 10: supernode.StatusResponse.Resources.Memory
- (*StatusResponse_Resources_Storage)(nil), // 11: supernode.StatusResponse.Resources.Storage
- (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 12: supernode.StatusResponse.P2PMetrics.DhtMetrics
- (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 13: supernode.StatusResponse.P2PMetrics.HandleCounters
- (*StatusResponse_P2PMetrics_BanEntry)(nil), // 14: supernode.StatusResponse.P2PMetrics.BanEntry
- (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 15: supernode.StatusResponse.P2PMetrics.DatabaseStats
- (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 16: supernode.StatusResponse.P2PMetrics.DiskStatus
- nil, // 17: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry
- nil, // 18: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry
- (*StatusResponse_P2PMetrics_RecentBatchStoreEntry)(nil), // 19: supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry
- (*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry)(nil), // 20: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry
- (*StatusResponse_P2PMetrics_RecentBatchStoreList)(nil), // 21: supernode.StatusResponse.P2PMetrics.RecentBatchStoreList
- (*StatusResponse_P2PMetrics_RecentBatchRetrieveList)(nil), // 22: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList
- nil, // 23: supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry
- nil, // 24: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry
- (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 25: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint
- (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 26: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint
-}
-var file_supernode_supernode_proto_depIdxs = []int32{
- 3, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo
- 5, // 1: supernode.StatusResponse.resources:type_name -> supernode.StatusResponse.Resources
- 6, // 2: supernode.StatusResponse.running_tasks:type_name -> supernode.StatusResponse.ServiceTasks
- 7, // 3: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network
- 8, // 4: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics
- 9, // 5: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU
- 10, // 6: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory
- 11, // 7: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage
- 12, // 8: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics
- 17, // 9: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry
- 18, // 10: supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry
- 14, // 11: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry
- 15, // 12: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats
- 16, // 13: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus
- 19, // 14: supernode.StatusResponse.P2PMetrics.recent_batch_store:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry
- 20, // 15: supernode.StatusResponse.P2PMetrics.recent_batch_retrieve:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry
- 23, // 16: supernode.StatusResponse.P2PMetrics.recent_batch_store_by_ip:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry
- 24, // 17: supernode.StatusResponse.P2PMetrics.recent_batch_retrieve_by_ip:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry
- 25, // 18: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint
- 26, // 19: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint
- 13, // 20: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters
- 19, // 21: supernode.StatusResponse.P2PMetrics.RecentBatchStoreList.entries:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry
- 20, // 22: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList.entries:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry
- 21, // 23: supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreList
- 22, // 24: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList
- 0, // 25: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest
- 1, // 26: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest
- 4, // 27: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse
- 2, // 28: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse
- 27, // [27:29] is the sub-list for method output_type
- 25, // [25:27] is the sub-list for method input_type
- 25, // [25:25] is the sub-list for extension type_name
- 25, // [25:25] is the sub-list for extension extendee
- 0, // [0:25] is the sub-list for field type_name
-}
-
-func init() { file_supernode_supernode_proto_init() }
-func file_supernode_supernode_proto_init() {
- if File_supernode_supernode_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_supernode_supernode_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 27,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_supernode_supernode_proto_goTypes,
- DependencyIndexes: file_supernode_supernode_proto_depIdxs,
- MessageInfos: file_supernode_supernode_proto_msgTypes,
- }.Build()
- File_supernode_supernode_proto = out.File
- file_supernode_supernode_proto_rawDesc = nil
- file_supernode_supernode_proto_goTypes = nil
- file_supernode_supernode_proto_depIdxs = nil
-}
diff --git a/go.mod b/go.mod
index b7f53ec1..a581736e 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,6 @@ module github.com/LumeraProtocol/supernode/v2
go 1.24.1
replace (
- github.com/LumeraProtocol/supernode/v2/supernode => ./supernode
github.com/bytedance/sonic => github.com/bytedance/sonic v1.14.0
github.com/bytedance/sonic/loader => github.com/bytedance/sonic/loader v0.3.0
)
@@ -11,7 +10,7 @@ replace (
require (
cosmossdk.io/math v1.5.3
github.com/AlecAivazis/survey/v2 v2.3.7
- github.com/LumeraProtocol/lumera v1.7.0
+ github.com/LumeraProtocol/lumera v1.7.2
github.com/LumeraProtocol/rq-go v0.2.1
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/cenkalti/backoff/v4 v4.3.0
@@ -35,7 +34,6 @@ require (
github.com/pkg/errors v0.9.1
github.com/shirou/gopsutil/v3 v3.24.5
github.com/spf13/cobra v1.8.1
- github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.10.0
go.uber.org/mock v0.5.2
go.uber.org/ratelimit v0.3.1
@@ -167,6 +165,7 @@ require (
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
+ github.com/spf13/viper v1.19.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
diff --git a/go.sum b/go.sum
index d8170371..839f29a2 100644
--- a/go.sum
+++ b/go.sum
@@ -63,8 +63,8 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
-github.com/LumeraProtocol/lumera v1.7.0 h1:F5zgRBnCtgGfdMB6jz01PFWIzbS8VjQfCu1H9OYt3BU=
-github.com/LumeraProtocol/lumera v1.7.0/go.mod h1:c1M+sjewuCvxw+pznwlspUzenDJI8Y+suKB3RFKS2Wo=
+github.com/LumeraProtocol/lumera v1.7.2 h1:qA0qwEOfCqW6yY232/MEK6gfLYq4HVYSmbcOCOZqEoc=
+github.com/LumeraProtocol/lumera v1.7.2/go.mod h1:c1M+sjewuCvxw+pznwlspUzenDJI8Y+suKB3RFKS2Wo=
github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4=
github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
diff --git a/p2p/kademlia/bootstrap.go b/p2p/kademlia/bootstrap.go
index 5b29f44d..25dc3b54 100644
--- a/p2p/kademlia/bootstrap.go
+++ b/p2p/kademlia/bootstrap.go
@@ -102,7 +102,7 @@ func (s *DHT) setBootstrapNodesFromConfigVar(ctx context.Context, bootstrapNodes
})
}
s.options.BootstrapNodes = nodes
- logtrace.Info(ctx, "Bootstrap nodes set from config var", logtrace.Fields{
+ logtrace.Debug(ctx, "Bootstrap nodes set from config var", logtrace.Fields{
logtrace.FieldModule: "p2p",
"bootstrap_nodes": nodes,
})
diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go
index 69c45023..9d029479 100644
--- a/p2p/kademlia/dht.go
+++ b/p2p/kademlia/dht.go
@@ -23,7 +23,6 @@ import (
"github.com/LumeraProtocol/supernode/v2/pkg/logtrace"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera"
ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials"
- "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics"
"github.com/LumeraProtocol/supernode/v2/pkg/storage"
"github.com/LumeraProtocol/supernode/v2/pkg/storage/memory"
"github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore"
@@ -42,9 +41,10 @@ const (
delKeysCountThreshold = 10
lowSpaceThreshold = 50 // GB
batchRetrieveSize = 1000
- storeSameSymbolsBatchConcurrency = 3
- fetchSymbolsBatchConcurrency = 6
- minimumDataStoreSuccessRate = 75.0
+
+ storeSameSymbolsBatchConcurrency = 3
+ fetchSymbolsBatchConcurrency = 6
+ minimumDataStoreSuccessRate = 75.0
maxIterations = 4
macConcurrentNetworkStoreCalls = 16
@@ -103,7 +103,7 @@ func (s *DHT) bootstrapIgnoreList(ctx context.Context) error {
}
if added > 0 {
- logtrace.Info(ctx, "Ignore list bootstrapped from replication info", logtrace.Fields{
+ logtrace.Debug(ctx, "Ignore list bootstrapped from replication info", logtrace.Fields{
logtrace.FieldModule: "p2p",
"ignored_count": added,
})
@@ -124,7 +124,7 @@ func (s *DHT) ConnPoolSnapshot() map[string]int64 {
// Options contains configuration options for the queries node
type Options struct {
- ID []byte
+ ID []byte
// The queries IPv4 or IPv6 address
IP string
@@ -141,6 +141,7 @@ type Options struct {
// Keyring for credentials
Keyring keyring.Keyring
+
}
// NewDHT returns a new DHT node
@@ -358,7 +359,7 @@ func (s *DHT) Store(ctx context.Context, data []byte, typ int) (string, error) {
// measured success rate for node RPCs is below the configured minimum, an error
// is returned. Metrics are not returned through the API.
func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID string) error {
- logtrace.Info(ctx, "Store DB batch begin", logtrace.Fields{
+ logtrace.Debug(ctx, "DHT StoreBatch begin", logtrace.Fields{
logtrace.FieldModule: "dht",
logtrace.FieldTaskID: taskID,
"records": len(values),
@@ -366,7 +367,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s
if err := s.store.StoreBatch(ctx, values, typ, true); err != nil {
return fmt.Errorf("store batch: %v", err)
}
- logtrace.Info(ctx, "Store DB batch done, store network batch begin", logtrace.Fields{
+ logtrace.Debug(ctx, "DHT StoreBatch: local stored; network begin", logtrace.Fields{
logtrace.FieldModule: "dht",
logtrace.FieldTaskID: taskID,
})
@@ -376,7 +377,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s
return fmt.Errorf("iterate batch store: %v", err)
}
- logtrace.Info(ctx, "Store network batch workers done", logtrace.Fields{
+ logtrace.Debug(ctx, "DHT StoreBatch: network done", logtrace.Fields{
logtrace.FieldModule: "dht",
logtrace.FieldTaskID: taskID,
})
@@ -387,6 +388,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s
// Retrieve data from the networking using key. Key is the base58 encoded
// identifier of the data.
func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]byte, error) {
+ start := time.Now()
decoded := base58.Decode(key)
if len(decoded) != B/8 {
return nil, fmt.Errorf("invalid key: %v", key)
@@ -402,6 +404,7 @@ func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]by
// retrieve the key/value from queries storage
value, err := s.store.Retrieve(ctx, decoded)
if err == nil && len(value) > 0 {
+ logtrace.Debug(ctx, "DHT Retrieve local hit", logtrace.Fields{"key": hex.EncodeToString(decoded), "ms": time.Since(start).Milliseconds()})
return value, nil
} else if err != nil {
logtrace.Error(ctx, "Error retrieving key from local storage", logtrace.Fields{
@@ -417,20 +420,23 @@ func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]by
}
// if not found locally, iterative find value from kademlia network
+ logtrace.Debug(ctx, "DHT Retrieve network lookup", logtrace.Fields{"key": dbKey})
peerValue, err := s.iterate(ctx, IterateFindValue, decoded, nil, 0)
if err != nil {
return nil, errors.Errorf("retrieve from peer: %w", err)
}
if len(peerValue) > 0 {
- logtrace.Info(ctx, "Not found locally, retrieved from other nodes", logtrace.Fields{
+ logtrace.Debug(ctx, "DHT Retrieve network hit", logtrace.Fields{
logtrace.FieldModule: "dht",
"key": dbKey,
"data_len": len(peerValue),
+ "ms": time.Since(start).Milliseconds(),
})
} else {
- logtrace.Info(ctx, "Not found locally, not found in other nodes", logtrace.Fields{
+ logtrace.Debug(ctx, "DHT Retrieve miss", logtrace.Fields{
logtrace.FieldModule: "dht",
"key": dbKey,
+ "ms": time.Since(start).Milliseconds(),
})
}
@@ -465,17 +471,7 @@ func (s *DHT) Stats(ctx context.Context) (map[string]interface{}, error) {
dhtStats["peers_count"] = len(s.ht.nodes())
dhtStats["peers"] = s.ht.nodes()
dhtStats["network"] = s.network.HandleMetricsSnapshot()
- // Include recent request snapshots for observability
- if s.network != nil {
- if overall, byIP := s.network.RecentBatchStoreSnapshot(); len(overall) > 0 || len(byIP) > 0 {
- dhtStats["recent_batch_store_overall"] = overall
- dhtStats["recent_batch_store_by_ip"] = byIP
- }
- if overall, byIP := s.network.RecentBatchRetrieveSnapshot(); len(overall) > 0 || len(byIP) > 0 {
- dhtStats["recent_batch_retrieve_overall"] = overall
- dhtStats["recent_batch_retrieve_by_ip"] = byIP
- }
- }
+ // Removed: recent per-request snapshots (logs provide visibility)
dhtStats["database"] = dbStats
return dhtStats, nil
@@ -525,15 +521,18 @@ func (s *DHT) GetValueFromNode(ctx context.Context, target []byte, n *Node) ([]b
cctx, ccancel := context.WithTimeout(ctx, time.Second*5)
defer ccancel()
+ // Minimal per-RPC visibility
+ logtrace.Debug(ctx, "RPC FindValue send", logtrace.Fields{"node": n.String(), "key": hex.EncodeToString(target)})
response, err := s.network.Call(cctx, request, false)
if err != nil {
- logtrace.Info(ctx, "Network call request failed", logtrace.Fields{
+ logtrace.Debug(ctx, "Network call request failed", logtrace.Fields{
logtrace.FieldModule: "p2p",
logtrace.FieldError: err.Error(),
"request": request.String(),
})
return nil, fmt.Errorf("network call request %s failed: %w", request.String(), err)
}
+ logtrace.Debug(ctx, "RPC FindValue completed", logtrace.Fields{"node": n.String()})
v, ok := response.Data.(*FindValueResponse)
if ok && v.Status.Result == ResultOk && len(v.Value) > 0 {
@@ -569,7 +568,7 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by
// update the running goroutines
number++
- logtrace.Info(ctx, "Start work for node", logtrace.Fields{
+ logtrace.Debug(ctx, "Start work for node", logtrace.Fields{
logtrace.FieldModule: "p2p",
"iterate_type": iterativeType,
"node": node.String(),
@@ -593,18 +592,35 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by
// new a request message
request := s.newMessage(messageType, receiver, data)
+ // Minimal per-RPC visibility
+ op := ""
+ switch messageType {
+ case FindNode:
+ op = "FindNode"
+ case FindValue:
+ op = "FindValue"
+ default:
+ op = "RPC"
+ }
+ fields := logtrace.Fields{"node": receiver.String()}
+ if messageType == FindValue {
+ fields["key"] = hex.EncodeToString(target)
+ }
+ logtrace.Debug(ctx, "RPC "+op+" send", fields)
// send the request and receive the response
response, err := s.network.Call(ctx, request, false)
if err != nil {
- logtrace.Info(ctx, "Network call request failed", logtrace.Fields{
+ logtrace.Debug(ctx, "Iterate worker RPC failed", logtrace.Fields{
logtrace.FieldModule: "p2p",
logtrace.FieldError: err.Error(),
"request": request.String(),
+ "node": receiver.String(),
})
// node is unreachable, remove the node
//removedNodes = append(removedNodes, receiver)
return
}
+ logtrace.Debug(ctx, "RPC "+op+" completed", logtrace.Fields{"node": receiver.String()})
// send the response to message channel
responses <- response
@@ -633,7 +649,7 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result
batchHexKeys := hexKeys[start:end]
- logtrace.Info(ctx, "Processing batch of local keys", logtrace.Fields{
+ logtrace.Debug(ctx, "Processing batch of local keys", logtrace.Fields{
logtrace.FieldModule: "dht",
"batch_size": len(batchHexKeys),
"total_keys": len(hexKeys),
@@ -666,7 +682,7 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result
}
func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, txID string, localOnly ...bool) (result map[string][]byte, err error) {
- start := time.Now()
+ logtrace.Debug(ctx, "DHT BatchRetrieve begin", logtrace.Fields{"txid": txID, "keys": len(keys), "required": required})
result = make(map[string][]byte)
var resMap sync.Map
var foundLocalCount int32
@@ -738,21 +754,23 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32,
if err != nil {
return nil, fmt.Errorf("fetch and add local keys: %v", err)
}
- // Report how many were found locally, for event metrics
- p2pmetrics.ReportFoundLocal(p2pmetrics.TaskIDFromContext(ctx), int(foundLocalCount))
+ // Found locally count is logged via summary below; no external metrics
if foundLocalCount >= required {
return result, nil
}
- batchSize := batchRetrieveSize
- var networkFound int32
- totalBatches := int(math.Ceil(float64(required) / float64(batchSize)))
- parallelBatches := int(math.Min(float64(totalBatches), float64(fetchSymbolsBatchConcurrency)))
+ batchSize := batchRetrieveSize
+ var networkFound int32
+ totalBatches := int(math.Ceil(float64(required) / float64(batchSize)))
+ parallelBatches := int(math.Min(float64(totalBatches), float64(fetchSymbolsBatchConcurrency)))
- semaphore := make(chan struct{}, parallelBatches)
- var wg sync.WaitGroup
- gctx, cancel := context.WithCancel(ctx)
- defer cancel()
+ semaphore := make(chan struct{}, parallelBatches)
+ var wg sync.WaitGroup
+ gctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // Measure only the network retrieval phase (after local scan)
+ netStart := time.Now()
for start := 0; start < len(keys); start += batchSize {
end := start + batchSize
@@ -784,11 +802,15 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32,
wg.Wait()
- netFound := int(atomic.LoadInt32(&networkFound))
- // Record batch retrieve stats for internal DHT snapshot window
- s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(start))
- // Also feed retrieve counts into the per-task collector for stream events
- p2pmetrics.SetRetrieveBatchSummary(p2pmetrics.TaskIDFromContext(ctx), len(keys), int(required), int(foundLocalCount), netFound, time.Since(start).Milliseconds())
+ netFound := int(atomic.LoadInt32(&networkFound))
+{
+ f := logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(netStart).Milliseconds(), logtrace.FieldRole: "client"}
+ if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o }
+ logtrace.Info(ctx, "dht: batch retrieve summary", f)
+}
+ // Record batch retrieve stats for internal DHT snapshot window (network phase only)
+ s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(netStart))
+ // No per-task metrics collector updates
return result, nil
}
@@ -899,7 +921,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node,
defer cancel()
for nodeID, node := range nodes {
if s.ignorelist.Banned(node) {
- logtrace.Info(ctx, "Ignore banned node in iterate batch get values", logtrace.Fields{
+ logtrace.Debug(ctx, "Ignore banned node in iterate batch get values", logtrace.Fields{
logtrace.FieldModule: "dht",
"node": node.String(),
})
@@ -920,8 +942,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node,
defer func() { <-semaphore }()
}
- callStart := time.Now()
- indices := fetchMap[nodeID]
+ indices := fetchMap[nodeID]
requestKeys := make(map[string]KeyValWithClosest)
for _, idx := range indices {
if idx < len(hexKeys) {
@@ -945,17 +966,9 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node,
firstErr = err
}
mu.Unlock()
- // record failed RPC per-node
- p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{
- IP: node.IP,
- Address: node.String(),
- Keys: 0,
- Success: false,
- Error: err.Error(),
- DurationMS: time.Since(callStart).Milliseconds(),
- })
- return
- }
+ // per-node metrics removed; logs retained
+ return
+ }
returned := 0
for k, v := range decompressedData {
@@ -975,21 +988,13 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node,
}
}
- // record successful RPC per-node (returned may be 0). Success is true when no error.
- p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{
- IP: node.IP,
- Address: node.String(),
- Keys: returned,
- Success: true,
- Error: "",
- DurationMS: time.Since(callStart).Milliseconds(),
- })
- }(node, nodeID)
- }
+ // per-node metrics removed; logs retained
+ }(node, nodeID)
+ }
wg.Wait()
- logtrace.Info(ctx, "Iterate batch get values done", logtrace.Fields{
+ logtrace.Debug(ctx, "Iterate batch get values done", logtrace.Fields{
logtrace.FieldModule: "dht",
"found_count": atomic.LoadInt32(&foundCount),
})
@@ -1033,10 +1038,20 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node,
func (s *DHT) doBatchGetValuesCall(ctx context.Context, node *Node, requestKeys map[string]KeyValWithClosest) (map[string]KeyValWithClosest, error) {
request := s.newMessage(BatchGetValues, node, &BatchGetValuesRequest{Data: requestKeys})
+{
+ f := logtrace.Fields{"node": node.String(), "keys": len(requestKeys), logtrace.FieldRole: "client"}
+ if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o }
+ logtrace.Info(ctx, "dht: batch get send", f)
+}
response, err := s.network.Call(ctx, request, false)
if err != nil {
return nil, fmt.Errorf("network call request %s failed: %w", request.String(), err)
}
+{
+ f := logtrace.Fields{"node": node.String(), logtrace.FieldRole: "client"}
+ if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o }
+ logtrace.Info(ctx, "dht: batch get ok", f)
+}
resp, ok := response.Data.(*BatchGetValuesResponse)
if !ok {
@@ -1071,7 +1086,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat
// find the closest contacts for the target node from queries route tables
nl, _ := s.ht.closestContacts(Alpha, target, igList)
if len(igList) > 0 {
- logtrace.Info(ctx, "Closest contacts", logtrace.Fields{
+ logtrace.Debug(ctx, "Closest contacts", logtrace.Fields{
logtrace.FieldModule: "p2p",
"nodes": nl.String(),
"ignored": s.ignorelist.String(),
@@ -1081,7 +1096,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat
if nl.Len() == 0 {
return nil, nil
}
- logtrace.Info(ctx, "Iterate start", logtrace.Fields{
+ logtrace.Debug(ctx, "Iterate start", logtrace.Fields{
logtrace.FieldModule: "p2p",
"task_id": taskID,
"type": iterativeType,
@@ -1095,7 +1110,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat
if iterativeType == IterateFindNode {
hashedTargetID, _ := utils.Blake3Hash(target)
bucket := s.ht.bucketIndex(s.ht.self.HashedID, hashedTargetID)
- logtrace.Info(ctx, "Bucket for target", logtrace.Fields{
+ logtrace.Debug(ctx, "Bucket for target", logtrace.Fields{
logtrace.FieldModule: "p2p",
"target": sKey,
})
@@ -1119,7 +1134,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat
// Set a maximum number of iterations to prevent indefinite looping
maxIterations := 5 // Adjust the maximum iterations as needed
- logtrace.Info(ctx, "Begin iteration", logtrace.Fields{
+ logtrace.Debug(ctx, "Begin iteration", logtrace.Fields{
logtrace.FieldModule: "p2p",
"task_id": taskID,
"key": sKey,
@@ -1130,7 +1145,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat
case <-ctx.Done():
return nil, fmt.Errorf("iterate cancelled: %w", ctx.Err())
case <-timeout:
- logtrace.Info(ctx, "Iteration timed out", logtrace.Fields{
+ logtrace.Debug(ctx, "Iteration timed out", logtrace.Fields{
logtrace.FieldModule: "p2p",
})
return nil, nil
@@ -1153,7 +1168,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat
}
default:
- logtrace.Error(ctx, "Unknown message type", logtrace.Fields{
+ logtrace.Debug(ctx, "Unknown message type", logtrace.Fields{
logtrace.FieldModule: "dht",
"type": response.MessageType,
})
@@ -1162,7 +1177,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat
// Stop search if no more nodes to contact
if !searchRest && len(nl.Nodes) == 0 {
- logtrace.Info(ctx, "Search stopped", logtrace.Fields{
+ logtrace.Debug(ctx, "Search stopped", logtrace.Fields{
logtrace.FieldModule: "p2p",
"task_id": taskID,
"key": sKey,
@@ -1174,7 +1189,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat
nl.Comparator = target
nl.Sort()
- logtrace.Info(ctx, "Iterate sorted nodes", logtrace.Fields{
+ logtrace.Debug(ctx, "Iterate sorted nodes", logtrace.Fields{
logtrace.FieldModule: "p2p",
"id": base58.Encode(s.ht.self.ID),
"iterate": iterativeType,
@@ -1211,7 +1226,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat
}
}
- logtrace.Info(ctx, "Finish iteration without results", logtrace.Fields{
+ logtrace.Debug(ctx, "Finish iteration without results", logtrace.Fields{
logtrace.FieldModule: "p2p",
"task_id": taskID,
"key": sKey,
@@ -1232,7 +1247,7 @@ func (s *DHT) handleResponses(ctx context.Context, responses <-chan *Message, nl
v, ok := response.Data.(*FindValueResponse)
if ok {
if v.Status.Result == ResultOk && len(v.Value) > 0 {
- logtrace.Info(ctx, "Iterate found value from network", logtrace.Fields{
+ logtrace.Debug(ctx, "Iterate found value from network", logtrace.Fields{
logtrace.FieldModule: "p2p",
})
return nl, v.Value
@@ -1262,7 +1277,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target []
// nl will have the closest nodes to the target value, it will ignore the nodes in igList
nl, _ := s.ht.closestContacts(Alpha, target, igList)
if len(igList) > 0 {
- logtrace.Info(ctx, "Closest contacts", logtrace.Fields{
+ logtrace.Debug(ctx, "Closest contacts", logtrace.Fields{
logtrace.FieldModule: "p2p",
"nodes": nl.String(),
"ignored": s.ignorelist.String(),
@@ -1277,7 +1292,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target []
searchRest := false
// keep track of contacted nodes so that we don't hit them again
contacted := make(map[string]bool)
- logtrace.Info(ctx, "Begin iteration", logtrace.Fields{
+ logtrace.Debug(ctx, "Begin iteration", logtrace.Fields{
logtrace.FieldModule: "p2p",
"task_id": taskID,
"key": sKey,
@@ -1286,7 +1301,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target []
var closestNode *Node
var iterationCount int
for iterationCount = 0; iterationCount < maxIterations; iterationCount++ {
- logtrace.Info(ctx, "Begin find value", logtrace.Fields{
+ logtrace.Debug(ctx, "Begin find value", logtrace.Fields{
logtrace.FieldModule: "p2p",
"task_id": taskID,
"nl": nl.Len(),
@@ -1295,7 +1310,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target []
})
if nl.Len() == 0 {
- logtrace.Error(ctx, "Nodes list length is 0", logtrace.Fields{
+ logtrace.Debug(ctx, "Nodes list length is 0", logtrace.Fields{
logtrace.FieldModule: "p2p",
"task_id": taskID,
"key": sKey,
@@ -1306,7 +1321,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target []
// if the closest node is the same as the last iteration and we don't want to search rest of nodes, we are done
if !searchRest && (closestNode != nil && bytes.Equal(nl.Nodes[0].ID, closestNode.ID)) {
- logtrace.Info(ctx, "Closest node is the same as the last iteration", logtrace.Fields{
+ logtrace.Debug(ctx, "Closest node is the same as the last iteration", logtrace.Fields{
logtrace.FieldModule: "p2p",
"task_id": taskID,
"key": sKey,
@@ -1325,7 +1340,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target []
nl.Sort()
- logtrace.Info(ctx, "Iteration progress", logtrace.Fields{
+ logtrace.Debug(ctx, "Iteration progress", logtrace.Fields{
logtrace.FieldModule: "p2p",
"task_id": taskID,
"key": sKey,
@@ -1334,7 +1349,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target []
})
}
- logtrace.Info(ctx, "Finished iterations without results", logtrace.Fields{
+ logtrace.Debug(ctx, "Finished iterations without results", logtrace.Fields{
logtrace.FieldModule: "p2p",
"task_id": taskID,
"key": sKey,
@@ -1514,7 +1529,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte,
for i := Alpha; i < nl.Len() && finalStoreCount < int32(Alpha); i++ {
n := nl.Nodes[i]
if s.ignorelist.Banned(n) {
- logtrace.Info(ctx, "Ignore banned node during sequential store", logtrace.Fields{
+ logtrace.Debug(ctx, "Ignore banned node during sequential store", logtrace.Fields{
logtrace.FieldModule: "p2p",
"node": n.String(),
"task_id": taskID,
@@ -1547,7 +1562,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte,
skey, _ := utils.Blake3Hash(data)
if finalStoreCount >= int32(Alpha) {
- logtrace.Info(ctx, "Store data to alpha nodes success", logtrace.Fields{
+ logtrace.Debug(ctx, "Store data to alpha nodes success", logtrace.Fields{
logtrace.FieldModule: "dht",
"task_id": taskID,
"len_total_nodes": nl.Len(),
@@ -1557,7 +1572,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte,
return nil
}
- logtrace.Info(ctx, "Store data to alpha nodes failed", logtrace.Fields{
+ logtrace.Debug(ctx, "Store data to alpha nodes failed", logtrace.Fields{
logtrace.FieldModule: "dht",
"task_id": taskID,
"store_count": finalStoreCount,
@@ -1570,7 +1585,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte,
func (s *DHT) removeNode(ctx context.Context, node *Node) {
// ensure this is not itself address
if bytes.Equal(node.ID, s.ht.self.ID) {
- logtrace.Info(ctx, "Trying to remove itself", logtrace.Fields{
+ logtrace.Debug(ctx, "Trying to remove itself", logtrace.Fields{
logtrace.FieldModule: "p2p",
})
return
@@ -1586,7 +1601,7 @@ func (s *DHT) removeNode(ctx context.Context, node *Node) {
"bucket": index,
})
} else {
- logtrace.Info(ctx, "Removed node from bucket success", logtrace.Fields{
+ logtrace.Debug(ctx, "Removed node from bucket success", logtrace.Fields{
logtrace.FieldModule: "p2p",
"node": node.String(),
"bucket": index,
@@ -1644,12 +1659,11 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i
knownNodes := make(map[string]*Node)
hashes := make([][]byte, len(values))
- logtrace.Info(ctx, "Iterate batch store begin", logtrace.Fields{
- logtrace.FieldModule: "dht",
- "task_id": id,
- "keys": len(values),
- "len_nodes": len(s.ht.nodes()),
- })
+{
+ f := logtrace.Fields{logtrace.FieldModule: "dht", "task_id": id, "keys": len(values), "len_nodes": len(s.ht.nodes()), logtrace.FieldRole: "client"}
+ if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o }
+ logtrace.Info(ctx, "dht: batch store start", f)
+}
for i := 0; i < len(values); i++ {
target, _ := utils.Blake3Hash(values[i])
hashes[i] = target
@@ -1671,17 +1685,15 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i
requests := 0
successful := 0
+ logtrace.Debug(ctx, "Iterate batch store: dispatching to nodes", logtrace.Fields{"task_id": id, "nodes": len(knownNodes)})
storeResponses := s.batchStoreNetwork(ctx, values, knownNodes, storageMap, typ)
for response := range storeResponses {
requests++
- var nodeAddr string
- var nodeIP string
+ var nodeAddr string
if response.Receiver != nil {
- nodeAddr = response.Receiver.String()
- nodeIP = response.Receiver.IP
+ nodeAddr = response.Receiver.String()
} else if response.Message != nil && response.Message.Sender != nil {
- nodeAddr = response.Message.Sender.String()
- nodeIP = response.Message.Sender.IP
+ nodeAddr = response.Message.Sender.String()
}
errMsg := ""
@@ -1712,15 +1724,7 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i
}
}
- // Emit per-node store RPC call via metrics bridge (no P2P API coupling)
- p2pmetrics.RecordStore(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{
- IP: nodeIP,
- Address: nodeAddr,
- Keys: response.KeysCount,
- Success: errMsg == "" && response.Error == nil,
- Error: errMsg,
- DurationMS: response.DurationMS,
- })
+ // per-node store metrics removed; logs retained
}
@@ -1729,14 +1733,14 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i
successRate := float64(successful) / float64(requests) * 100
if successRate >= minimumDataStoreSuccessRate {
- logtrace.Info(ctx, "Successful store operations", logtrace.Fields{
+ logtrace.Info(ctx, "dht: batch store ok", logtrace.Fields{
logtrace.FieldModule: "dht",
"task_id": id,
"success_rate": fmt.Sprintf("%.2f%%", successRate),
})
return nil
} else {
- logtrace.Info(ctx, "Failed to achieve desired success rate", logtrace.Fields{
+ logtrace.Info(ctx, "dht: batch store below threshold", logtrace.Fields{
logtrace.FieldModule: "dht",
"task_id": id,
"success_rate": fmt.Sprintf("%.2f%%", successRate),
@@ -1763,12 +1767,9 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[
var wg sync.WaitGroup
for key, node := range nodes {
- logtrace.Info(ctx, "Node", logtrace.Fields{
- logtrace.FieldModule: "dht",
- "port": node.String(),
- })
+ logtrace.Debug(ctx, "Preparing batch store to node", logtrace.Fields{logtrace.FieldModule: "dht", "node": node.String()})
if s.ignorelist.Banned(node) {
- logtrace.Info(ctx, "Ignoring banned node in batch store network call", logtrace.Fields{
+ logtrace.Debug(ctx, "Ignoring banned node in batch store network call", logtrace.Fields{
logtrace.FieldModule: "dht",
"node": node.String(),
})
@@ -1796,15 +1797,15 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[
totalBytes += len(values[idx])
}
- logtrace.Info(ctx, "Batch store to node", logtrace.Fields{
- logtrace.FieldModule: "dht",
- "keys": len(toStore),
- "size_before_compress": utils.BytesIntToMB(totalBytes),
- })
+ {
+ f := logtrace.Fields{logtrace.FieldModule: "dht", "node": receiver.String(), "keys": len(toStore), "size_mb": utils.BytesIntToMB(totalBytes), logtrace.FieldRole: "client"}
+ if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o }
+ logtrace.Info(ctx, "dht: batch store RPC send", f)
+ }
// Skip empty payloads: avoid sending empty store RPCs and do not record no-op metrics.
if len(toStore) == 0 {
- logtrace.Info(ctx, "Skipping store RPC with empty payload", logtrace.Fields{
+ logtrace.Debug(ctx, "Skipping store RPC with empty payload", logtrace.Fields{
logtrace.FieldModule: "dht",
"node": receiver.String(),
})
@@ -1821,15 +1822,16 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[
s.metrics.IncHotPathBanIncr()
}
- logtrace.Info(ctx, "Network call batch store request failed", logtrace.Fields{
- logtrace.FieldModule: "p2p",
- logtrace.FieldError: err.Error(),
- "request": request.String(),
- })
+ logtrace.Error(ctx, "RPC BatchStoreData failed", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "node": receiver.String(), "ms": dur})
responses <- &MessageWithError{Error: err, Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur}
return
}
+ {
+ f := logtrace.Fields{logtrace.FieldModule: "p2p", "node": receiver.String(), "keys": len(toStore), "ms": dur, logtrace.FieldRole: "client"}
+ if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o }
+ logtrace.Info(ctx, "dht: batch store RPC ok", f)
+ }
responses <- &MessageWithError{Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur}
}
}(node, key)
@@ -1842,7 +1844,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[
}
func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[string]*Node, contacted map[string]bool, txid string) (chan *MessageWithError, bool) {
- logtrace.Info(ctx, "Batch find node begin", logtrace.Fields{
+ logtrace.Debug(ctx, "Batch find node begin", logtrace.Fields{
logtrace.FieldModule: "dht",
"task_id": txid,
"nodes_count": len(nodes),
@@ -1865,7 +1867,7 @@ func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[str
continue
}
if s.ignorelist.Banned(node) {
- logtrace.Info(ctx, "Ignoring banned node in batch find call", logtrace.Fields{
+ logtrace.Debug(ctx, "Ignoring banned node in batch find call", logtrace.Fields{
logtrace.FieldModule: "dht",
"node": node.String(),
"txid": txid,
@@ -1913,7 +1915,7 @@ func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[str
}
wg.Wait()
close(responses)
- logtrace.Info(ctx, "Batch find node done", logtrace.Fields{
+ logtrace.Debug(ctx, "Batch find node done", logtrace.Fields{
logtrace.FieldModule: "dht",
"nodes_count": len(nodes),
"len_resp": len(responses),
diff --git a/p2p/kademlia/fetch_and_store.go b/p2p/kademlia/fetch_and_store.go
index 9803bf3d..6344095d 100644
--- a/p2p/kademlia/fetch_and_store.go
+++ b/p2p/kademlia/fetch_and_store.go
@@ -26,12 +26,12 @@ const (
// FetchAndStore fetches all keys from the queries TODO replicate list, fetches value from respective nodes and stores them in the queries store
func (s *DHT) FetchAndStore(ctx context.Context) error {
- logtrace.Info(ctx, "Getting fetch and store keys", logtrace.Fields{})
+ logtrace.Debug(ctx, "Getting fetch and store keys", logtrace.Fields{})
keys, err := s.store.GetAllToDoRepKeys(failedKeysClosestContactsLookupCount+maxBatchAttempts+1, totalMaxAttempts)
if err != nil {
return fmt.Errorf("get all keys error: %w", err)
}
- logtrace.Info(ctx, "got keys from queries store", logtrace.Fields{"count": len(keys)})
+ logtrace.Debug(ctx, "got keys from queries store", logtrace.Fields{"count": len(keys)})
if len(keys) == 0 {
return nil
@@ -79,7 +79,7 @@ func (s *DHT) FetchAndStore(ctx context.Context) error {
return
}
- logtrace.Info(cctx, "iterate fetch for replication success", logtrace.Fields{"key": info.Key, "ip": info.IP})
+ logtrace.Debug(cctx, "iterate fetch for replication success", logtrace.Fields{"key": info.Key, "ip": info.IP})
}
if err := s.store.Store(cctx, sKey, value, 0, false); err != nil {
@@ -94,7 +94,7 @@ func (s *DHT) FetchAndStore(ctx context.Context) error {
atomic.AddInt32(&successCounter, 1) // Increment the counter atomically
- logtrace.Info(cctx, "fetch & store key success", logtrace.Fields{"key": info.Key, "ip": info.IP})
+ logtrace.Debug(cctx, "fetch & store key success", logtrace.Fields{"key": info.Key, "ip": info.IP})
}(key)
time.Sleep(100 * time.Millisecond)
@@ -102,7 +102,7 @@ func (s *DHT) FetchAndStore(ctx context.Context) error {
//wg.Wait()
- logtrace.Info(ctx, "Successfully fetched & stored keys", logtrace.Fields{"todo-keys": len(keys), "successfully-added-keys": atomic.LoadInt32(&successCounter)}) // Log the final count
+ logtrace.Debug(ctx, "Successfully fetched & stored keys", logtrace.Fields{"todo-keys": len(keys), "successfully-added-keys": atomic.LoadInt32(&successCounter)}) // Log the final count
return nil
}
@@ -114,7 +114,7 @@ func (s *DHT) BatchFetchAndStoreFailedKeys(ctx context.Context) error {
if err != nil {
return fmt.Errorf("get all keys error: %w", err)
}
- logtrace.Info(ctx, "read failed keys from store", logtrace.Fields{"count": len(keys)})
+ logtrace.Debug(ctx, "read failed keys from store", logtrace.Fields{"count": len(keys)})
if len(keys) == 0 {
return nil
@@ -143,7 +143,7 @@ func (s *DHT) BatchFetchAndStoreFailedKeys(ctx context.Context) error {
repKeys = append(repKeys, repKey)
}
}
- logtrace.Info(ctx, "got 2nd tier replication keys from queries store", logtrace.Fields{"count": len(repKeys)})
+ logtrace.Debug(ctx, "got 2nd tier replication keys from queries store", logtrace.Fields{"count": len(repKeys)})
if err := s.GroupAndBatchFetch(ctx, repKeys, 0, false); err != nil {
logtrace.Error(ctx, "group and batch fetch failed-keys error", logtrace.Fields{logtrace.FieldError: err})
@@ -160,7 +160,7 @@ func (s *DHT) BatchFetchAndStore(ctx context.Context) error {
if err != nil {
return fmt.Errorf("get all keys error: %w", err)
}
- logtrace.Info(ctx, "got batch todo rep-keys from queries store", logtrace.Fields{"count": len(keys)})
+ logtrace.Debug(ctx, "got batch todo rep-keys from queries store", logtrace.Fields{"count": len(keys)})
if len(keys) == 0 {
return nil
@@ -213,12 +213,12 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey,
totalKeysFound := 0
for len(stringKeys) > 0 && iterations < maxSingleBatchIterations {
iterations++
- logtrace.Info(ctx, "fetching batch values from node", logtrace.Fields{"node-ip": node.IP, "count": len(stringKeys), "keys[0]": stringKeys[0], "keys[len()]": stringKeys[len(stringKeys)-1]})
+ logtrace.Debug(ctx, "fetching batch values from node", logtrace.Fields{"node-ip": node.IP, "count": len(stringKeys), "keys[0]": stringKeys[0], "keys[len()]": stringKeys[len(stringKeys)-1]})
isDone, retMap, failedKeys, err := s.GetBatchValuesFromNode(ctx, stringKeys, node)
if err != nil {
// Log the error but don't stop the process, continue to the next node
- logtrace.Info(ctx, "failed to get batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err})
+ logtrace.Debug(ctx, "failed to get batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err})
continue
}
@@ -238,7 +238,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey,
err = s.store.StoreBatch(ctx, response, datatype, isOriginal)
if err != nil {
// Log the error but don't stop the process, continue to the next node
- logtrace.Info(ctx, "failed to store batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err})
+ logtrace.Debug(ctx, "failed to store batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err})
continue
}
@@ -246,7 +246,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey,
err = s.store.BatchDeleteRepKeys(stringDelKeys)
if err != nil {
// Log the error but don't stop the process, continue to the next node
- logtrace.Info(ctx, "failed to delete rep keys", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err})
+ logtrace.Debug(ctx, "failed to delete rep keys", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err})
continue
}
} else {
@@ -255,7 +255,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey,
if isDone && len(failedKeys) > 0 {
if err := s.store.IncrementAttempts(failedKeys); err != nil {
- logtrace.Info(ctx, "failed to increment attempts", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err})
+ logtrace.Debug(ctx, "failed to increment attempts", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err})
// not adding 'continue' here because we want to delete the keys from the todo list
}
} else if isDone {
@@ -265,7 +265,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey,
}
}
- logtrace.Info(ctx, "fetch batch values from node successfully", logtrace.Fields{"node-ip": node.IP, "count": totalKeysFound, "iterations": iterations})
+ logtrace.Debug(ctx, "fetch batch values from node successfully", logtrace.Fields{"node-ip": node.IP, "count": totalKeysFound, "iterations": iterations})
}
}
@@ -274,7 +274,9 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey,
// GetBatchValuesFromNode get values from node in bateches
func (s *DHT) GetBatchValuesFromNode(ctx context.Context, keys []string, n *Node) (bool, map[string][]byte, []string, error) {
- logtrace.Info(ctx, "sending batch fetch request", logtrace.Fields{"node-ip": n.IP, "keys": len(keys)})
+ logtrace.Debug(ctx, "sending batch fetch request", logtrace.Fields{"node-ip": n.IP, "keys": len(keys)})
+ // Minimal per-RPC visibility for background replication path
+ logtrace.Debug(ctx, "RPC BatchFindValues send", logtrace.Fields{"node": n.String(), "keys": len(keys)})
messageType := BatchFindValues
@@ -347,8 +349,9 @@ func (s *DHT) GetBatchValuesFromNode(ctx context.Context, keys []string, n *Node
if err != nil {
return isDone, nil, nil, fmt.Errorf("failed to verify and filter data: %w", err)
}
- logtrace.Info(ctx, "batch fetch response rcvd and keys verified", logtrace.Fields{"node-ip": n.IP, "received-keys": len(decompressedMap), "verified-keys": len(retMap), "failed-keys": len(failedKeys)})
+ logtrace.Debug(ctx, "batch fetch response rcvd and keys verified", logtrace.Fields{"node-ip": n.IP, "received-keys": len(decompressedMap), "verified-keys": len(retMap), "failed-keys": len(failedKeys)})
+ logtrace.Debug(ctx, "RPC BatchFindValues completed", logtrace.Fields{"node": n.String(), "received_keys": len(decompressedMap), "verified_keys": len(retMap)})
return v.Done, retMap, failedKeys, nil
}
diff --git a/p2p/kademlia/message.go b/p2p/kademlia/message.go
index 0baef37c..4f778d1f 100644
--- a/p2p/kademlia/message.go
+++ b/p2p/kademlia/message.go
@@ -66,6 +66,11 @@ type Message struct {
Receiver *Node // the receiver node
MessageType int // the message type
Data interface{} // the real data for the request
+ // CorrelationID carries a best-effort trace identifier so that logs
+ // across nodes can be joined in external systems.
+ CorrelationID string
+ // Origin carries the phase that produced this message (first_pass | worker | download)
+ Origin string
}
func (m *Message) String() string {
diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go
index a2322ff7..e4ab76e5 100644
--- a/p2p/kademlia/network.go
+++ b/p2p/kademlia/network.go
@@ -2,7 +2,6 @@ package kademlia
import (
"context"
- "encoding/hex"
"fmt"
"io"
"net"
@@ -70,12 +69,6 @@ type Network struct {
metrics sync.Map
- // recent request tracking (last 10 entries overall and per IP)
- recentMu sync.Mutex
- recentStoreOverall []RecentBatchStoreEntry
- recentStoreByIP map[string][]RecentBatchStoreEntry
- recentRetrieveOverall []RecentBatchRetrieveEntry
- recentRetrieveByIP map[string][]RecentBatchRetrieveEntry
}
// NewNetwork returns a network service
@@ -197,7 +190,7 @@ func (s *Network) handleFindValue(ctx context.Context, message *Message) (res []
request, ok := message.Data.(*FindValueRequest)
if !ok {
err := errors.New("invalid FindValueRequest")
- return s.generateResponseMessage(FindValue, message.Sender, ResultFailed, err.Error())
+ return s.generateResponseMessage(ctx, FindValue, message.Sender, ResultFailed, err.Error())
}
// add the sender to queries hash table
@@ -252,7 +245,7 @@ func (s *Network) handleStoreData(ctx context.Context, message *Message) (res []
request, ok := message.Data.(*StoreDataRequest)
if !ok {
err := errors.New("invalid StoreDataRequest")
- return s.generateResponseMessage(StoreData, message.Sender, ResultFailed, err.Error())
+ return s.generateResponseMessage(ctx, StoreData, message.Sender, ResultFailed, err.Error())
}
logtrace.Debug(ctx, "Handle store data", logtrace.Fields{logtrace.FieldModule: "p2p", "message": message.String()})
@@ -268,7 +261,7 @@ func (s *Network) handleStoreData(ctx context.Context, message *Message) (res []
// store the data to queries storage
if err := s.dht.store.Store(ctx, key, request.Data, request.Type, false); err != nil {
err = errors.Errorf("store the data: %w", err)
- return s.generateResponseMessage(StoreData, message.Sender, ResultFailed, err.Error())
+ return s.generateResponseMessage(ctx, StoreData, message.Sender, ResultFailed, err.Error())
}
}
@@ -293,13 +286,13 @@ func (s *Network) handleReplicate(ctx context.Context, message *Message) (res []
request, ok := message.Data.(*ReplicateDataRequest)
if !ok {
err := errors.New("invalid ReplicateDataRequest")
- return s.generateResponseMessage(Replicate, message.Sender, ResultFailed, err.Error())
+ return s.generateResponseMessage(ctx, Replicate, message.Sender, ResultFailed, err.Error())
}
logtrace.Debug(ctx, "Handle replicate data", logtrace.Fields{logtrace.FieldModule: "p2p", "message": message.String()})
if err := s.handleReplicateRequest(ctx, request, message.Sender.ID, message.Sender.IP, message.Sender.Port); err != nil {
- return s.generateResponseMessage(Replicate, message.Sender, ResultFailed, err.Error())
+ return s.generateResponseMessage(ctx, Replicate, message.Sender, ResultFailed, err.Error())
}
response := &ReplicateDataResponse{
@@ -337,7 +330,7 @@ func (s *Network) handleReplicateRequest(ctx context.Context, req *ReplicateData
return fmt.Errorf("unable to store batch replication keys: %w", err)
}
- logtrace.Info(ctx, "Store batch replication keys stored", logtrace.Fields{
+ logtrace.Debug(ctx, "Store batch replication keys stored", logtrace.Fields{
logtrace.FieldModule: "p2p",
"to-store-keys": len(keysToStore),
"rcvd-keys": len(req.Keys),
@@ -348,7 +341,7 @@ func (s *Network) handleReplicateRequest(ctx context.Context, req *ReplicateData
return nil
}
-func (s *Network) handlePing(_ context.Context, message *Message) ([]byte, error) {
+func (s *Network) handlePing(ctx context.Context, message *Message) ([]byte, error) {
// new a response message
resMsg := s.dht.newMessage(Ping, message.Sender, nil)
@@ -413,6 +406,16 @@ func (s *Network) handleConn(ctx context.Context, rawConn net.Conn) {
})
return
}
+ // stitch correlation + origin into context for downstream handler logs
+ if request != nil {
+ if s := strings.TrimSpace(request.CorrelationID); s != "" {
+ ctx = logtrace.CtxWithCorrelationID(ctx, s)
+ }
+ if o := strings.TrimSpace(request.Origin); o != "" {
+ ctx = logtrace.CtxWithOrigin(ctx, o)
+ }
+ }
+
reqID := uuid.New().String()
mt := request.MessageType
@@ -593,6 +596,33 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes
// pool key: bech32@ip:port (bech32 identity is your invariant)
idStr := string(request.Receiver.ID)
remoteAddr := fmt.Sprintf("%s@%s:%d", idStr, strings.TrimSpace(request.Receiver.IP), request.Receiver.Port)
+ // Log raw RPC start (reduce noise: Info only for high-signal messages)
+ startFields := logtrace.Fields{
+ logtrace.FieldModule: "p2p",
+ "remote": remoteAddr,
+ "message": msgName(request.MessageType),
+ "timeout_ms": int64(timeout / time.Millisecond),
+ }
+ // Tag role/origin for filtering
+ startFields[logtrace.FieldRole] = "client"
+ if o := logtrace.OriginFromContext(ctx); o != "" {
+ startFields[logtrace.FieldOrigin] = o
+ }
+ if isHighSignalMsg(request.MessageType) {
+ logtrace.Info(ctx, fmt.Sprintf("RPC %s start remote=%s timeout_ms=%d", msgName(request.MessageType), remoteAddr, int64(timeout/time.Millisecond)), startFields)
+ } else {
+ logtrace.Debug(ctx, fmt.Sprintf("RPC %s start remote=%s timeout_ms=%d", msgName(request.MessageType), remoteAddr, int64(timeout/time.Millisecond)), startFields)
+ }
+
+ // Attach correlation id only for high‑signal messages (store/retrieve batches)
+ if isHighSignalMsg(request.MessageType) {
+ if cid := logtrace.CorrelationIDFromContext(ctx); cid != "unknown" {
+ request.CorrelationID = cid
+ }
+ if o := logtrace.OriginFromContext(ctx); o != "" {
+ request.Origin = o
+ }
+ }
// try get from pool
s.connPoolMtx.Lock()
@@ -634,6 +664,7 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes
// ---- retryable RPC helpers -------------------------------------------------
func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAddr string, data []byte, timeout time.Duration, msgType int) (*Message, error) {
+ start := time.Now()
writeDL := calcWriteDeadline(timeout, len(data), 1.0) // target ~1 MB/s
retried := false
@@ -650,7 +681,7 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd
if _, e := cw.secureConn.Write(data); e != nil {
cw.mtx.Unlock()
if isStaleConnError(e) && !retried {
- logtrace.Info(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{
+ logtrace.Debug(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{
logtrace.FieldModule: "p2p",
"remote": remoteAddr,
"message_type": msgType,
@@ -691,7 +722,7 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd
cw.mtx.Unlock()
if e != nil {
if isStaleConnError(e) && !retried {
- logtrace.Info(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{
+ logtrace.Debug(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{
logtrace.FieldModule: "p2p",
"remote": remoteAddr,
"message_type": msgType,
@@ -718,11 +749,20 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd
s.dropFromPool(remoteAddr, cw)
return nil, errors.Errorf("conn read: %w", e)
}
+ // Single-line completion for successful outbound RPC
+ if isHighSignalMsg(msgType) {
+ f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}
+ if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o }
+ logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f)
+ } else {
+ logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"})
+ }
return r, nil
}
}
func (s *Network) rpcOnceNonWrapper(ctx context.Context, conn net.Conn, remoteAddr string, data []byte, timeout time.Duration, msgType int) (*Message, error) {
+ start := time.Now()
sizeMB := float64(len(data)) / (1024.0 * 1024.0) // data is your gob-encoded message
throughputFloor := 8.0 // MB/s (~64 Mbps)
est := time.Duration(sizeMB / throughputFloor * float64(time.Second))
@@ -744,7 +784,7 @@ Retry:
}
if _, err := conn.Write(data); err != nil {
if isStaleConnError(err) && !retried {
- logtrace.Info(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{
+ logtrace.Debug(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{
logtrace.FieldModule: "p2p",
"remote": remoteAddr,
"message_type": msgType,
@@ -778,7 +818,7 @@ Retry:
_ = conn.SetDeadline(time.Time{})
if err != nil {
if isStaleConnError(err) && !retried {
- logtrace.Info(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{
+ logtrace.Debug(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{
logtrace.FieldModule: "p2p",
"remote": remoteAddr,
"message_type": msgType,
@@ -802,6 +842,13 @@ Retry:
s.dropFromPool(remoteAddr, conn)
return nil, errors.Errorf("conn read: %w", err)
}
+ if isHighSignalMsg(msgType) {
+ f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}
+ if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o }
+ logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f)
+ } else {
+ logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"})
+ }
return resp, nil
}
@@ -842,16 +889,16 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r
// Try to acquire the semaphore, wait up to 1 minute
logtrace.Debug(ctx, "Attempting to acquire semaphore immediately", logtrace.Fields{logtrace.FieldModule: "p2p"})
if !s.sem.TryAcquire(1) {
- logtrace.Info(ctx, "Immediate acquisition failed. Waiting up to 1 minute", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "Immediate acquisition failed. Waiting up to 1 minute", logtrace.Fields{logtrace.FieldModule: "p2p"})
ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
if err := s.sem.Acquire(ctxWithTimeout, 1); err != nil {
logtrace.Error(ctx, "Failed to acquire semaphore within 1 minute", logtrace.Fields{logtrace.FieldModule: "p2p"})
// failed to acquire semaphore within 1 minute
- return s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, errorBusy)
+ return s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, errorBusy)
}
- logtrace.Info(ctx, "Semaphore acquired after waiting", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "Semaphore acquired after waiting", logtrace.Fields{logtrace.FieldModule: "p2p"})
}
// Add a defer function to recover from panic
@@ -875,18 +922,18 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r
err = errors.New("unknown error")
}
- res, _ = s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, err.Error())
+ res, _ = s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, err.Error())
}
}()
request, ok := message.Data.(*BatchFindValuesRequest)
if !ok {
- return s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, "invalid BatchFindValueRequest")
+ return s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, "invalid BatchFindValueRequest")
}
isDone, data, err := s.handleBatchFindValuesRequest(ctx, request, message.Sender.IP, reqID)
if err != nil {
- return s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, err.Error())
+ return s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, err.Error())
}
response := &BatchFindValuesResponse{
@@ -898,46 +945,24 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r
}
resMsg := s.dht.newMessage(BatchFindValues, message.Sender, response)
+ resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx)
return s.encodeMesage(resMsg)
}
func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, reqID string) (res []byte, err error) {
- start := time.Now()
- appended := false
- defer func() {
- if response, err := s.handlePanic(ctx, message.Sender, BatchGetValues); response != nil || err != nil {
- res = response
- if !appended {
- s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{
- TimeUnix: time.Now().UTC().Unix(),
- SenderID: string(message.Sender.ID),
- SenderIP: message.Sender.IP,
- Requested: 0,
- Found: 0,
- DurationMS: time.Since(start).Milliseconds(),
- Error: "panic/recovered",
- })
- }
- }
- }()
-
- request, ok := message.Data.(*BatchGetValuesRequest)
- if !ok {
- err := errors.New("invalid BatchGetValuesRequest")
- s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{
- TimeUnix: time.Now().UTC().Unix(),
- SenderID: string(message.Sender.ID),
- SenderIP: message.Sender.IP,
- Requested: 0,
- Found: 0,
- DurationMS: time.Since(start).Milliseconds(),
- Error: err.Error(),
- })
- appended = true
- return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error())
- }
-
- logtrace.Info(ctx, "Batch get values request received", logtrace.Fields{
+ defer func() {
+ if response, err := s.handlePanic(ctx, message.Sender, BatchGetValues); response != nil || err != nil {
+ res = response
+ }
+ }()
+
+ request, ok := message.Data.(*BatchGetValuesRequest)
+ if !ok {
+ err := errors.New("invalid BatchGetValuesRequest")
+ return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error())
+ }
+
+ logtrace.Debug(ctx, "Batch get values request received", logtrace.Fields{
logtrace.FieldModule: "p2p",
"from": message.Sender.String(),
})
@@ -951,42 +976,22 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message,
i++
}
- values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, true)
- if err != nil {
- err = errors.Errorf("batch find values: %w", err)
- s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{
- TimeUnix: time.Now().UTC().Unix(),
- SenderID: string(message.Sender.ID),
- SenderIP: message.Sender.IP,
- Requested: len(keys),
- Found: count,
- DurationMS: time.Since(start).Milliseconds(),
- Error: err.Error(),
- })
- appended = true
- return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error())
- }
+ values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, false)
+ if err != nil {
+ err = errors.Errorf("batch find values: %w", err)
+ return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error())
+ }
- logtrace.Info(ctx, "Batch get values request processed", logtrace.Fields{
- logtrace.FieldModule: "p2p",
- "requested-keys": len(keys),
- "found": count,
- "sender": message.Sender.String(),
- })
+ {
+ f := logtrace.Fields{logtrace.FieldModule: "p2p", "requested-keys": len(keys), "found": count, "sender": message.Sender.String(), logtrace.FieldRole: "server"}
+ if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o }
+ logtrace.Info(ctx, "network: batch get values ok", f)
+ }
for i, key := range keys {
val := KeyValWithClosest{
- Value: values[i],
- }
- if len(val.Value) == 0 {
- decodedKey, err := hex.DecodeString(keys[i])
- if err != nil {
- err = errors.Errorf("batch find vals: decode key: %w - key %s", err, keys[i])
- return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error())
- }
-
- nodes, _ := s.dht.ht.closestContacts(Alpha, decodedKey, []*Node{message.Sender})
- val.Closest = nodes.Nodes
+ Value: values[i],
+ Closest: make([]*Node, 0), // for compatibility, not used - each node now has full view of the whole network
}
request.Data[key] = val
@@ -1000,23 +1005,14 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message,
}
// new a response message
- resMsg := s.dht.newMessage(BatchGetValues, message.Sender, response)
- s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{
- TimeUnix: time.Now().UTC().Unix(),
- SenderID: string(message.Sender.ID),
- SenderIP: message.Sender.IP,
- Requested: len(keys),
- Found: count,
- DurationMS: time.Since(start).Milliseconds(),
- Error: "",
- })
- appended = true
- return s.encodeMesage(resMsg)
+ resMsg := s.dht.newMessage(BatchGetValues, message.Sender, response)
+ resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx)
+ return s.encodeMesage(resMsg)
}
func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFindValuesRequest, ip string, reqID string) (isDone bool, compressedData []byte, err error) {
// log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("keys", len(req.Keys)).WithField("from-ip", ip).Info("batch find values request received")
- logtrace.Info(ctx, "Batch find values request received", logtrace.Fields{
+ logtrace.Debug(ctx, "Batch find values request received", logtrace.Fields{
logtrace.FieldModule: "p2p",
"from": ip,
"keys": len(req.Keys),
@@ -1039,7 +1035,7 @@ func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFi
return false, nil, fmt.Errorf("failed to retrieve batch values: %w", err)
}
// log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("values-len", len(values)).WithField("found", count).WithField("from-ip", ip).Info("batch find values request processed")
- logtrace.Info(ctx, "Batch find values request processed", logtrace.Fields{
+ logtrace.Debug(ctx, "Batch find values request processed", logtrace.Fields{
logtrace.FieldModule: "p2p",
"p2p-req-id": reqID,
"values-len": len(values),
@@ -1054,7 +1050,7 @@ func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFi
// log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("compressed-data-len", utils.BytesToMB(uint64(len(compressedData)))).WithField("found", count).
// WithField("from-ip", ip).Info("batch find values response sent")
- logtrace.Info(ctx, "Batch find values response sent", logtrace.Fields{
+ logtrace.Debug(ctx, "Batch find values response sent", logtrace.Fields{
logtrace.FieldModule: "p2p",
"p2p-req-id": reqID,
"compressed-data-len": utils.BytesToMB(uint64(len(compressedData))),
@@ -1182,65 +1178,32 @@ func findTopHeaviestKeys(dataMap map[string][]byte, size int) (int, []string) {
}
func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (res []byte, err error) {
- start := time.Now()
- appended := false
- defer func() {
- if response, err := s.handlePanic(ctx, message.Sender, BatchStoreData); response != nil || err != nil {
- res = response
- if !appended {
- s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{
- TimeUnix: time.Now().UTC().Unix(),
- SenderID: string(message.Sender.ID),
- SenderIP: message.Sender.IP,
- Keys: 0,
- DurationMS: time.Since(start).Milliseconds(),
- OK: false,
- Error: "panic/recovered",
- })
- }
- }
- }()
-
- request, ok := message.Data.(*BatchStoreDataRequest)
- if !ok {
- err := errors.New("invalid BatchStoreDataRequest")
- s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{
- TimeUnix: time.Now().UTC().Unix(),
- SenderID: string(message.Sender.ID),
- SenderIP: message.Sender.IP,
- Keys: 0,
- DurationMS: time.Since(start).Milliseconds(),
- OK: false,
- Error: err.Error(),
- })
- appended = true
- return s.generateResponseMessage(BatchStoreData, message.Sender, ResultFailed, err.Error())
- }
+ defer func() {
+ if response, err := s.handlePanic(ctx, message.Sender, BatchStoreData); response != nil || err != nil {
+ res = response
+ }
+ }()
+
+ request, ok := message.Data.(*BatchStoreDataRequest)
+ if !ok {
+ err := errors.New("invalid BatchStoreDataRequest")
+ return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error())
+ }
// log.P2P().WithContext(ctx).Info("handle batch store data request received")
- logtrace.Info(ctx, "Handle batch store data request received", logtrace.Fields{
- logtrace.FieldModule: "p2p",
- "sender": message.Sender.String(),
- "keys": len(request.Data),
- })
+ {
+ f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"}
+ if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o }
+ logtrace.Info(ctx, "network: batch store recv", f)
+ }
// add the sender to queries hash table
s.dht.addNode(ctx, message.Sender)
- if err := s.dht.store.StoreBatch(ctx, request.Data, 1, false); err != nil {
- err = errors.Errorf("batch store the data: %w", err)
- s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{
- TimeUnix: time.Now().UTC().Unix(),
- SenderID: string(message.Sender.ID),
- SenderIP: message.Sender.IP,
- Keys: len(request.Data),
- DurationMS: time.Since(start).Milliseconds(),
- OK: false,
- Error: err.Error(),
- })
- appended = true
- return s.generateResponseMessage(BatchStoreData, message.Sender, ResultFailed, err.Error())
- }
+ if err := s.dht.store.StoreBatch(ctx, request.Data, 1, false); err != nil {
+ err = errors.Errorf("batch store the data: %w", err)
+ return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error())
+ }
response := &StoreDataResponse{
Status: ResponseStatus{
@@ -1248,25 +1211,16 @@ func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (r
},
}
// log.P2P().WithContext(ctx).Info("handle batch store data request processed")
- logtrace.Info(ctx, "Handle batch store data request processed", logtrace.Fields{
- logtrace.FieldModule: "p2p",
- "sender": message.Sender.String(),
- "keys": len(request.Data),
- })
+ {
+ f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"}
+ if o := logtrace.OriginFromContext(ctx); o != "" { f[logtrace.FieldOrigin] = o }
+ logtrace.Info(ctx, "network: batch store ok", f)
+ }
// new a response message
- resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response)
- s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{
- TimeUnix: time.Now().UTC().Unix(),
- SenderID: string(message.Sender.ID),
- SenderIP: message.Sender.IP,
- Keys: len(request.Data),
- DurationMS: time.Since(start).Milliseconds(),
- OK: true,
- Error: "",
- })
- appended = true
- return s.encodeMesage(resMsg)
+ resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response)
+ resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx)
+ return s.encodeMesage(resMsg)
}
func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (res []byte, err error) {
@@ -1279,7 +1233,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re
request, ok := message.Data.(*BatchFindNodeRequest)
if !ok {
err := errors.New("invalid FindNodeRequest")
- return s.generateResponseMessage(BatchFindNode, message.Sender, ResultFailed, err.Error())
+ return s.generateResponseMessage(ctx, BatchFindNode, message.Sender, ResultFailed, err.Error())
}
// add the sender to queries hash table
@@ -1293,7 +1247,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re
closestMap := make(map[string][]*Node)
// log.WithContext(ctx).WithField("sender", message.Sender.String()).Info("Batch Find Nodes Request Received")
- logtrace.Info(ctx, "Batch Find Nodes Request Received", logtrace.Fields{
+ logtrace.Debug(ctx, "Batch Find Nodes Request Received", logtrace.Fields{
logtrace.FieldModule: "p2p",
"sender": message.Sender.String(),
"hashed-targets": len(request.HashedTarget),
@@ -1304,7 +1258,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re
}
response.ClosestNodes = closestMap
// log.WithContext(ctx).WithField("sender", message.Sender.String()).Info("Batch Find Nodes Request Processed")
- logtrace.Info(ctx, "Batch Find Nodes Request Processed", logtrace.Fields{
+ logtrace.Debug(ctx, "Batch Find Nodes Request Processed", logtrace.Fields{
logtrace.FieldModule: "p2p",
"sender": message.Sender.String(),
})
@@ -1314,7 +1268,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re
return s.encodeMesage(resMsg)
}
-func (s *Network) generateResponseMessage(messageType int, receiver *Node, result ResultType, errMsg string) ([]byte, error) {
+func (s *Network) generateResponseMessage(ctx context.Context, messageType int, receiver *Node, result ResultType, errMsg string) ([]byte, error) {
responseStatus := ResponseStatus{
Result: result,
ErrMsg: errMsg,
@@ -1342,6 +1296,10 @@ func (s *Network) generateResponseMessage(messageType int, receiver *Node, resul
}
resMsg := s.dht.newMessage(messageType, receiver, response)
+ // propagate correlation id on responses too, but only for high‑signal messages
+ if isHighSignalMsg(messageType) {
+ resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx)
+ }
return s.encodeMesage(resMsg)
}
@@ -1363,7 +1321,7 @@ func (s *Network) handlePanic(ctx context.Context, sender *Node, messageType int
err = errors.New("unknown error")
}
- if res, err := s.generateResponseMessage(messageType, sender, ResultFailed, err.Error()); err != nil {
+ if res, err := s.generateResponseMessage(ctx, messageType, sender, ResultFailed, err.Error()); err != nil {
// log.WithContext(ctx).Errorf("Error generating response message: %v", err)
logtrace.Error(ctx, "Error generating response message", logtrace.Fields{
logtrace.FieldModule: "p2p",
@@ -1478,6 +1436,18 @@ func msgName(t int) string {
}
}
+// isHighSignalMsg returns true for message types that are heavy and relevant
+// to artefact store/retrieve visibility. Lightweight chatter like Ping or
+// FindNode is excluded to avoid log noise at Info level.
+func isHighSignalMsg(t int) bool {
+ switch t {
+ case BatchStoreData, BatchGetValues, BatchFindValues:
+ return true
+ default:
+ return false
+ }
+}
+
func (s *Network) HandleMetricsSnapshot() map[string]HandleCounters {
out := make(map[string]HandleCounters)
s.metrics.Range(func(k, v any) bool {
diff --git a/p2p/kademlia/node_activity.go b/p2p/kademlia/node_activity.go
index cc7089d6..88e09f7a 100644
--- a/p2p/kademlia/node_activity.go
+++ b/p2p/kademlia/node_activity.go
@@ -25,7 +25,7 @@ func (s *DHT) checkNodeActivity(ctx context.Context) {
return
case <-ticker.C:
if !utils.CheckInternetConnectivity() {
- logtrace.Info(ctx, "no internet connectivity, not checking node activity", logtrace.Fields{})
+ logtrace.Debug(ctx, "no internet connectivity, not checking node activity", logtrace.Fields{})
continue
}
@@ -115,7 +115,7 @@ func (s *DHT) handlePingSuccess(ctx context.Context, wasActive bool, n *Node) {
s.ignorelist.Delete(n)
if !wasActive {
- logtrace.Info(ctx, "node found to be active again", logtrace.Fields{
+ logtrace.Debug(ctx, "node found to be active again", logtrace.Fields{
logtrace.FieldModule: "p2p",
"ip": n.IP,
"node_id": string(n.ID),
diff --git a/p2p/kademlia/recent.go b/p2p/kademlia/recent.go
deleted file mode 100644
index 2467cf02..00000000
--- a/p2p/kademlia/recent.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package kademlia
-
-import (
- "sync"
- "time"
-)
-
-// RecentBatchStoreEntry captures a handled BatchStoreData request outcome
-type RecentBatchStoreEntry struct {
- TimeUnix int64 `json:"time_unix"`
- SenderID string `json:"sender_id"`
- SenderIP string `json:"sender_ip"`
- Keys int `json:"keys"`
- DurationMS int64 `json:"duration_ms"`
- OK bool `json:"ok"`
- Error string `json:"error,omitempty"`
-}
-
-// RecentBatchRetrieveEntry captures a handled BatchGetValues request outcome
-type RecentBatchRetrieveEntry struct {
- TimeUnix int64 `json:"time_unix"`
- SenderID string `json:"sender_id"`
- SenderIP string `json:"sender_ip"`
- Requested int `json:"requested"`
- Found int `json:"found"`
- DurationMS int64 `json:"duration_ms"`
- Error string `json:"error,omitempty"`
-}
-
-func (s *Network) appendStoreEntry(ip string, e RecentBatchStoreEntry) {
- s.recentMu.Lock()
- defer s.recentMu.Unlock()
- if s.recentStoreByIP == nil {
- s.recentStoreByIP = make(map[string][]RecentBatchStoreEntry)
- }
- s.recentStoreOverall = append([]RecentBatchStoreEntry{e}, s.recentStoreOverall...)
- if len(s.recentStoreOverall) > 10 {
- s.recentStoreOverall = s.recentStoreOverall[:10]
- }
- lst := append([]RecentBatchStoreEntry{e}, s.recentStoreByIP[ip]...)
- if len(lst) > 10 {
- lst = lst[:10]
- }
- s.recentStoreByIP[ip] = lst
-}
-
-func (s *Network) appendRetrieveEntry(ip string, e RecentBatchRetrieveEntry) {
- s.recentMu.Lock()
- defer s.recentMu.Unlock()
- if s.recentRetrieveByIP == nil {
- s.recentRetrieveByIP = make(map[string][]RecentBatchRetrieveEntry)
- }
- s.recentRetrieveOverall = append([]RecentBatchRetrieveEntry{e}, s.recentRetrieveOverall...)
- if len(s.recentRetrieveOverall) > 10 {
- s.recentRetrieveOverall = s.recentRetrieveOverall[:10]
- }
- lst := append([]RecentBatchRetrieveEntry{e}, s.recentRetrieveByIP[ip]...)
- if len(lst) > 10 {
- lst = lst[:10]
- }
- s.recentRetrieveByIP[ip] = lst
-}
-
-// RecentBatchStoreSnapshot returns copies of recent store entries (overall and by IP)
-func (s *Network) RecentBatchStoreSnapshot() (overall []RecentBatchStoreEntry, byIP map[string][]RecentBatchStoreEntry) {
- s.recentMu.Lock()
- defer s.recentMu.Unlock()
- overall = append([]RecentBatchStoreEntry(nil), s.recentStoreOverall...)
- byIP = make(map[string][]RecentBatchStoreEntry, len(s.recentStoreByIP))
- for k, v := range s.recentStoreByIP {
- byIP[k] = append([]RecentBatchStoreEntry(nil), v...)
- }
- return
-}
-
-// RecentBatchRetrieveSnapshot returns copies of recent retrieve entries (overall and by IP)
-func (s *Network) RecentBatchRetrieveSnapshot() (overall []RecentBatchRetrieveEntry, byIP map[string][]RecentBatchRetrieveEntry) {
- s.recentMu.Lock()
- defer s.recentMu.Unlock()
- overall = append([]RecentBatchRetrieveEntry(nil), s.recentRetrieveOverall...)
- byIP = make(map[string][]RecentBatchRetrieveEntry, len(s.recentRetrieveByIP))
- for k, v := range s.recentRetrieveByIP {
- byIP[k] = append([]RecentBatchRetrieveEntry(nil), v...)
- }
- return
-}
-
-// helper to avoid unused import warning if needed
-var _ = time.Now
-var _ = sync.Mutex{}
diff --git a/p2p/kademlia/redundant_data.go b/p2p/kademlia/redundant_data.go
index bfe6947d..151269d1 100644
--- a/p2p/kademlia/redundant_data.go
+++ b/p2p/kademlia/redundant_data.go
@@ -13,7 +13,7 @@ import (
)
func (s *DHT) startDisabledKeysCleanupWorker(ctx context.Context) error {
- logtrace.Info(ctx, "disabled keys cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "disabled keys cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"})
for {
select {
@@ -50,7 +50,7 @@ func (s *DHT) cleanupDisabledKeys(ctx context.Context) error {
}
func (s *DHT) startCleanupRedundantDataWorker(ctx context.Context) {
- logtrace.Info(ctx, "redundant data cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "redundant data cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"})
for {
select {
@@ -66,7 +66,7 @@ func (s *DHT) startCleanupRedundantDataWorker(ctx context.Context) {
func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) {
from := time.Now().AddDate(-5, 0, 0) // 5 years ago
- logtrace.Info(ctx, "getting all possible replication keys past five years", logtrace.Fields{logtrace.FieldModule: "p2p", "from": from})
+ logtrace.Debug(ctx, "getting all possible replication keys past five years", logtrace.Fields{logtrace.FieldModule: "p2p", "from": from})
to := time.Now().UTC()
replicationKeys := s.store.GetKeysForReplication(ctx, from, to)
@@ -88,7 +88,7 @@ func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) {
removeKeys := make([]domain.DelKey, 0)
for key, closestContacts := range closestContactsMap {
if len(closestContacts) < Alpha {
- logtrace.Info(ctx, "not enough contacts to replicate", logtrace.Fields{logtrace.FieldModule: "p2p", "key": key, "closest contacts": closestContacts})
+ logtrace.Debug(ctx, "not enough contacts to replicate", logtrace.Fields{logtrace.FieldModule: "p2p", "key": key, "closest contacts": closestContacts})
continue
}
@@ -118,9 +118,9 @@ func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) {
return
}
- logtrace.Info(ctx, "insert del keys success", logtrace.Fields{logtrace.FieldModule: "p2p", "count-del-keys": len(insertKeys)})
+ logtrace.Debug(ctx, "insert del keys success", logtrace.Fields{logtrace.FieldModule: "p2p", "count-del-keys": len(insertKeys)})
} else {
- logtrace.Info(ctx, "No redundant key found to be stored in the storage", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "No redundant key found to be stored in the storage", logtrace.Fields{logtrace.FieldModule: "p2p"})
}
if len(removeKeys) > 0 {
@@ -133,7 +133,7 @@ func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) {
}
func (s *DHT) startDeleteDataWorker(ctx context.Context) {
- logtrace.Info(ctx, "start delete data worker", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "start delete data worker", logtrace.Fields{logtrace.FieldModule: "p2p"})
for {
select {
diff --git a/p2p/kademlia/replication.go b/p2p/kademlia/replication.go
index 5163fd0b..4a36c422 100644
--- a/p2p/kademlia/replication.go
+++ b/p2p/kademlia/replication.go
@@ -34,7 +34,7 @@ var (
// StartReplicationWorker starts replication
func (s *DHT) StartReplicationWorker(ctx context.Context) error {
- logtrace.Info(ctx, "replication worker started", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "replication worker started", logtrace.Fields{logtrace.FieldModule: "p2p"})
go s.checkNodeActivity(ctx)
go s.StartBatchFetchAndStoreWorker(ctx)
@@ -54,7 +54,7 @@ func (s *DHT) StartReplicationWorker(ctx context.Context) error {
// StartBatchFetchAndStoreWorker starts replication
func (s *DHT) StartBatchFetchAndStoreWorker(ctx context.Context) error {
- logtrace.Info(ctx, "batch fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "batch fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"})
for {
select {
@@ -69,7 +69,7 @@ func (s *DHT) StartBatchFetchAndStoreWorker(ctx context.Context) error {
// StartFailedFetchAndStoreWorker starts replication
func (s *DHT) StartFailedFetchAndStoreWorker(ctx context.Context) error {
- logtrace.Info(ctx, "fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"})
for {
select {
@@ -131,7 +131,7 @@ func (s *DHT) Replicate(ctx context.Context) {
historicStart = time.Now().UTC().Add(-24 * time.Hour * 180)
}
- logtrace.Info(ctx, "replicating data", logtrace.Fields{logtrace.FieldModule: "p2p", "historic-start": historicStart})
+ logtrace.Debug(ctx, "replicating data", logtrace.Fields{logtrace.FieldModule: "p2p", "historic-start": historicStart})
for i := 0; i < B; i++ {
if time.Since(s.ht.refreshTime(i)) > defaultRefreshTime {
@@ -150,7 +150,7 @@ func (s *DHT) Replicate(ctx context.Context) {
}
if len(repInfo) == 0 {
- logtrace.Info(ctx, "no replication info found", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "no replication info found", logtrace.Fields{logtrace.FieldModule: "p2p"})
return
}
@@ -159,7 +159,7 @@ func (s *DHT) Replicate(ctx context.Context) {
from = *repInfo[0].LastReplicatedAt
}
- logtrace.Info(ctx, "getting all possible replication keys", logtrace.Fields{logtrace.FieldModule: "p2p", "from": from})
+ logtrace.Debug(ctx, "getting all possible replication keys", logtrace.Fields{logtrace.FieldModule: "p2p", "from": from})
to := time.Now().UTC()
replicationKeys := s.store.GetKeysForReplication(ctx, from, to)
@@ -199,7 +199,7 @@ func (s *DHT) Replicate(ctx context.Context) {
continue
}
countToSendKeys := len(replicationKeys) - idx
- logtrace.Info(ctx, "count of replication keys to be checked", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": countToSendKeys})
+ logtrace.Debug(ctx, "count of replication keys to be checked", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": countToSendKeys})
// Preallocate a slice with a capacity equal to the number of keys.
closestContactKeys := make([]string, 0, countToSendKeys)
@@ -212,13 +212,13 @@ func (s *DHT) Replicate(ctx context.Context) {
}
}
- logtrace.Info(ctx, "closest contact keys count", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": len(closestContactKeys)})
+ logtrace.Debug(ctx, "closest contact keys count", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": len(closestContactKeys)})
if len(closestContactKeys) == 0 {
if err := s.updateLastReplicated(ctx, info.ID, to); err != nil {
logtrace.Error(ctx, "replicate update lastReplicated failed", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)})
} else {
- logtrace.Info(ctx, "no closest keys found - replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "closest-contact-keys": 0})
+ logtrace.Debug(ctx, "no closest keys found - replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "closest-contact-keys": 0})
}
continue
@@ -258,17 +258,17 @@ func (s *DHT) Replicate(ctx context.Context) {
if err := s.updateLastReplicated(ctx, info.ID, to); err != nil {
logtrace.Error(ctx, "replicate update lastReplicated failed", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)})
} else {
- logtrace.Info(ctx, "replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "expected-rep-keys": len(closestContactKeys)})
+ logtrace.Debug(ctx, "replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "expected-rep-keys": len(closestContactKeys)})
}
}
- logtrace.Info(ctx, "Replication done", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "Replication done", logtrace.Fields{logtrace.FieldModule: "p2p"})
}
func (s *DHT) adjustNodeKeys(ctx context.Context, from time.Time, info domain.NodeReplicationInfo) error {
replicationKeys := s.store.GetKeysForReplication(ctx, from, time.Now().UTC())
- logtrace.Info(ctx, "begin adjusting node keys process for offline node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "total-rep-keys": len(replicationKeys), "from": from.String()})
+ logtrace.Debug(ctx, "begin adjusting node keys process for offline node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "total-rep-keys": len(replicationKeys), "from": from.String()})
// prepare ignored nodes list but remove the node we are adjusting
// because we want to find if this node was supposed to hold this key
@@ -315,7 +315,7 @@ func (s *DHT) adjustNodeKeys(ctx context.Context, from time.Time, info domain.No
failureCount := 0
for nodeInfoKey, keys := range nodeKeysMap {
- logtrace.Info(ctx, "sending adjusted replication keys to node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "adjust-to-node": nodeInfoKey, "to-adjust-keys-len": len(keys)})
+ logtrace.Debug(ctx, "sending adjusted replication keys to node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "adjust-to-node": nodeInfoKey, "to-adjust-keys-len": len(keys)})
// Retrieve the node object from the key
node, err := getNodeFromKey(nodeInfoKey)
if err != nil {
@@ -370,14 +370,14 @@ func (s *DHT) adjustNodeKeys(ctx context.Context, from time.Time, info domain.No
return fmt.Errorf("replicate update isAdjusted failed: %v", err)
}
- logtrace.Info(ctx, "offline node was successfully adjusted", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID)})
+ logtrace.Debug(ctx, "offline node was successfully adjusted", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID)})
return nil
}
func isNodeGoneAndShouldBeAdjusted(lastSeen *time.Time, isAlreadyAdjusted bool) bool {
if lastSeen == nil {
- logtrace.Info(context.Background(), "lastSeen is nil - aborting node adjustment", logtrace.Fields{})
+ logtrace.Debug(context.Background(), "lastSeen is nil - aborting node adjustment", logtrace.Fields{})
return false
}
@@ -396,10 +396,10 @@ func (s *DHT) checkAndAdjustNode(ctx context.Context, info domain.NodeReplicatio
if err := s.store.UpdateIsAdjusted(ctx, string(info.ID), true); err != nil {
logtrace.Error(ctx, "failed to update replication info, set isAdjusted to true", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "rep-ip": info.IP, "rep-id": string(info.ID)})
} else {
- logtrace.Info(ctx, "set isAdjusted to true", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)})
+ logtrace.Debug(ctx, "set isAdjusted to true", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)})
}
}
}
- logtrace.Info(ctx, "replication node not active, skipping over it.", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)})
+ logtrace.Debug(ctx, "replication node not active, skipping over it.", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)})
}
diff --git a/p2p/kademlia/rq_symbols.go b/p2p/kademlia/rq_symbols.go
index fbf6563d..85367dec 100644
--- a/p2p/kademlia/rq_symbols.go
+++ b/p2p/kademlia/rq_symbols.go
@@ -16,7 +16,8 @@ const (
)
func (s *DHT) startStoreSymbolsWorker(ctx context.Context) {
- logtrace.Info(ctx, "start delete data worker", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ // Minimal visibility for lifecycle + each tick
+ logtrace.Debug(ctx, "rq_symbols worker started", logtrace.Fields{logtrace.FieldModule: "p2p"})
for {
select {
@@ -25,7 +26,7 @@ func (s *DHT) startStoreSymbolsWorker(ctx context.Context) {
logtrace.Error(ctx, "store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err})
}
case <-ctx.Done():
- logtrace.Error(ctx, "closing store symbols worker", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "rq_symbols worker stopping", logtrace.Fields{logtrace.FieldModule: "p2p"})
return
}
}
@@ -37,13 +38,30 @@ func (s *DHT) storeSymbols(ctx context.Context) error {
return fmt.Errorf("get to do store symbol dirs: %w", err)
}
+ // Minimal visibility: how many dirs to process this tick
+ if len(dirs) > 0 {
+ logtrace.Info(ctx, "worker: symbols todo", logtrace.Fields{"count": len(dirs)})
+ }
+
for _, dir := range dirs {
- logtrace.Info(ctx, "rq_symbols worker: start scanning dir & storing raptorQ symbols", logtrace.Fields{"dir": dir, "txid": dir.TXID})
- if err := s.scanDirAndStoreSymbols(ctx, dir.Dir, dir.TXID); err != nil {
- logtrace.Error(ctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err})
+ // Use txid as correlation id so worker logs join with register flow
+ wctx := logtrace.CtxWithCorrelationID(ctx, dir.TXID)
+ // Pre-count symbols in this directory
+ preCount := -1
+ if set, rerr := utils.ReadDirFilenames(dir.Dir); rerr == nil {
+ preCount = len(set)
}
-
- logtrace.Info(ctx, "rq_symbols worker: scanned dir & stored raptorQ symbols", logtrace.Fields{"dir": dir, "txid": dir.TXID})
+ start := time.Now()
+ logtrace.Info(wctx, "worker: dir start", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "symbols": preCount})
+ if err := s.scanDirAndStoreSymbols(wctx, dir.Dir, dir.TXID); err != nil {
+ logtrace.Error(wctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err})
+ }
+ // Post-count remaining symbols
+ remCount := -1
+ if set, rerr := utils.ReadDirFilenames(dir.Dir); rerr == nil {
+ remCount = len(set)
+ }
+ logtrace.Info(wctx, "worker: dir done", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "remaining": remCount, "ms": time.Since(start).Milliseconds()})
}
return nil
@@ -68,17 +86,17 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro
logtrace.Info(ctx, "p2p-worker: storing ALL RaptorQ symbols", logtrace.Fields{"txid": txid, "dir": dir, "total": len(keys)})
- // Batch-flush at loadSymbolsBatchSize
- for start := 0; start < len(keys); {
- end := start + loadSymbolsBatchSize
- if end > len(keys) {
- end = len(keys)
- }
- if err := s.storeSymbolsInP2P(ctx, dir, keys[start:end]); err != nil {
- return err
- }
- start = end
- }
+ // Batch-flush at loadSymbolsBatchSize
+ for start := 0; start < len(keys); {
+ end := start + loadSymbolsBatchSize
+ if end > len(keys) {
+ end = len(keys)
+ }
+ if err := s.storeSymbolsInP2P(ctx, txid, dir, keys[start:end]); err != nil {
+ return err
+ }
+ start = end
+ }
// Mark this directory as completed in rqstore
if err := s.rqstore.SetIsCompleted(txid); err != nil {
@@ -90,15 +108,21 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro
// ---------------------------------------------------------------------
// 2. Load → StoreBatch → Delete for a slice of keys
// ---------------------------------------------------------------------
-func (s *DHT) storeSymbolsInP2P(ctx context.Context, dir string, keys []string) error {
+func (s *DHT) storeSymbolsInP2P(ctx context.Context, txid, dir string, keys []string) error {
+ // Per-batch visibility for background worker
+ logtrace.Info(ctx, "worker: batch send", logtrace.Fields{"dir": dir, "keys": len(keys), logtrace.FieldTaskID: txid})
+
+ start := time.Now()
loaded, err := utils.LoadSymbols(dir, keys)
if err != nil {
return fmt.Errorf("load symbols: %w", err)
}
- if err := s.StoreBatch(ctx, loaded, 1, dir); err != nil {
- return fmt.Errorf("p2p store batch: %w", err)
- }
+ if err := s.StoreBatch(ctx, loaded, 1, txid); err != nil {
+ return fmt.Errorf("p2p store batch: %w", err)
+ }
+
+ logtrace.Info(ctx, "worker: batch ok", logtrace.Fields{"dir": dir, "keys": len(loaded), "ms": time.Since(start).Milliseconds(), logtrace.FieldTaskID: txid})
if err := utils.DeleteSymbols(ctx, dir, keys); err != nil {
return fmt.Errorf("delete symbols: %w", err)
diff --git a/p2p/kademlia/store/meta/meta.go b/p2p/kademlia/store/meta/meta.go
index fa75dc81..c57d05a4 100644
--- a/p2p/kademlia/store/meta/meta.go
+++ b/p2p/kademlia/store/meta/meta.go
@@ -67,7 +67,7 @@ func NewStore(ctx context.Context, dataDir string) (*Store, error) {
quit: make(chan bool),
}
- logtrace.Info(ctx, fmt.Sprintf("p2p data dir: %v", dataDir), logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, fmt.Sprintf("p2p data dir: %v", dataDir), logtrace.Fields{logtrace.FieldModule: "p2p"})
if _, err := os.Stat(dataDir); os.IsNotExist(err) {
if err := os.MkdirAll(dataDir, 0750); err != nil {
return nil, fmt.Errorf("mkdir %q: %w", dataDir, err)
@@ -185,10 +185,10 @@ func (s *Store) startCheckpointWorker(ctx context.Context) {
select {
case <-ctx.Done():
- logtrace.Info(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{})
+ logtrace.Debug(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{})
return
case <-s.worker.quit:
- logtrace.Info(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{})
+ logtrace.Debug(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{})
return
default:
}
@@ -204,10 +204,10 @@ func (s *Store) start(ctx context.Context) {
logtrace.Error(ctx, "Failed to perform job", logtrace.Fields{logtrace.FieldError: err})
}
case <-s.worker.quit:
- logtrace.Info(ctx, "exit sqlite meta db worker - quit signal received", logtrace.Fields{})
+ logtrace.Debug(ctx, "exit sqlite meta db worker - quit signal received", logtrace.Fields{})
return
case <-ctx.Done():
- logtrace.Info(ctx, "exit sqlite meta db worker- ctx done signal received", logtrace.Fields{})
+ logtrace.Debug(ctx, "exit sqlite meta db worker- ctx done signal received", logtrace.Fields{})
return
}
}
diff --git a/p2p/kademlia/store/sqlite/meta_worker.go b/p2p/kademlia/store/sqlite/meta_worker.go
index eb7a968f..6d1207df 100644
--- a/p2p/kademlia/store/sqlite/meta_worker.go
+++ b/p2p/kademlia/store/sqlite/meta_worker.go
@@ -124,7 +124,7 @@ func NewMigrationMetaStore(ctx context.Context, dataDir string, cloud cloud.Stor
go handler.startLastAccessedUpdateWorker(ctx)
go handler.startInsertWorker(ctx)
go handler.startMigrationExecutionWorker(ctx)
- logtrace.Info(ctx, "MigrationMetaStore workers started", logtrace.Fields{})
+ logtrace.Debug(ctx, "MigrationMetaStore workers started", logtrace.Fields{})
return handler, nil
}
@@ -348,7 +348,7 @@ func (d *MigrationMetaStore) startLastAccessedUpdateWorker(ctx context.Context)
case <-d.updateTicker.C:
d.commitLastAccessedUpdates(ctx)
case <-ctx.Done():
- logtrace.Info(ctx, "Shutting down last accessed update worker", logtrace.Fields{})
+ logtrace.Debug(ctx, "Shutting down last accessed update worker", logtrace.Fields{})
return
}
}
@@ -414,7 +414,7 @@ func (d *MigrationMetaStore) commitLastAccessedUpdates(ctx context.Context) {
d.updates.Delete(k)
}
- logtrace.Info(ctx, "Committed last accessed updates", logtrace.Fields{"count": len(keysToUpdate)})
+ logtrace.Debug(ctx, "Committed last accessed updates", logtrace.Fields{"count": len(keysToUpdate)})
}
func PostKeysInsert(updates []UpdateMessage) {
@@ -437,7 +437,7 @@ func (d *MigrationMetaStore) startInsertWorker(ctx context.Context) {
case <-d.insertTicker.C:
d.commitInserts(ctx)
case <-ctx.Done():
- logtrace.Info(ctx, "Shutting down insert meta keys worker", logtrace.Fields{})
+ logtrace.Debug(ctx, "Shutting down insert meta keys worker", logtrace.Fields{})
d.commitInserts(ctx)
return
}
@@ -501,7 +501,7 @@ func (d *MigrationMetaStore) commitInserts(ctx context.Context) {
d.inserts.Delete(k)
}
- logtrace.Info(ctx, "Committed inserts", logtrace.Fields{"count": len(keysToUpdate)})
+ logtrace.Debug(ctx, "Committed inserts", logtrace.Fields{"count": len(keysToUpdate)})
}
// startMigrationExecutionWorker starts the worker that executes a migration
@@ -511,7 +511,7 @@ func (d *MigrationMetaStore) startMigrationExecutionWorker(ctx context.Context)
case <-d.migrationExecutionTicker.C:
d.checkAndExecuteMigration(ctx)
case <-ctx.Done():
- logtrace.Info(ctx, "Shutting down data migration worker", logtrace.Fields{})
+ logtrace.Debug(ctx, "Shutting down data migration worker", logtrace.Fields{})
return
}
}
@@ -544,7 +544,7 @@ func (d *MigrationMetaStore) checkAndExecuteMigration(ctx context.Context) {
//return
//}
- logtrace.Info(ctx, "Starting data migration", logtrace.Fields{"islow": isLow})
+ logtrace.Debug(ctx, "Starting data migration", logtrace.Fields{"islow": isLow})
// Step 1: Fetch pending migrations
var migrations Migrations
@@ -553,11 +553,11 @@ func (d *MigrationMetaStore) checkAndExecuteMigration(ctx context.Context) {
logtrace.Error(ctx, "Failed to fetch pending migrations", logtrace.Fields{logtrace.FieldError: err})
return
}
- logtrace.Info(ctx, "Fetched pending migrations", logtrace.Fields{"count": len(migrations)})
+ logtrace.Debug(ctx, "Fetched pending migrations", logtrace.Fields{"count": len(migrations)})
// Iterate over each migration
for _, migration := range migrations {
- logtrace.Info(ctx, "Processing migration", logtrace.Fields{"migration_id": migration.ID})
+ logtrace.Debug(ctx, "Processing migration", logtrace.Fields{"migration_id": migration.ID})
if err := d.ProcessMigrationInBatches(ctx, migration); err != nil {
logtrace.Error(ctx, "Failed to process migration", logtrace.Fields{logtrace.FieldError: err, "migration_id": migration.ID})
@@ -579,7 +579,7 @@ func (d *MigrationMetaStore) ProcessMigrationInBatches(ctx context.Context, migr
}
if totalKeys < minKeysToMigrate {
- logtrace.Info(ctx, "Skipping migration due to insufficient keys", logtrace.Fields{"migration_id": migration.ID, "keys-count": totalKeys})
+ logtrace.Debug(ctx, "Skipping migration due to insufficient keys", logtrace.Fields{"migration_id": migration.ID, "keys-count": totalKeys})
return nil
}
@@ -630,7 +630,7 @@ func (d *MigrationMetaStore) ProcessMigrationInBatches(ctx context.Context, migr
}
}
- logtrace.Info(ctx, "Migration processed successfully", logtrace.Fields{"migration_id": migration.ID, "tota-keys-count": totalKeys, "migrated_in_current_iteration": nonMigratedKeys})
+ logtrace.Debug(ctx, "Migration processed successfully", logtrace.Fields{"migration_id": migration.ID, "tota-keys-count": totalKeys, "migrated_in_current_iteration": nonMigratedKeys})
return nil
}
@@ -683,7 +683,7 @@ func (d *MigrationMetaStore) uploadInBatches(ctx context.Context, keys []string,
continue
}
- logtrace.Info(ctx, "Successfully uploaded and deleted records for batch", logtrace.Fields{"batch": i + 1, "total_batches": batches})
+ logtrace.Debug(ctx, "Successfully uploaded and deleted records for batch", logtrace.Fields{"batch": i + 1, "total_batches": batches})
}
return lastError
@@ -823,7 +823,7 @@ func (d *MigrationMetaStore) InsertMetaMigrationData(ctx context.Context, migrat
func (d *MigrationMetaStore) batchSetMigrated(keys []string) error {
if len(keys) == 0 {
// log.P2P().Info("no keys provided for batch update (is_migrated)")
- logtrace.Info(context.Background(), "No keys provided for batch update (is_migrated)", logtrace.Fields{})
+ logtrace.Debug(context.Background(), "No keys provided for batch update (is_migrated)", logtrace.Fields{})
return nil
}
diff --git a/p2p/kademlia/store/sqlite/sqlite.go b/p2p/kademlia/store/sqlite/sqlite.go
index 71224a57..d38661d1 100644
--- a/p2p/kademlia/store/sqlite/sqlite.go
+++ b/p2p/kademlia/store/sqlite/sqlite.go
@@ -293,10 +293,10 @@ func (s *Store) startCheckpointWorker(ctx context.Context) {
select {
case <-ctx.Done():
- logtrace.Info(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{})
+ logtrace.Debug(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{})
return
case <-s.worker.quit:
- logtrace.Info(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{})
+ logtrace.Debug(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{})
return
default:
}
@@ -312,10 +312,10 @@ func (s *Store) start(ctx context.Context) {
logtrace.Error(ctx, "Failed to perform job", logtrace.Fields{logtrace.FieldError: err.Error()})
}
case <-s.worker.quit:
- logtrace.Info(ctx, "exit sqlite db worker - quit signal received", logtrace.Fields{})
+ logtrace.Debug(ctx, "exit sqlite db worker - quit signal received", logtrace.Fields{})
return
case <-ctx.Done():
- logtrace.Info(ctx, "exit sqlite db worker- ctx done signal received", logtrace.Fields{})
+ logtrace.Debug(ctx, "exit sqlite db worker- ctx done signal received", logtrace.Fields{})
return
}
}
@@ -737,11 +737,11 @@ func (s *Store) GetOwnCreatedAt(ctx context.Context) (time.Time, error) {
func (s *Store) GetLocalKeys(from time.Time, to time.Time) ([]string, error) {
var keys []string
ctx := context.Background()
- logtrace.Info(ctx, "getting all keys for SC", logtrace.Fields{})
+ logtrace.Debug(ctx, "getting all keys for SC", logtrace.Fields{})
if err := s.db.SelectContext(ctx, &keys, `SELECT key FROM data WHERE createdAt > ? and createdAt < ?`, from, to); err != nil {
return keys, fmt.Errorf("error reading all keys from database: %w", err)
}
- logtrace.Info(ctx, "got all keys for SC", logtrace.Fields{})
+ logtrace.Debug(ctx, "got all keys for SC", logtrace.Fields{})
return keys, nil
}
@@ -762,7 +762,7 @@ func stringArgsToInterface(args []string) []interface{} {
func batchDeleteRecords(db *sqlx.DB, keys []string) error {
if len(keys) == 0 {
- logtrace.Info(context.Background(), "no keys provided for batch delete", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(context.Background(), "no keys provided for batch delete", logtrace.Fields{logtrace.FieldModule: "p2p"})
return nil
}
total := int64(0)
@@ -784,7 +784,7 @@ func batchDeleteRecords(db *sqlx.DB, keys []string) error {
func batchSetMigratedRecords(db *sqlx.DB, keys []string) error {
if len(keys) == 0 {
- logtrace.Info(context.Background(), "no keys provided for batch update (migrated)", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(context.Background(), "no keys provided for batch update (migrated)", logtrace.Fields{logtrace.FieldModule: "p2p"})
return nil
}
total := int64(0)
diff --git a/p2p/p2p.go b/p2p/p2p.go
index e3d6b40a..bb38ac0c 100644
--- a/p2p/p2p.go
+++ b/p2p/p2p.go
@@ -40,14 +40,14 @@ type P2P interface {
// p2p structure to implements interface
type p2p struct {
- store kademlia.Store // the store for kademlia network
- metaStore kademlia.MetaStore
- dht *kademlia.DHT // the kademlia network
- config *Config // the service configuration
- running bool // if the kademlia network is ready
- lumeraClient lumera.Client
- keyring keyring.Keyring // Add the keyring field
- rqstore rqstore.Store
+ store kademlia.Store // the store for kademlia network
+ metaStore kademlia.MetaStore
+ dht *kademlia.DHT // the kademlia network
+ config *Config // the service configuration
+ running bool // if the kademlia network is ready
+ lumeraClient lumera.Client
+ keyring keyring.Keyring // Add the keyring field
+ rqstore rqstore.Store
}
// Run the kademlia network
@@ -64,7 +64,7 @@ func (s *p2p) Run(ctx context.Context) error {
logtrace.Error(ctx, "failed to run kadmelia, retrying.", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err})
} else {
- logtrace.Info(ctx, "kadmelia started successfully", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "kadmelia started successfully", logtrace.Fields{logtrace.FieldModule: "p2p"})
return nil
}
}
@@ -74,7 +74,7 @@ func (s *p2p) Run(ctx context.Context) error {
// run the kademlia network
func (s *p2p) run(ctx context.Context) error {
- logtrace.Info(ctx, "Running kademlia network", logtrace.Fields{logtrace.FieldModule: "p2p"})
+ logtrace.Debug(ctx, "Running kademlia network", logtrace.Fields{logtrace.FieldModule: "p2p"})
// configure the kademlia dht for p2p service
if err := s.configure(ctx); err != nil {
return errors.Errorf("configure kademlia dht: %w", err)
@@ -95,7 +95,7 @@ func (s *p2p) run(ctx context.Context) error {
}
s.running = true
- logtrace.Info(ctx, "p2p service is started", logtrace.Fields{})
+ logtrace.Debug(ctx, "p2p service is started", logtrace.Fields{})
// block until context is done
<-ctx.Done()
@@ -103,7 +103,7 @@ func (s *p2p) run(ctx context.Context) error {
// stop the node for kademlia network
s.dht.Stop(ctx)
- logtrace.Info(ctx, "p2p service is stopped", logtrace.Fields{})
+ logtrace.Debug(ctx, "p2p service is stopped", logtrace.Fields{})
return nil
}
@@ -263,13 +263,13 @@ func New(ctx context.Context, config *Config, lumeraClient lumera.Client, kr key
}
return &p2p{
- store: store,
- metaStore: meta,
- config: config,
- lumeraClient: lumeraClient,
- keyring: kr, // Store the keyring
- rqstore: rqstore,
- }, nil
+ store: store,
+ metaStore: meta,
+ config: config,
+ lumeraClient: lumeraClient,
+ keyring: kr, // Store the keyring
+ rqstore: rqstore,
+ }, nil
}
// LocalStore store data into the kademlia network
diff --git a/pkg/codec/codec_mock.go b/pkg/codec/codec_mock.go
index 9c3cf864..09484cee 100644
--- a/pkg/codec/codec_mock.go
+++ b/pkg/codec/codec_mock.go
@@ -1,10 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: codec.go
-//
-// Generated by this command:
-//
-// mockgen -destination=codec_mock.go -package=codec -source=codec.go
-//
// Package codec is a generated GoMock package.
package codec
@@ -13,14 +8,13 @@ import (
context "context"
reflect "reflect"
- gomock "go.uber.org/mock/gomock"
+ gomock "github.com/golang/mock/gomock"
)
// MockCodec is a mock of Codec interface.
type MockCodec struct {
ctrl *gomock.Controller
recorder *MockCodecMockRecorder
- isgomock struct{}
}
// MockCodecMockRecorder is the mock recorder for MockCodec.
@@ -50,7 +44,7 @@ func (m *MockCodec) Decode(ctx context.Context, req DecodeRequest) (DecodeRespon
}
// Decode indicates an expected call of Decode.
-func (mr *MockCodecMockRecorder) Decode(ctx, req any) *gomock.Call {
+func (mr *MockCodecMockRecorder) Decode(ctx, req interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decode", reflect.TypeOf((*MockCodec)(nil).Decode), ctx, req)
}
@@ -65,7 +59,7 @@ func (m *MockCodec) Encode(ctx context.Context, req EncodeRequest) (EncodeRespon
}
// Encode indicates an expected call of Encode.
-func (mr *MockCodecMockRecorder) Encode(ctx, req any) *gomock.Call {
+func (mr *MockCodecMockRecorder) Encode(ctx, req interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Encode", reflect.TypeOf((*MockCodec)(nil).Encode), ctx, req)
}
diff --git a/pkg/codec/decode.go b/pkg/codec/decode.go
index beeed5a8..348894e4 100644
--- a/pkg/codec/decode.go
+++ b/pkg/codec/decode.go
@@ -5,9 +5,13 @@ import (
"encoding/json"
"fmt"
"os"
+ "path"
"path/filepath"
+ "strings"
+ "sync"
raptorq "github.com/LumeraProtocol/rq-go"
+ "github.com/LumeraProtocol/supernode/v2/pkg/errors"
"github.com/LumeraProtocol/supernode/v2/pkg/logtrace"
)
@@ -18,64 +22,160 @@ type DecodeRequest struct {
}
type DecodeResponse struct {
- Path string
+ FilePath string
DecodeTmpDir string
}
-func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) {
+// Workspace holds paths & reverse index for prepared decoding.
+type Workspace struct {
+ ActionID string
+ SymbolsDir string // <symbolsBaseDir>/<actionID>
+ BlockDirs []string // index = blockID (or 0 if single block)
+ symbolToBlock map[string]int
+ mu sync.RWMutex // protects symbolToBlock reads if you expand it later
+}
+
+// PrepareDecode creates the on-disk workspace for decoding and returns:
+// - blockPaths[0] => where to write symbols for block 0 (your single-block case)
+// - Write(block, id, data) callback that writes symbol bytes directly to disk
+// - Cleanup() to remove the entire workspace directory (call it on abort; skip it to keep the files)
+func (rq *raptorQ) PrepareDecode(
+ ctx context.Context,
+ actionID string,
+ layout Layout,
+) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *Workspace, err error) {
fields := logtrace.Fields{
- logtrace.FieldMethod: "Decode",
+ logtrace.FieldMethod: "PrepareDecode",
logtrace.FieldModule: "rq",
- logtrace.FieldActionID: req.ActionID,
+ logtrace.FieldActionID: actionID,
}
- logtrace.Info(ctx, "RaptorQ decode request received", fields)
- // Use deterministic processor settings (matching encoder)
- processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency)
- if err != nil {
+ // Create root symbols dir for this action
+ symbolsDir := filepath.Join(rq.symbolsBaseDir, actionID)
+ if err := os.MkdirAll(symbolsDir, 0o755); err != nil {
fields[logtrace.FieldError] = err.Error()
- return DecodeResponse{}, fmt.Errorf("create RaptorQ processor: %w", err)
+ logtrace.Error(ctx, "mkdir symbols base dir failed", fields)
+ return nil, nil, nil, nil, fmt.Errorf("mkdir %s: %w", symbolsDir, err)
}
- defer processor.Free()
- symbolsDir := filepath.Join(rq.symbolsBaseDir, req.ActionID)
- if err := os.MkdirAll(symbolsDir, 0o755); err != nil {
- fields[logtrace.FieldError] = err.Error()
- return DecodeResponse{}, fmt.Errorf("mkdir %s: %w", symbolsDir, err)
+ // Ensure block directories exist; build reverse index symbol -> block
+ maxBlockID := 0
+ for _, b := range layout.Blocks {
+ if int(b.BlockID) > maxBlockID {
+ maxBlockID = int(b.BlockID)
+ }
}
+ blockDirs := make([]string, maxBlockID+1)
+ s2b := make(map[string]int, 1024)
- // Build symbol->block mapping from layout and ensure block directories exist
- symbolToBlock := make(map[string]int)
- for _, blk := range req.Layout.Blocks {
- blockDir := filepath.Join(symbolsDir, fmt.Sprintf("block_%d", blk.BlockID))
- if err := os.MkdirAll(blockDir, 0o755); err != nil {
+ for _, b := range layout.Blocks {
+ dir := filepath.Join(symbolsDir, fmt.Sprintf("block_%d", b.BlockID))
+ if err := os.MkdirAll(dir, 0o755); err != nil {
fields[logtrace.FieldError] = err.Error()
- return DecodeResponse{}, fmt.Errorf("mkdir %s: %w", blockDir, err)
+ logtrace.Error(ctx, "mkdir block dir failed", fields)
+ return nil, nil, nil, nil, fmt.Errorf("mkdir %s: %w", dir, err)
}
- for _, sym := range blk.Symbols {
- symbolToBlock[sym] = blk.BlockID
+ blockDirs[b.BlockID] = dir
+ for _, sym := range b.Symbols {
+ s2b[sym] = b.BlockID
}
}
- // Write symbols to their respective block directories
- for id, data := range req.Symbols {
- blkID, ok := symbolToBlock[id]
- if !ok {
- fields[logtrace.FieldError] = "symbol not present in layout"
- return DecodeResponse{}, fmt.Errorf("symbol %s not present in layout", id)
+ ws = &Workspace{
+ ActionID: actionID,
+ SymbolsDir: symbolsDir,
+ BlockDirs: blockDirs,
+ symbolToBlock: s2b,
+ }
+
+ // Helper: atomic write (tmp file + rename) to avoid partials on crash
+ writeFileAtomic := func(path string, data []byte) error {
+ tmp := path + ".tmp"
+ if err := os.WriteFile(tmp, data, 0o644); err != nil {
+ return err
}
- blockDir := filepath.Join(symbolsDir, fmt.Sprintf("block_%d", blkID))
- symbolPath := filepath.Join(blockDir, id)
- if err := os.WriteFile(symbolPath, data, 0o644); err != nil {
- fields[logtrace.FieldError] = err.Error()
- return DecodeResponse{}, fmt.Errorf("write symbol %s: %w", id, err)
+ return os.Rename(tmp, path)
+ }
+
+ // Write callback; if block < 0, resolve via layout reverse index; default to 0.
+ Write = func(block int, symbolID string, data []byte) (string, error) {
+ // Quick cancellation check
+ select {
+ case <-ctx.Done():
+ return "", ctx.Err()
+ default:
+ }
+
+ // Resolve block if caller passes default
+ if block < 0 {
+ ws.mu.RLock()
+ bid, ok := ws.symbolToBlock[symbolID]
+ ws.mu.RUnlock()
+ if !ok {
+ // Single-block simplification: default to block 0 if layout maps are absent
+ if len(ws.BlockDirs) == 0 || ws.BlockDirs[0] == "" {
+ return "", errors.Errorf("no block directories prepared")
+ }
+ bid = 0
+ }
+ block = bid
+ }
+
+ if block < 0 || block >= len(ws.BlockDirs) || ws.BlockDirs[block] == "" {
+ return "", errors.Errorf("invalid block index %d", block)
+ }
+
+ // sanitize symbolID to a basename (prevents traversal)
+ clean := path.Clean("/" + symbolID)
+ base := strings.TrimPrefix(clean, "/")
+ if base == "" || strings.Contains(base, "/") {
+ return "", errors.Errorf("invalid symbol id %q", symbolID)
+
}
+
+ dest := filepath.Join(ws.BlockDirs[block], base)
+ if err := writeFileAtomic(dest, data); err != nil {
+ return "", fmt.Errorf("write symbol %s (block %d): %w", base, block, err)
+ }
+ return dest, nil
+ }
+
+ Cleanup = func() error {
+ // Remove the whole workspace directory (symbols + layout + output if any)
+ return os.RemoveAll(symbolsDir)
}
- logtrace.Info(ctx, "symbols written to block directories", fields)
- // ---------- write layout.json ----------
- layoutPath := filepath.Join(symbolsDir, "layout.json")
- layoutBytes, err := json.Marshal(req.Layout)
+ logtrace.Debug(ctx, "prepare decode workspace created", logtrace.Fields{
+ "symbols_dir": symbolsDir,
+ "blocks": len(blockDirs),
+ })
+ return blockDirs, Write, Cleanup, ws, nil
+}
+
+// DecodeFromPrepared performs RaptorQ decode using an already-prepared workspace.
+// It writes layout.json under the workspace, runs decode, and returns output paths.
+func (rq *raptorQ) DecodeFromPrepared(
+ ctx context.Context,
+ ws *Workspace,
+ layout Layout,
+) (DecodeResponse, error) {
+ fields := logtrace.Fields{
+ logtrace.FieldMethod: "DecodeFromPrepared",
+ logtrace.FieldModule: "rq",
+ logtrace.FieldActionID: ws.ActionID,
+ }
+ logtrace.Debug(ctx, "RaptorQ decode (prepared) requested", fields)
+
+ processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency)
+ if err != nil {
+ fields[logtrace.FieldError] = err.Error()
+ return DecodeResponse{}, fmt.Errorf("create RaptorQ processor: %w", err)
+ }
+ defer processor.Free()
+
+ // Write layout.json (idempotent)
+ layoutPath := filepath.Join(ws.SymbolsDir, "layout.json")
+ layoutBytes, err := json.Marshal(layout)
if err != nil {
fields[logtrace.FieldError] = err.Error()
return DecodeResponse{}, fmt.Errorf("marshal layout: %w", err)
@@ -84,16 +184,74 @@ func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeRespons
fields[logtrace.FieldError] = err.Error()
return DecodeResponse{}, fmt.Errorf("write layout file: %w", err)
}
- logtrace.Info(ctx, "layout.json written", fields)
+ logtrace.Debug(ctx, "layout.json written (prepared)", fields)
- // Decode
- outputPath := filepath.Join(symbolsDir, "output")
- if err := processor.DecodeSymbols(symbolsDir, outputPath, layoutPath); err != nil {
+ // Decode to output (idempotent-safe: overwrite on success)
+ outputPath := filepath.Join(ws.SymbolsDir, "output")
+ if err := processor.DecodeSymbols(ws.SymbolsDir, outputPath, layoutPath); err != nil {
fields[logtrace.FieldError] = err.Error()
- _ = os.Remove(outputPath)
+ _ = os.Remove(outputPath) // best-effort cleanup of partial output
return DecodeResponse{}, fmt.Errorf("raptorq decode: %w", err)
}
- logtrace.Info(ctx, "RaptorQ decoding completed successfully", fields)
- return DecodeResponse{Path: outputPath, DecodeTmpDir: symbolsDir}, nil
+ logtrace.Debug(ctx, "RaptorQ decoding completed successfully (prepared)", logtrace.Fields{
+ "output_path": outputPath,
+ })
+ return DecodeResponse{FilePath: outputPath, DecodeTmpDir: ws.SymbolsDir}, nil
+}
+
+func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) {
+ fields := logtrace.Fields{
+ logtrace.FieldMethod: "Decode",
+ logtrace.FieldModule: "rq",
+ logtrace.FieldActionID: req.ActionID,
+ }
+ logtrace.Debug(ctx, "RaptorQ decode request received", fields)
+
+ // 1) Validate layout (the check)
+ if len(req.Layout.Blocks) == 0 {
+ fields[logtrace.FieldError] = "empty layout"
+ return DecodeResponse{}, fmt.Errorf("invalid layout: no blocks present")
+ }
+ for _, blk := range req.Layout.Blocks {
+ if len(blk.Symbols) == 0 {
+ fields[logtrace.FieldError] = fmt.Sprintf("block_%d has no symbols", blk.BlockID)
+ return DecodeResponse{}, fmt.Errorf("invalid layout: block %d has no symbols", blk.BlockID)
+ }
+ }
+
+ // 2) Prepare workspace (functionality)
+ _, Write, Cleanup, ws, err := rq.PrepareDecode(ctx, req.ActionID, req.Layout)
+ if err != nil {
+ fields[logtrace.FieldError] = err.Error()
+ return DecodeResponse{}, fmt.Errorf("prepare decode workspace: %w", err)
+ }
+
+ // Ensure workspace cleanup on failure. On success, caller cleans up via returned path.
+ success := false
+ defer func() {
+ if !success && Cleanup != nil {
+ _ = Cleanup()
+ }
+ }()
+
+ // 3) Persist provided in-memory symbols via Write (functionality)
+ if len(req.Symbols) > 0 {
+ for id, data := range req.Symbols {
+ if _, werr := Write(-1, id, data); werr != nil {
+ fields[logtrace.FieldError] = werr.Error()
+ return DecodeResponse{}, werr
+ }
+ }
+ logtrace.Debug(ctx, "symbols persisted via Write()", fields)
+ }
+
+ // 4) Decode using the prepared workspace (functionality)
+ resp, derr := rq.DecodeFromPrepared(ctx, ws, req.Layout)
+ if derr != nil {
+ fields[logtrace.FieldError] = derr.Error()
+ return DecodeResponse{}, derr
+ }
+ success = true
+ return resp, nil
}
diff --git a/pkg/codec/raptorq.go b/pkg/codec/raptorq.go
index 8e0c1c6c..541aac58 100644
--- a/pkg/codec/raptorq.go
+++ b/pkg/codec/raptorq.go
@@ -15,9 +15,9 @@ const (
rqSymbolSize uint16 = 65535
rqRedundancyFactor uint8 = 6
// Limit RaptorQ processor memory usage to ~2 GiB
- rqMaxMemoryMB uint64 = 2 * 1024 // MB
+ rqMaxMemoryMB uint64 = 4 * 1024 // MB
// Concurrency tuned for 2 GiB limit and typical 8+ core CPUs
- rqConcurrency uint64 = 6
+ rqConcurrency uint64 = 1
// Target single-block output for up to 1 GiB files with padding headroom (~1.25 GiB)
rqBlockSize int = 1280 * 1024 * 1024 // bytes (1,280 MiB)
)
@@ -48,7 +48,7 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons
return EncodeResponse{}, fmt.Errorf("create RaptorQ processor: %w", err)
}
defer processor.Free()
- logtrace.Info(ctx, "RaptorQ processor created", fields)
+ logtrace.Debug(ctx, "RaptorQ processor created", fields)
/* ---------- 1. run the encoder ---------- */
// Deterministic: force single block
@@ -60,7 +60,7 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons
os.Remove(req.Path)
return EncodeResponse{}, fmt.Errorf("mkdir %s: %w", symbolsDir, err)
}
- logtrace.Info(ctx, "RaptorQ processor encoding", fields)
+ logtrace.Debug(ctx, "RaptorQ processor encoding", fields)
resp, err := processor.EncodeFile(req.Path, symbolsDir, blockSize)
if err != nil {
@@ -74,7 +74,7 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons
/* ---------- 2. read the layout JSON ---------- */
layoutData, err := os.ReadFile(resp.LayoutFilePath)
- logtrace.Info(ctx, "RaptorQ processor layout file", logtrace.Fields{
+ logtrace.Debug(ctx, "RaptorQ processor layout file", logtrace.Fields{
"layout-file": resp.LayoutFilePath})
if err != nil {
fields[logtrace.FieldError] = err.Error()
diff --git a/pkg/common/task/task.go b/pkg/common/task/task.go
index e4bb062a..adf173e4 100644
--- a/pkg/common/task/task.go
+++ b/pkg/common/task/task.go
@@ -92,13 +92,13 @@ func (task *task) RunAction(ctx context.Context) error {
for {
select {
case <-ctx.Done():
- logtrace.Info(ctx, "context done", logtrace.Fields{"task_id": task.ID()})
+ logtrace.Debug(ctx, "context done", logtrace.Fields{"task_id": task.ID()})
case <-task.Done():
- logtrace.Info(ctx, "task done", logtrace.Fields{"task_id": task.ID()})
+ logtrace.Debug(ctx, "task done", logtrace.Fields{"task_id": task.ID()})
cancel()
case action, ok := <-task.actionCh:
if !ok {
- logtrace.Info(ctx, "action channel closed", logtrace.Fields{"task_id": task.ID()})
+ logtrace.Debug(ctx, "action channel closed", logtrace.Fields{"task_id": task.ID()})
return group.Wait()
}
diff --git a/pkg/common/task/worker.go b/pkg/common/task/worker.go
index 280b5fb8..14043079 100644
--- a/pkg/common/task/worker.go
+++ b/pkg/common/task/worker.go
@@ -91,7 +91,7 @@ func (worker *Worker) Run(ctx context.Context) error {
logtrace.Error(ctx, "Recovered from panic in common task's worker run", logtrace.Fields{"task": currentTask.ID(), "error": r})
}
- logtrace.Info(ctx, "Task Removed", logtrace.Fields{"task": currentTask.ID()})
+ logtrace.Debug(ctx, "Task Removed", logtrace.Fields{"task": currentTask.ID()})
// Remove the task from the worker's task list
worker.RemoveTask(currentTask)
}()
diff --git a/pkg/dd/image_rareness.go b/pkg/dd/image_rareness.go
index d021da1b..74fec800 100644
--- a/pkg/dd/image_rareness.go
+++ b/pkg/dd/image_rareness.go
@@ -56,7 +56,7 @@ func (c *ddServerClientImpl) ImageRarenessScore(ctx context.Context, req Rarenes
logtrace.FieldMethod: "ImageRarenessScore",
logtrace.FieldRequest: req,
}
- logtrace.Info(ctx, "getting image rareness score", fields)
+ logtrace.Debug(ctx, "getting image rareness score", fields)
res, err := c.ddService.ImageRarenessScore(ctx, &ddService.RarenessScoreRequest{ImageFilepath: req.Filepath})
if err != nil {
@@ -65,7 +65,7 @@ func (c *ddServerClientImpl) ImageRarenessScore(ctx context.Context, req Rarenes
return ImageRarenessScoreResponse{}, fmt.Errorf("dd image rareness score error: %w", err)
}
- logtrace.Info(ctx, "successfully got image rareness score", fields)
+ logtrace.Debug(ctx, "successfully got image rareness score", fields)
return toImageRarenessScoreResponse(res), nil
}
diff --git a/pkg/dd/status.go b/pkg/dd/status.go
index fc7f4d30..812b62d6 100644
--- a/pkg/dd/status.go
+++ b/pkg/dd/status.go
@@ -26,7 +26,7 @@ func (c *ddServerClientImpl) GetStatus(ctx context.Context, req GetStatusRequest
logtrace.FieldMethod: "GetStatus",
logtrace.FieldRequest: req,
}
- logtrace.Info(ctx, "getting status", fields)
+ logtrace.Debug(ctx, "getting status", fields)
res, err := c.ddService.GetStatus(ctx, &ddService.GetStatusRequest{})
if err != nil {
@@ -35,7 +35,7 @@ func (c *ddServerClientImpl) GetStatus(ctx context.Context, req GetStatusRequest
return GetStatusResponse{}, fmt.Errorf("dd get status error: %w", err)
}
- logtrace.Info(ctx, "successfully got status", fields)
+ logtrace.Debug(ctx, "successfully got status", fields)
return GetStatusResponse{
Version: res.GetVersion(),
TaskCount: res.GetTaskCount(),
diff --git a/pkg/logtrace/datadog.go b/pkg/logtrace/datadog.go
new file mode 100644
index 00000000..6fb0ba86
--- /dev/null
+++ b/pkg/logtrace/datadog.go
@@ -0,0 +1,208 @@
+package logtrace
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "encoding/json"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Minimal Datadog Logs Forwarder (hard-coded config) kept separate for cleanliness.
+
+type ddCfg struct {
+ APIKey string
+ Site string // e.g. "datadoghq.com", "datadoghq.eu"; defaults to "us5.datadoghq.com"
+ Service string // e.g. used as Datadog 'service'; we will set to node IP
+ Host string // optional; sent empty unless set via SetDatadogHost
+}
+
+var (
+ ddOnce sync.Once
+ ddConfig ddCfg
+ ddClient = &http.Client{Timeout: 5 * time.Second}
+ ddQueue chan map[string]any
+ // Optional build-time injection via -ldflags
+ // -ldflags "-X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDAPIKey=... -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDSite=us5.datadoghq.com"
+ DDAPIKey string
+ DDSite string
+)
+
+// SetupDatadog initializes the Datadog forwarding once.
+func SetupDatadog(service string) {
+ ddOnce.Do(func() {
+ initDatadog(service)
+ })
+}
+
+// ForwardDatadog enqueues a log line for forwarding (non-blocking).
+func ForwardDatadog(level zapcore.Level, ctx context.Context, msg string, fields Fields) {
+ ddForward(level, ctx, msg, fields)
+}
+
+// SetDatadogService allows setting the Datadog service (e.g., to the node IP)
+func SetDatadogService(service string) {
+ if s := strings.TrimSpace(service); s != "" {
+ ddConfig.Service = s
+ }
+}
+
+// SetDatadogHost sets the Datadog host field (use the supernode identity)
+func SetDatadogHost(host string) {
+ if h := strings.TrimSpace(host); h != "" {
+ ddConfig.Host = h
+ }
+}
+
+func initDatadog(service string) {
+ // Base defaults (site default chosen based on earlier validation)
+ ddConfig = ddCfg{Site: "us5.datadoghq.com", Service: service, Host: ""}
+
+ // Resolve from env and build flags
+ apiKey := strings.TrimSpace(os.Getenv("DD_API_KEY"))
+ if apiKey == "" {
+ apiKey = strings.TrimSpace(DDAPIKey)
+ }
+
+ site := strings.TrimSpace(os.Getenv("DD_SITE"))
+ if site == "" {
+ site = strings.TrimSpace(DDSite)
+ if site == "" {
+ site = ddConfig.Site
+ }
+ }
+
+ ddConfig.APIKey = apiKey
+ ddConfig.Site = site
+
+ // Only enable forwarding when a real key is present
+ if ddConfig.APIKey == "" {
+ return
+ }
+
+ ddQueue = make(chan map[string]any, 256)
+ go ddLoop()
+}
+
+// ddForward enqueues a single log entry for Datadog intake.
+func ddForward(level zapcore.Level, ctx context.Context, msg string, fields Fields) {
+ if ddQueue == nil {
+ return
+ }
+
+ // Map zap level to Datadog status
+ status := "info"
+ switch level {
+ case zapcore.DebugLevel:
+ status = "debug"
+ case zapcore.InfoLevel:
+ status = "info"
+ case zapcore.WarnLevel:
+ status = "warn"
+ case zapcore.ErrorLevel:
+ status = "error"
+ case zapcore.FatalLevel:
+ status = "critical"
+ }
+
+ // Build a compact attributes map
+ attrs := map[string]any{}
+ for k, v := range fields {
+ attrs[k] = v
+ }
+ // Attach correlation ID if present
+ if cid := extractCorrelationID(ctx); cid != "unknown" {
+ attrs["correlation_id"] = cid
+ }
+ // Attach origin/phase if present (first_pass | worker | download)
+ if o := OriginFromContext(ctx); o != "" {
+ attrs["origin"] = o
+ }
+
+ entry := map[string]any{
+ "message": msg,
+ "status": status,
+ "service": ddConfig.Service,
+ "host": ddConfig.Host,
+ "attributes": attrs, // avoid collisions with top-level fields
+ }
+
+ select {
+ case ddQueue <- entry:
+ default:
+ // drop if queue is full to avoid blocking critical paths
+ }
+}
+
+// ddLoop batches log entries and sends to Datadog intake.
+func ddLoop() {
+ ticker := time.NewTicker(2 * time.Second)
+ defer ticker.Stop()
+
+ batch := make([]map[string]any, 0, 32)
+ flush := func() {
+ if len(batch) == 0 {
+ return
+ }
+ // Marshal batch
+ buf := &bytes.Buffer{}
+ if err := json.NewEncoder(buf).Encode(batch); err != nil {
+ batch = batch[:0]
+ return
+ }
+ _ = ddPost(buf.Bytes())
+ batch = batch[:0]
+ }
+
+ for {
+ select {
+ case e, ok := <-ddQueue:
+ if !ok {
+ flush()
+ return
+ }
+ batch = append(batch, e)
+ if len(batch) >= 32 {
+ flush()
+ }
+ case <-ticker.C:
+ flush()
+ }
+ }
+}
+
+func ddPost(payload []byte) error {
+ url := "https://http-intake.logs." + strings.TrimSpace(ddConfig.Site) + "/api/v2/logs"
+
+ // gzip the JSON payload
+ var gzBuf bytes.Buffer
+ gw := gzip.NewWriter(&gzBuf)
+ if _, err := gw.Write(payload); err != nil {
+ _ = gw.Close()
+ return err // never send a non-gzip body with a gzip Content-Encoding header
+ } else if err := gw.Close(); err != nil {
+ return err // Close flushes the gzip trailer; an error means a truncated stream
+ }
+
+ req, err := http.NewRequest(http.MethodPost, url, &gzBuf)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Accept", "application/json")
+ req.Header.Set("Content-Encoding", "gzip")
+ req.Header.Set("DD-API-KEY", ddConfig.APIKey)
+
+ resp, err := ddClient.Do(req)
+ if err != nil {
+ return err
+ }
+ _ = resp.Body.Close()
+ return nil
+}
diff --git a/pkg/logtrace/fields.go b/pkg/logtrace/fields.go
index 8554137b..40e4e5f1 100644
--- a/pkg/logtrace/fields.go
+++ b/pkg/logtrace/fields.go
@@ -5,6 +5,8 @@ type Fields map[string]interface{}
const (
FieldCorrelationID = "correlation_id"
+ FieldOrigin = "origin"
+ FieldRole = "role"
FieldMethod = "method"
FieldModule = "module"
FieldError = "error"
diff --git a/pkg/logtrace/log.go b/pkg/logtrace/log.go
index 02b8f36e..469b32e8 100644
--- a/pkg/logtrace/log.go
+++ b/pkg/logtrace/log.go
@@ -16,7 +16,13 @@ type ContextKey string
// CorrelationIDKey is the key for storing correlation ID in context
const CorrelationIDKey ContextKey = "correlation_id"
-var logger *zap.Logger
+// OriginKey marks which phase produced the log (first_pass | worker | download)
+const OriginKey ContextKey = "origin"
+
+var (
+ logger *zap.Logger
+ minLevel zapcore.Level = zapcore.InfoLevel // effective minimum log level
+)
// Setup initializes the logger for readable output in all modes.
func Setup(serviceName string) {
@@ -34,7 +40,11 @@ func Setup(serviceName string) {
config.DisableStacktrace = true
// Always respect the LOG_LEVEL environment variable.
- config.Level = zap.NewAtomicLevelAt(getLogLevel())
+ lvl := getLogLevel()
+ config.Level = zap.NewAtomicLevelAt(lvl)
+ // Persist the effective minimum so non-core sinks (e.g., Datadog) can
+ // filter entries consistently with the console logger.
+ minLevel = lvl
// Build the logger from the customized config.
if tracingEnabled {
@@ -45,11 +55,14 @@ func Setup(serviceName string) {
if err != nil {
panic(err)
}
+
+ // Initialize Datadog forwarding (minimal integration in separate file)
+ SetupDatadog(serviceName)
}
// getLogLevel returns the log level from environment variable LOG_LEVEL
func getLogLevel() zapcore.Level {
- levelStr := strings.ToLower(os.Getenv("LOG_LEVEL"))
+ levelStr := strings.ToLower(os.Getenv("LOG_LEVEL"))
switch levelStr {
case "debug":
return zapcore.DebugLevel
@@ -76,6 +89,27 @@ func CtxWithCorrelationID(ctx context.Context, correlationID string) context.Con
return context.WithValue(ctx, CorrelationIDKey, correlationID)
}
+// CorrelationIDFromContext returns the correlation ID from context or "unknown".
+func CorrelationIDFromContext(ctx context.Context) string {
+ return extractCorrelationID(ctx)
+}
+
+// CtxWithOrigin stores a phase/origin tag in context
+func CtxWithOrigin(ctx context.Context, origin string) context.Context {
+ if origin == "" {
+ return ctx
+ }
+ return context.WithValue(ctx, OriginKey, origin)
+}
+
+// OriginFromContext returns the origin tag from context or ""
+func OriginFromContext(ctx context.Context) string {
+ if v, ok := ctx.Value(OriginKey).(string); ok {
+ return v
+ }
+ return ""
+}
+
// extractCorrelationID retrieves the correlation ID from context
func extractCorrelationID(ctx context.Context) string {
if correlationID, ok := ctx.Value(CorrelationIDKey).(string); ok {
@@ -90,12 +124,10 @@ func logWithLevel(level zapcore.Level, ctx context.Context, message string, fiel
Setup("unknown-service") // Fallback if Setup wasn't called
}
- // Always enrich logs with the correlation ID.
- // allFields := make(Fields, len(fields)+1)
- // for k, v := range fields {
- // allFields[k] = v
- // }
- // allFields[FieldCorrelationID] = extractCorrelationID(ctx)
+ // Drop early if below the configured level (keeps Datadog in sync)
+ if !logger.Core().Enabled(level) {
+ return
+ }
// Convert the map to a slice of zap.Field
zapFields := make([]zap.Field, 0, len(fields))
@@ -116,18 +148,19 @@ func logWithLevel(level zapcore.Level, ctx context.Context, message string, fiel
}
}
- // Log with the structured fields.
- switch level {
- case zapcore.DebugLevel:
- logger.Debug(message, zapFields...)
- case zapcore.InfoLevel:
- logger.Info(message, zapFields...)
- case zapcore.WarnLevel:
- logger.Warn(message, zapFields...)
- case zapcore.ErrorLevel:
- logger.Error(message, zapFields...)
- case zapcore.FatalLevel:
- logger.Fatal(message, zapFields...)
+ // Log with the structured fields using a level check/write
+ if ce := logger.Check(level, message); ce != nil {
+ ce.Write(zapFields...)
+ } else {
+ // Should not happen due to early Enabled check, but guard anyway
+ return
+ }
+
+ // Forward to Datadog (non-blocking, best-effort) only if level is enabled
+ // for the current configuration. This prevents forwarding debug entries
+ // when the logger is configured for info and above.
+ if level >= minLevel {
+ ForwardDatadog(level, ctx, message, fields)
}
}
diff --git a/pkg/lumera/client.go b/pkg/lumera/client.go
index bac35d68..2e25877c 100644
--- a/pkg/lumera/client.go
+++ b/pkg/lumera/client.go
@@ -2,10 +2,12 @@ package lumera
import (
"context"
+ "fmt"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth"
+ "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx"
@@ -16,6 +18,7 @@ type lumeraClient struct {
authMod auth.Module
actionMod action.Module
actionMsgMod action_msg.Module
+ bankMod bank.Module
supernodeMod supernode.Module
txMod tx.Module
nodeMod node.Module
@@ -53,12 +56,30 @@ func newClient(ctx context.Context, cfg *Config) (Client, error) {
return nil, err
}
+ bankModule, err := bank.NewModule(conn.GetConn())
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+
nodeModule, err := node.NewModule(conn.GetConn(), cfg.keyring)
if err != nil {
conn.Close()
return nil, err
}
+ // Preflight: verify configured ChainID matches node's reported network
+ if nodeInfo, nerr := nodeModule.GetNodeInfo(ctx); nerr != nil {
+ conn.Close()
+ return nil, fmt.Errorf("failed to get node info for chain verification: %w", nerr)
+ } else if nodeInfo != nil && nodeInfo.DefaultNodeInfo != nil {
+ // Cosmos SDK exposes chain-id in DefaultNodeInfo.Network
+ if reported := nodeInfo.DefaultNodeInfo.Network; reported != "" && reported != cfg.ChainID {
+ conn.Close()
+ return nil, fmt.Errorf("chain ID mismatch: configured=%s node=%s", cfg.ChainID, reported)
+ }
+ }
+
actionMsgModule, err := action_msg.NewModule(
conn.GetConn(),
authModule, // For account info
@@ -77,6 +98,7 @@ func newClient(ctx context.Context, cfg *Config) (Client, error) {
authMod: authModule,
actionMod: actionModule,
actionMsgMod: actionMsgModule,
+ bankMod: bankModule,
supernodeMod: supernodeModule,
txMod: txModule,
nodeMod: nodeModule,
@@ -96,6 +118,10 @@ func (c *lumeraClient) ActionMsg() action_msg.Module {
return c.actionMsgMod
}
+func (c *lumeraClient) Bank() bank.Module {
+ return c.bankMod
+}
+
func (c *lumeraClient) SuperNode() supernode.Module {
return c.supernodeMod
}
diff --git a/pkg/lumera/connection.go b/pkg/lumera/connection.go
index ab28702c..8abdc0f5 100644
--- a/pkg/lumera/connection.go
+++ b/pkg/lumera/connection.go
@@ -14,8 +14,6 @@ import (
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/keepalive"
- "os"
-
"github.com/LumeraProtocol/supernode/v2/pkg/logtrace"
)
@@ -127,14 +125,11 @@ func newGRPCConnection(ctx context.Context, rawAddr string) (Connection, error)
if firstCand.useTLS {
scheme = "tls"
}
- logtrace.Info(ctx, "gRPC connection established", logtrace.Fields{
+ logtrace.Debug(ctx, "gRPC connection established", logtrace.Fields{
"target": firstCand.target,
"scheme": scheme,
})
- // Start a monitor to terminate the app if connection is lost
- go monitorConnection(ctx, firstConn)
-
return &grpcConnection{conn: firstConn}, nil
}
@@ -275,35 +270,6 @@ func createGRPCConnection(ctx context.Context, hostPort string, creds credential
}
}
-// monitorConnection watches the connection state and exits the process if the
-// connection transitions to Shutdown or remains in TransientFailure beyond a grace period.
-func monitorConnection(ctx context.Context, conn *grpc.ClientConn) {
- for {
- state := conn.GetState()
- switch state {
- case connectivity.Shutdown:
- logtrace.Error(ctx, "gRPC connection shutdown", logtrace.Fields{"action": "exit"})
- os.Exit(1)
- case connectivity.TransientFailure:
- // Allow some time to recover to Ready
- gctx, cancel := context.WithTimeout(ctx, reconnectionGracePeriod)
- for conn.GetState() == connectivity.TransientFailure {
- if !conn.WaitForStateChange(gctx, connectivity.TransientFailure) {
- cancel()
- logtrace.Error(ctx, "gRPC connection lost (transient failure)", logtrace.Fields{"grace": reconnectionGracePeriod.String(), "action": "exit"})
- os.Exit(1)
- }
- }
- cancel()
- default:
- // Idle/Connecting/Ready: just wait for state change
- if !conn.WaitForStateChange(ctx, state) {
- return
- }
- }
- }
-}
-
// Close closes the gRPC connection.
func (c *grpcConnection) Close() error {
if c.conn != nil {
diff --git a/pkg/lumera/interface.go b/pkg/lumera/interface.go
index eba47684..2fb25c13 100644
--- a/pkg/lumera/interface.go
+++ b/pkg/lumera/interface.go
@@ -7,6 +7,7 @@ import (
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth"
+ "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx"
@@ -18,6 +19,7 @@ type Client interface {
Action() action.Module
ActionMsg() action_msg.Module
SuperNode() supernode.Module
+ Bank() bank.Module
Tx() tx.Module
Node() node.Module
diff --git a/pkg/lumera/lumera_mock.go b/pkg/lumera/lumera_mock.go
index 25d30789..e19ddfdb 100644
--- a/pkg/lumera/lumera_mock.go
+++ b/pkg/lumera/lumera_mock.go
@@ -15,6 +15,7 @@ import (
action "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action"
action_msg "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg"
auth "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth"
+ bank "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank"
node "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node"
supernode "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode"
tx "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx"
@@ -87,6 +88,20 @@ func (mr *MockClientMockRecorder) Auth() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Auth", reflect.TypeOf((*MockClient)(nil).Auth))
}
+// Bank mocks base method.
+func (m *MockClient) Bank() bank.Module {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Bank")
+ ret0, _ := ret[0].(bank.Module)
+ return ret0
+}
+
+// Bank indicates an expected call of Bank.
+func (mr *MockClientMockRecorder) Bank() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bank", reflect.TypeOf((*MockClient)(nil).Bank))
+}
+
// Close mocks base method.
func (m *MockClient) Close() error {
m.ctrl.T.Helper()
diff --git a/pkg/lumera/modules/auth/impl.go b/pkg/lumera/modules/auth/impl.go
index 3597d7a9..a3ad3bca 100644
--- a/pkg/lumera/modules/auth/impl.go
+++ b/pkg/lumera/modules/auth/impl.go
@@ -46,7 +46,7 @@ func (m *module) Verify(ctx context.Context, accAddress string, data, signature
return fmt.Errorf("invalid address: %w", err)
}
- logtrace.Info(ctx, "Verifying signature", logtrace.Fields{"address": addr.String()})
+ logtrace.Debug(ctx, "Verifying signature", logtrace.Fields{"address": addr.String()})
// Use Account RPC instead of AccountInfo to get the full account with public key
accResp, err := m.client.Account(ctx, &authtypes.QueryAccountRequest{
@@ -66,7 +66,7 @@ func (m *module) Verify(ctx context.Context, accAddress string, data, signature
if pubKey == nil {
return fmt.Errorf("public key is nil")
}
- logtrace.Info(ctx, "Public key retrieved", logtrace.Fields{"pubKey": pubKey.String()})
+ logtrace.Debug(ctx, "Public key retrieved", logtrace.Fields{"pubKey": pubKey.String()})
if !pubKey.VerifySignature(data, signature) {
return fmt.Errorf("invalid signature")
}
diff --git a/pkg/lumera/modules/bank/impl.go b/pkg/lumera/modules/bank/impl.go
new file mode 100644
index 00000000..157eb97f
--- /dev/null
+++ b/pkg/lumera/modules/bank/impl.go
@@ -0,0 +1,30 @@
+package bank
+
+import (
+ "context"
+ "fmt"
+
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ "google.golang.org/grpc"
+)
+
+type module struct {
+ client banktypes.QueryClient
+}
+
+func newModule(conn *grpc.ClientConn) (Module, error) {
+ if conn == nil {
+ return nil, fmt.Errorf("connection cannot be nil")
+ }
+ return &module{client: banktypes.NewQueryClient(conn)}, nil
+}
+
+func (m *module) Balance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) {
+ if address == "" {
+ return nil, fmt.Errorf("address cannot be empty")
+ }
+ if denom == "" {
+ return nil, fmt.Errorf("denom cannot be empty")
+ }
+ return m.client.Balance(ctx, &banktypes.QueryBalanceRequest{Address: address, Denom: denom})
+}
diff --git a/pkg/lumera/modules/bank/interface.go b/pkg/lumera/modules/bank/interface.go
new file mode 100644
index 00000000..b88093cf
--- /dev/null
+++ b/pkg/lumera/modules/bank/interface.go
@@ -0,0 +1,18 @@
+//go:generate mockgen -destination=bank_mock.go -package=bank -source=interface.go
+package bank
+
+import (
+ "context"
+
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ "google.golang.org/grpc"
+)
+
+// Module provides access to Cosmos SDK bank queries.
+type Module interface {
+ // Balance returns the balance for a specific denom at an address.
+ Balance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error)
+}
+
+// NewModule constructs a bank Module backed by the given gRPC connection.
+func NewModule(conn *grpc.ClientConn) (Module, error) { return newModule(conn) }
diff --git a/pkg/lumera/modules/tx/impl.go b/pkg/lumera/modules/tx/impl.go
index d342601b..6ac625ca 100644
--- a/pkg/lumera/modules/tx/impl.go
+++ b/pkg/lumera/modules/tx/impl.go
@@ -5,7 +5,6 @@ import (
"fmt"
"math"
"strconv"
- "time"
"github.com/LumeraProtocol/supernode/v2/pkg/logtrace"
lumeracodec "github.com/LumeraProtocol/supernode/v2/pkg/lumera/codec"
@@ -48,6 +47,18 @@ func newModule(conn *grpc.ClientConn) (Module, error) {
// SimulateTransaction simulates a transaction with given messages and returns gas used
func (m *module) SimulateTransaction(ctx context.Context, msgs []types.Msg, accountInfo *authtypes.BaseAccount, config *TxConfig) (*sdktx.SimulateResponse, error) {
+ if config == nil {
+ return nil, fmt.Errorf("tx config cannot be nil")
+ }
+ if accountInfo == nil {
+ return nil, fmt.Errorf("account info cannot be nil")
+ }
+ if config.Keyring == nil {
+ return nil, fmt.Errorf("keyring cannot be nil")
+ }
+ if config.KeyName == "" {
+ return nil, fmt.Errorf("key name cannot be empty")
+ }
// Create encoding config and client context
encCfg := lumeracodec.GetEncodingConfig()
clientCtx := client.Context{}.
@@ -103,12 +114,24 @@ func (m *module) SimulateTransaction(ctx context.Context, msgs []types.Msg, acco
return nil, fmt.Errorf("simulation error: %w", err)
}
- logtrace.Info(ctx, fmt.Sprintf("simulation complete | gasUsed=%d", simRes.GasInfo.GasUsed), nil)
+ logtrace.Debug(ctx, fmt.Sprintf("simulation complete | gasUsed=%d", simRes.GasInfo.GasUsed), nil)
return simRes, nil
}
// BuildAndSignTransaction builds and signs a transaction with the given parameters
func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, accountInfo *authtypes.BaseAccount, gasLimit uint64, fee string, config *TxConfig) ([]byte, error) {
+ if config == nil {
+ return nil, fmt.Errorf("tx config cannot be nil")
+ }
+ if accountInfo == nil {
+ return nil, fmt.Errorf("account info cannot be nil")
+ }
+ if config.Keyring == nil {
+ return nil, fmt.Errorf("keyring cannot be nil")
+ }
+ if config.KeyName == "" {
+ return nil, fmt.Errorf("key name cannot be empty")
+ }
// Create encoding config
encCfg := lumeracodec.GetEncodingConfig()
@@ -116,10 +139,9 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg,
clientCtx := client.Context{}.
WithCodec(encCfg.Codec).
WithTxConfig(encCfg.TxConfig).
- WithKeyring(config.Keyring).
- WithBroadcastMode("sync")
+ WithKeyring(config.Keyring)
- // Create transaction factory
+ // Create transaction factory
factory := tx.Factory{}.
WithTxConfig(clientCtx.TxConfig).
WithKeybase(config.Keyring).
@@ -127,7 +149,6 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg,
WithSequence(accountInfo.Sequence).
WithChainID(config.ChainID).
WithGas(gasLimit).
- WithGasAdjustment(config.GasAdjustment).
WithSignMode(signingtypes.SignMode_SIGN_MODE_DIRECT).
WithFees(fee)
@@ -143,7 +164,7 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg,
return nil, fmt.Errorf("failed to sign transaction: %w", err)
}
- logtrace.Info(ctx, "transaction signed successfully", nil)
+ logtrace.Debug(ctx, "transaction signed successfully", nil)
// Encode signed transaction
txBytes, err := clientCtx.TxConfig.TxEncoder()(txBuilder.GetTx())
@@ -157,10 +178,7 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg,
// BroadcastTransaction broadcasts a signed transaction and returns the result
func (m *module) BroadcastTransaction(ctx context.Context, txBytes []byte) (*sdktx.BroadcastTxResponse, error) {
// Broadcast transaction
- req := &sdktx.BroadcastTxRequest{
- TxBytes: txBytes,
- Mode: sdktx.BroadcastMode_BROADCAST_MODE_SYNC,
- }
+ req := &sdktx.BroadcastTxRequest{TxBytes: txBytes, Mode: sdktx.BroadcastMode_BROADCAST_MODE_SYNC}
resp, err := m.client.BroadcastTx(ctx, req)
@@ -273,7 +291,7 @@ func (m *module) ProcessTransaction(ctx context.Context, msgs []types.Msg, accou
// Step 3: Calculate fee based on adjusted gas
fee := m.CalculateFee(gasToUse, config)
- logtrace.Info(ctx, fmt.Sprintf("using simulated gas and calculated fee | simulatedGas=%d adjustedGas=%d fee=%s", simulatedGasUsed, gasToUse, fee), nil)
+ logtrace.Debug(ctx, fmt.Sprintf("using simulated gas and calculated fee | simulatedGas=%d gasToUse=%d fee=%s", simulatedGasUsed, gasToUse, fee), nil)
// Step 4: Build and sign transaction
txBytes, err := m.BuildAndSignTransaction(ctx, msgs, accountInfo, gasToUse, fee, config)
@@ -281,38 +299,12 @@ func (m *module) ProcessTransaction(ctx context.Context, msgs []types.Msg, accou
return nil, fmt.Errorf("failed to build and sign transaction: %w", err)
}
- // Step 5: Broadcast transaction
+ // Step 5: Broadcast transaction (SYNC mode)
result, err := m.BroadcastTransaction(ctx, txBytes)
if err != nil {
return result, fmt.Errorf("failed to broadcast transaction: %w", err)
}
- if result != nil && result.TxResponse != nil && result.TxResponse.Code == 0 && len(result.TxResponse.Events) == 0 {
- logtrace.Info(ctx, "Transaction broadcast successful, waiting for inclusion to get events...", nil)
-
- // Retry 5 times with 1 second intervals
- var txResp *sdktx.GetTxResponse
- for i := 0; i < 5; i++ {
- time.Sleep(1 * time.Second)
-
- txResp, err = m.GetTransaction(ctx, result.TxResponse.TxHash)
- if err == nil && txResp != nil && txResp.TxResponse != nil {
- // Successfully got the transaction with events
- logtrace.Info(ctx, fmt.Sprintf("Retrieved transaction with %d events", len(txResp.TxResponse.Events)), nil)
- result.TxResponse = txResp.TxResponse
- break
- }
-
- if err != nil {
- logtrace.Warn(ctx, fmt.Sprintf("Attempt %d: failed to query transaction: %v", i+1, err), nil)
- }
- }
- }
-
- if len(result.TxResponse.Events) == 0 {
- logtrace.Error(ctx, "Failed to retrieve transaction events after 5 attempts", nil)
- }
-
return result, nil
}
diff --git a/pkg/net/grpc/client/client.go b/pkg/net/grpc/client/client.go
index dc4f45de..907c5b58 100644
--- a/pkg/net/grpc/client/client.go
+++ b/pkg/net/grpc/client/client.go
@@ -112,9 +112,9 @@ var defaultBackoffConfig = backoff.Config{
func DefaultClientOptions() *ClientOptions {
return &ClientOptions{
MaxRecvMsgSize: 100 * MB,
- MaxSendMsgSize: 100 * MB, // 100MB
- InitialWindowSize: (int32)(1 * MB), // 1MB - controls initial frame size for streams
- InitialConnWindowSize: (int32)(1 * MB), // 1MB - controls initial frame size for connection
+ MaxSendMsgSize: 100 * MB, // 100MB
+ InitialWindowSize: (int32)(32 * MB), // 32MB - controls initial frame size for streams
+ InitialConnWindowSize: (int32)(128 * MB), // 128MB - controls initial frame size for connection
ConnWaitTime: defaultConnWaitTime,
KeepAliveTime: 30 * time.Minute,
diff --git a/pkg/net/grpc/client/client_test.go b/pkg/net/grpc/client/client_test.go
index 12b196fa..0b4a10e1 100644
--- a/pkg/net/grpc/client/client_test.go
+++ b/pkg/net/grpc/client/client_test.go
@@ -86,8 +86,8 @@ func TestDefaultClientOptions(t *testing.T) {
assert.NotNil(t, opts, "ClientOptions should be initialized")
assert.Equal(t, 100*MB, opts.MaxRecvMsgSize, "MaxRecvMsgSize should be 100 MB")
assert.Equal(t, 100*MB, opts.MaxSendMsgSize, "MaxSendMsgSize should be 100 MB")
- assert.Equal(t, int32(1*MB), opts.InitialWindowSize, "InitialWindowSize should be 1 MB")
- assert.Equal(t, int32(1*MB), opts.InitialConnWindowSize, "InitialConnWindowSize should be 1 MB")
+ assert.Equal(t, int32(32*MB), opts.InitialWindowSize, "InitialWindowSize should be 32 MB")
+ assert.Equal(t, int32(128*MB), opts.InitialConnWindowSize, "InitialConnWindowSize should be 128 MB")
assert.Equal(t, defaultConnWaitTime, opts.ConnWaitTime, "ConnWaitTime should be 10 seconds")
assert.True(t, opts.EnableRetries, "EnableRetries should be true")
assert.Equal(t, maxRetries, opts.MaxRetries, "MaxRetries should be 5")
diff --git a/pkg/net/grpc/server/server.go b/pkg/net/grpc/server/server.go
index 10a2452c..ae1a3524 100644
--- a/pkg/net/grpc/server/server.go
+++ b/pkg/net/grpc/server/server.go
@@ -94,7 +94,7 @@ func DefaultServerOptions() *ServerOptions {
return &ServerOptions{
MaxRecvMsgSize: 100 * MB,
MaxSendMsgSize: 100 * MB,
- InitialWindowSize: (int32)(1 * MB),
+ InitialWindowSize: (int32)(32 * MB),
InitialConnWindowSize: (int32)(1 * MB),
MaxConcurrentStreams: 1000,
GracefulShutdownTime: defaultGracefulShutdownTimeout,
@@ -102,13 +102,13 @@ func DefaultServerOptions() *ServerOptions {
MaxConnectionIdle: 2 * time.Hour,
MaxConnectionAge: 2 * time.Hour,
MaxConnectionAgeGrace: 1 * time.Hour,
- Time: 1 * time.Hour,
+ Time: 30 * time.Minute,
Timeout: 30 * time.Minute,
MinTime: 5 * time.Minute,
PermitWithoutStream: true,
- WriteBufferSize: 32 * KB,
- ReadBufferSize: 32 * KB,
+ WriteBufferSize: 512 * KB,
+ ReadBufferSize: 512 * KB,
}
}
@@ -203,7 +203,7 @@ func (s *Server) createListener(ctx context.Context, address string) (net.Listen
if err != nil {
return nil, errors.Errorf("failed to create listener: %w", err).WithField("address", address)
}
- logtrace.Info(ctx, "gRPC server listening", logtrace.Fields{"address": address})
+ logtrace.Debug(ctx, "gRPC server listening", logtrace.Fields{"address": address})
return lis, nil
}
@@ -256,7 +256,7 @@ func (s *Server) Serve(ctx context.Context, address string, opts *ServerOptions)
// Wait for context cancellation or error
select {
case <-ctx.Done():
- logtrace.Info(ctx, "Shutting down gRPC server", logtrace.Fields{"address": address})
+ logtrace.Debug(ctx, "Shutting down gRPC server", logtrace.Fields{"address": address})
return s.Stop(opts.GracefulShutdownTime)
case err := <-serveErr:
return err
diff --git a/pkg/net/grpc/server/server_test.go b/pkg/net/grpc/server/server_test.go
index 4c6067e0..f2308436 100644
--- a/pkg/net/grpc/server/server_test.go
+++ b/pkg/net/grpc/server/server_test.go
@@ -55,18 +55,18 @@ func TestDefaultServerOptions(t *testing.T) {
assert.NotNil(t, opts, "Server options should be initialized")
assert.Equal(t, 100*MB, opts.MaxRecvMsgSize, "MaxRecvMsgSize should be 100 MB")
assert.Equal(t, 100*MB, opts.MaxSendMsgSize, "MaxSendMsgSize should be 100 MB")
- assert.Equal(t, int32(1*MB), opts.InitialWindowSize, "InitialWindowSize should be 1 MB")
+ assert.Equal(t, int32(32*MB), opts.InitialWindowSize, "InitialWindowSize should be 32 MB")
assert.Equal(t, int32(1*MB), opts.InitialConnWindowSize, "InitialConnWindowSize should be 1 MB")
assert.Equal(t, uint32(1000), opts.MaxConcurrentStreams, "MaxConcurrentStreams should be 1000")
assert.Equal(t, defaultGracefulShutdownTimeout, opts.GracefulShutdownTime,
fmt.Sprintf("GracefulShutdownTimeout should be %v", defaultGracefulShutdownTimeout))
assert.Equal(t, uint32(0), opts.NumServerWorkers, "NumServerWorkers should be 0")
- assert.Equal(t, 32*KB, opts.WriteBufferSize, "WriteBufferSize should be 32 KB")
- assert.Equal(t, 32*KB, opts.ReadBufferSize, "ReadBufferSize should be 32 KB")
+ assert.Equal(t, 512*KB, opts.WriteBufferSize, "WriteBufferSize should be 512 KB")
+ assert.Equal(t, 512*KB, opts.ReadBufferSize, "ReadBufferSize should be 512 KB")
assert.Equal(t, 2*time.Hour, opts.MaxConnectionIdle, "MaxConnectionIdle should be 2 hours")
assert.Equal(t, 2*time.Hour, opts.MaxConnectionAge, "MaxConnectionAge should be 2 hours")
assert.Equal(t, 1*time.Hour, opts.MaxConnectionAgeGrace, "MaxConnectionAgeGrace should be 1 hour")
- assert.Equal(t, 1*time.Hour, opts.Time, "Time should be 1 hour")
+ assert.Equal(t, 30*time.Minute, opts.Time, "Time should be 30 minutes")
assert.Equal(t, 30*time.Minute, opts.Timeout, "Timeout should be 30 minutes")
assert.Equal(t, 5*time.Minute, opts.MinTime, "MinTime should be 5 minutes")
assert.True(t, opts.PermitWithoutStream, "PermitWithoutStream should be true")
diff --git a/pkg/net/interceptor.go b/pkg/net/interceptor.go
index f29d88a1..b33aadcf 100644
--- a/pkg/net/interceptor.go
+++ b/pkg/net/interceptor.go
@@ -34,7 +34,7 @@ func UnaryServerInterceptor() grpc.UnaryServerInterceptor {
logtrace.FieldMethod: info.FullMethod,
logtrace.FieldCorrelationID: correlationID,
}
- logtrace.Info(ctx, "received gRPC request", fields)
+ logtrace.Debug(ctx, "received gRPC request", fields)
resp, err := handler(ctx, req)
@@ -42,7 +42,7 @@ func UnaryServerInterceptor() grpc.UnaryServerInterceptor {
fields[logtrace.FieldError] = err.Error()
logtrace.Error(ctx, "gRPC request failed", fields)
} else {
- logtrace.Info(ctx, "gRPC request processed successfully", fields)
+ logtrace.Debug(ctx, "gRPC request processed successfully", fields)
}
return resp, err
diff --git a/pkg/p2pmetrics/metrics.go b/pkg/p2pmetrics/metrics.go
deleted file mode 100644
index 945225db..00000000
--- a/pkg/p2pmetrics/metrics.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package p2pmetrics
-
-import (
- "context"
- "sync"
-)
-
-// Call represents a single per-node RPC outcome (store or retrieve).
-type Call struct {
- IP string `json:"ip"`
- Address string `json:"address"`
- Keys int `json:"keys"`
- Success bool `json:"success"`
- Error string `json:"error,omitempty"`
- DurationMS int64 `json:"duration_ms"`
- Noop bool `json:"noop,omitempty"`
-}
-
-// -------- Lightweight hooks -------------------------
-
-var (
- storeMu sync.RWMutex
- storeHook = make(map[string]func(Call))
-
- retrieveMu sync.RWMutex
- retrieveHook = make(map[string]func(Call))
-
- foundLocalMu sync.RWMutex
- foundLocalCb = make(map[string]func(int))
-)
-
-// RegisterStoreHook registers a callback to receive store RPC calls for a task.
-func RegisterStoreHook(taskID string, fn func(Call)) {
- storeMu.Lock()
- defer storeMu.Unlock()
- if fn == nil {
- delete(storeHook, taskID)
- return
- }
- storeHook[taskID] = fn
-}
-
-// UnregisterStoreHook removes the registered store callback for a task.
-func UnregisterStoreHook(taskID string) { RegisterStoreHook(taskID, nil) }
-
-// RecordStore invokes the registered store callback for the given task, if any.
-func RecordStore(taskID string, c Call) {
- storeMu.RLock()
- fn := storeHook[taskID]
- storeMu.RUnlock()
- if fn != nil {
- fn(c)
- }
-}
-
-// RegisterRetrieveHook registers a callback to receive retrieve RPC calls.
-func RegisterRetrieveHook(taskID string, fn func(Call)) {
- retrieveMu.Lock()
- defer retrieveMu.Unlock()
- if fn == nil {
- delete(retrieveHook, taskID)
- return
- }
- retrieveHook[taskID] = fn
-}
-
-// UnregisterRetrieveHook removes the registered retrieve callback for a task.
-func UnregisterRetrieveHook(taskID string) { RegisterRetrieveHook(taskID, nil) }
-
-// RecordRetrieve invokes the registered retrieve callback for the given task.
-func RecordRetrieve(taskID string, c Call) {
- retrieveMu.RLock()
- fn := retrieveHook[taskID]
- retrieveMu.RUnlock()
- if fn != nil {
- fn(c)
- }
-}
-
-// RegisterFoundLocalHook registers a callback to receive found-local counts.
-func RegisterFoundLocalHook(taskID string, fn func(int)) {
- foundLocalMu.Lock()
- defer foundLocalMu.Unlock()
- if fn == nil {
- delete(foundLocalCb, taskID)
- return
- }
- foundLocalCb[taskID] = fn
-}
-
-// UnregisterFoundLocalHook removes the registered found-local callback.
-func UnregisterFoundLocalHook(taskID string) { RegisterFoundLocalHook(taskID, nil) }
-
-// ReportFoundLocal invokes the registered found-local callback for the task.
-func ReportFoundLocal(taskID string, count int) {
- foundLocalMu.RLock()
- fn := foundLocalCb[taskID]
- foundLocalMu.RUnlock()
- if fn != nil {
- fn(count)
- }
-}
-
-// -------- Minimal in-process collectors for events --------------------------
-
-// Store session
-type storeSession struct {
- CallsByIP map[string][]Call
- SymbolsFirstPass int
- SymbolsTotal int
- IDFilesCount int
- DurationMS int64
-}
-
-var storeSessions = struct{ m map[string]*storeSession }{m: map[string]*storeSession{}}
-
-// RegisterStoreBridge hooks store callbacks into the store session collector.
-func StartStoreCapture(taskID string) {
- RegisterStoreHook(taskID, func(c Call) {
- s := storeSessions.m[taskID]
- if s == nil {
- s = &storeSession{CallsByIP: map[string][]Call{}}
- storeSessions.m[taskID] = s
- }
- key := c.IP
- if key == "" {
- key = c.Address
- }
- s.CallsByIP[key] = append(s.CallsByIP[key], c)
- })
-}
-
-func StopStoreCapture(taskID string) { UnregisterStoreHook(taskID) }
-
-// SetStoreSummary sets store summary fields for the first pass and totals.
-//
-// - symbolsFirstPass: number of symbols sent during the first pass
-// - symbolsTotal: total symbols available in the directory
-// - idFilesCount: number of ID/metadata files included in the first combined batch
-// - durationMS: elapsed time of the first-pass store phase
-func SetStoreSummary(taskID string, symbolsFirstPass, symbolsTotal, idFilesCount int, durationMS int64) {
- if taskID == "" {
- return
- }
- s := storeSessions.m[taskID]
- if s == nil {
- s = &storeSession{CallsByIP: map[string][]Call{}}
- storeSessions.m[taskID] = s
- }
- s.SymbolsFirstPass = symbolsFirstPass
- s.SymbolsTotal = symbolsTotal
- s.IDFilesCount = idFilesCount
- s.DurationMS = durationMS
-}
-
-// BuildStoreEventPayloadFromCollector builds the store event payload (minimal).
-func BuildStoreEventPayloadFromCollector(taskID string) map[string]any {
- s := storeSessions.m[taskID]
- if s == nil {
- return map[string]any{
- "store": map[string]any{
- "duration_ms": int64(0),
- "symbols_first_pass": 0,
- "symbols_total": 0,
- "id_files_count": 0,
- "success_rate_pct": float64(0),
- "calls_by_ip": map[string][]Call{},
- },
- }
- }
- // Compute per-call success rate across first-pass store RPC attempts
- totalCalls := 0
- successCalls := 0
- for _, calls := range s.CallsByIP {
- for _, c := range calls {
- totalCalls++
- if c.Success {
- successCalls++
- }
- }
- }
- var successRate float64
- if totalCalls > 0 {
- successRate = float64(successCalls) / float64(totalCalls) * 100.0
- }
- return map[string]any{
- "store": map[string]any{
- "duration_ms": s.DurationMS,
- "symbols_first_pass": s.SymbolsFirstPass,
- "symbols_total": s.SymbolsTotal,
- "id_files_count": s.IDFilesCount,
- "success_rate_pct": successRate,
- "calls_by_ip": s.CallsByIP,
- },
- }
-}
-
-// Retrieve session
-type retrieveSession struct {
- CallsByIP map[string][]Call
- FoundLocal int
- FoundNet int
- Keys int
- Required int
- RetrieveMS int64
- DecodeMS int64
-}
-
-var retrieveSessions = struct{ m map[string]*retrieveSession }{m: map[string]*retrieveSession{}}
-
-// RegisterRetrieveBridge hooks retrieve callbacks into the retrieve collector.
-func StartRetrieveCapture(taskID string) {
- RegisterRetrieveHook(taskID, func(c Call) {
- s := retrieveSessions.m[taskID]
- if s == nil {
- s = &retrieveSession{CallsByIP: map[string][]Call{}}
- retrieveSessions.m[taskID] = s
- }
- key := c.IP
- if key == "" {
- key = c.Address
- }
- s.CallsByIP[key] = append(s.CallsByIP[key], c)
- })
- RegisterFoundLocalHook(taskID, func(n int) {
- s := retrieveSessions.m[taskID]
- if s == nil {
- s = &retrieveSession{CallsByIP: map[string][]Call{}}
- retrieveSessions.m[taskID] = s
- }
- s.FoundLocal = n
- })
-}
-
-func StopRetrieveCapture(taskID string) {
- UnregisterRetrieveHook(taskID)
- UnregisterFoundLocalHook(taskID)
-}
-
-// SetRetrieveBatchSummary sets counts for a retrieval attempt.
-func SetRetrieveBatchSummary(taskID string, keys, required, foundLocal, foundNet int, retrieveMS int64) {
- if taskID == "" {
- return
- }
- s := retrieveSessions.m[taskID]
- if s == nil {
- s = &retrieveSession{CallsByIP: map[string][]Call{}}
- retrieveSessions.m[taskID] = s
- }
- s.Keys = keys
- s.Required = required
- s.FoundLocal = foundLocal
- s.FoundNet = foundNet
- s.RetrieveMS = retrieveMS
-}
-
-// SetRetrieveSummary sets timing info for retrieve/decode phases.
-func SetRetrieveSummary(taskID string, retrieveMS, decodeMS int64) {
- if taskID == "" {
- return
- }
- s := retrieveSessions.m[taskID]
- if s == nil {
- s = &retrieveSession{CallsByIP: map[string][]Call{}}
- retrieveSessions.m[taskID] = s
- }
- s.RetrieveMS = retrieveMS
- s.DecodeMS = decodeMS
-}
-
-// BuildDownloadEventPayloadFromCollector builds the download section payload.
-func BuildDownloadEventPayloadFromCollector(taskID string) map[string]any {
- s := retrieveSessions.m[taskID]
- if s == nil {
- return map[string]any{
- "retrieve": map[string]any{
- "keys": 0,
- "required": 0,
- "found_local": 0,
- "found_net": 0,
- "retrieve_ms": int64(0),
- "decode_ms": int64(0),
- "calls_by_ip": map[string][]Call{},
- },
- }
- }
- return map[string]any{
- "retrieve": map[string]any{
- "keys": s.Keys,
- "required": s.Required,
- "found_local": s.FoundLocal,
- "found_net": s.FoundNet,
- "retrieve_ms": s.RetrieveMS,
- "decode_ms": s.DecodeMS,
- "calls_by_ip": s.CallsByIP,
- },
- }
-}
-
-// -------- Context helpers (dedicated to metrics tagging) --------------------
-
-type ctxKey string
-
-var taskIDKey ctxKey = "p2pmetrics-task-id"
-
-// WithTaskID returns a child context with the metrics task ID set.
-func WithTaskID(ctx context.Context, taskID string) context.Context {
- if ctx == nil {
- return context.Background()
- }
- return context.WithValue(ctx, taskIDKey, taskID)
-}
-
-// TaskIDFromContext extracts the metrics task ID from context (or "").
-func TaskIDFromContext(ctx context.Context) string {
- if ctx == nil {
- return ""
- }
- if v := ctx.Value(taskIDKey); v != nil {
- if s, ok := v.(string); ok {
- return s
- }
- }
- return ""
-}
diff --git a/pkg/storage/queries/health_check.go b/pkg/storage/queries/health_check.go
index e76799da..96802dd8 100644
--- a/pkg/storage/queries/health_check.go
+++ b/pkg/storage/queries/health_check.go
@@ -98,10 +98,10 @@ func (s *SQLiteStore) GetHCSummaryStats(from time.Time) (hcMetrics metrics.HCMet
if err != nil {
return hcMetrics, err
}
- logtrace.Info(context.Background(), "observer evaluations retrieved", logtrace.Fields{"observer_evaluations": len(hcObserversEvaluations), "from": from})
+ logtrace.Debug(context.Background(), "observer evaluations retrieved", logtrace.Fields{"observer_evaluations": len(hcObserversEvaluations), "from": from})
observerEvaluationMetrics := processHCObserverEvaluations(hcObserversEvaluations)
- logtrace.Info(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{"observer_evaluation_metrics": len(observerEvaluationMetrics), "from": from})
+ logtrace.Debug(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{"observer_evaluation_metrics": len(observerEvaluationMetrics), "from": from})
for _, obMetrics := range observerEvaluationMetrics {
if obMetrics.ChallengesVerified >= 3 {
@@ -154,7 +154,7 @@ func (s *SQLiteStore) GetMetricsDataByHealthCheckChallengeID(ctx context.Context
if err != nil {
return healthCheckChallengeMessages, err
}
- logtrace.Info(ctx, "health-check-challenge metrics row count", logtrace.Fields{"rows": len(hcMetrics), "challenge_id": challengeID})
+ logtrace.Debug(ctx, "health-check-challenge metrics row count", logtrace.Fields{"rows": len(hcMetrics), "challenge_id": challengeID})
for _, hcMetric := range hcMetrics {
msg := types.HealthCheckMessageData{}
diff --git a/pkg/storage/queries/self_healing.go b/pkg/storage/queries/self_healing.go
index 47145a0b..61e7c63c 100644
--- a/pkg/storage/queries/self_healing.go
+++ b/pkg/storage/queries/self_healing.go
@@ -257,7 +257,7 @@ func (s *SQLiteStore) GetSHExecutionMetrics(ctx context.Context, from time.Time)
if err != nil {
return m, err
}
- logtrace.Info(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)})
+ logtrace.Debug(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)})
challenges := make(map[string]SHChallengeMetric)
for _, row := range rows {
@@ -361,11 +361,11 @@ func (s *SQLiteStore) GetSHExecutionMetrics(ctx context.Context, from time.Time)
}
}
- logtrace.Info(ctx, "self-healing execution metrics challenges count", logtrace.Fields{"challenges": len(challenges)})
+ logtrace.Debug(ctx, "self-healing execution metrics challenges count", logtrace.Fields{"challenges": len(challenges)})
for _, challenge := range challenges {
- logtrace.Info(ctx, "self-healing challenge metric", logtrace.Fields{
+ logtrace.Debug(ctx, "self-healing challenge metric", logtrace.Fields{
"challenge-id": challenge.ChallengeID,
"is-accepted": challenge.IsAccepted,
"is-verified": challenge.IsVerified,
@@ -475,7 +475,7 @@ func (s *SQLiteStore) GetLastNSHChallenges(ctx context.Context, n int) (types.Se
if err != nil {
return challenges, err
}
- logtrace.Info(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)})
+ logtrace.Debug(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)})
challengesInserted := 0
for _, row := range rows {
@@ -507,7 +507,7 @@ func (s *SQLiteStore) GetSHChallengeReport(ctx context.Context, challengeID stri
if err != nil {
return challenges, err
}
- logtrace.Info(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)})
+ logtrace.Debug(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)})
for _, row := range rows {
if row.ChallengeID == challengeID {
diff --git a/pkg/storage/queries/storage_challenge.go b/pkg/storage/queries/storage_challenge.go
index 574e7f4f..164ed2be 100644
--- a/pkg/storage/queries/storage_challenge.go
+++ b/pkg/storage/queries/storage_challenge.go
@@ -97,7 +97,7 @@ func (s *SQLiteStore) GetMetricsDataByStorageChallengeID(ctx context.Context, ch
return storageChallengeMessages, err
}
// log.WithContext(ctx).WithField("rows", len(scMetrics)).Info("storage-challenge metrics row count")
- logtrace.Info(ctx, "storage-challenge metrics row count", logtrace.Fields{
+ logtrace.Debug(ctx, "storage-challenge metrics row count", logtrace.Fields{
"rows": len(scMetrics),
})
@@ -210,13 +210,13 @@ func (s *SQLiteStore) GetSCSummaryStats(from time.Time) (scMetrics metrics.SCMet
return scMetrics, err
}
// log.WithField("observer_evaluations", len(observersEvaluations)).Info("observer evaluations retrieved")
- logtrace.Info(context.Background(), "observer evaluations retrieved", logtrace.Fields{
+ logtrace.Debug(context.Background(), "observer evaluations retrieved", logtrace.Fields{
"observer_evaluations": len(observersEvaluations),
})
observerEvaluationMetrics := processObserverEvaluations(observersEvaluations)
// log.WithField("observer_evaluation_metrics", len(observerEvaluationMetrics)).Info("observer evaluation metrics retrieved")
- logtrace.Info(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{
+ logtrace.Debug(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{
"observer_evaluation_metrics": len(observerEvaluationMetrics),
})
diff --git a/pkg/storage/queries/task_history.go b/pkg/storage/queries/task_history.go
index 73a55ef8..29539a49 100644
--- a/pkg/storage/queries/task_history.go
+++ b/pkg/storage/queries/task_history.go
@@ -59,7 +59,7 @@ func (s *SQLiteStore) QueryTaskHistory(taskID string) (history []types.TaskHisto
err = json.Unmarshal([]byte(details), &i.Details)
if err != nil {
- logtrace.Info(context.Background(), "Detals", logtrace.Fields{"details": details})
+ logtrace.Debug(context.Background(), "Detals", logtrace.Fields{"details": details})
logtrace.Error(context.Background(), fmt.Sprintf("cannot unmarshal task history details: %s", details), logtrace.Fields{"error": err})
i.Details = nil
}
diff --git a/pkg/testutil/lumera.go b/pkg/testutil/lumera.go
index 3f556a97..a4d09814 100644
--- a/pkg/testutil/lumera.go
+++ b/pkg/testutil/lumera.go
@@ -9,15 +9,18 @@ import (
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth"
+ bankmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode"
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx"
+ sdkmath "cosmossdk.io/math"
cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice"
"github.com/cosmos/cosmos-sdk/crypto/keyring"
sdktypes "github.com/cosmos/cosmos-sdk/types"
sdktx "github.com/cosmos/cosmos-sdk/types/tx"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
)
// MockLumeraClient implements the lumera.Client interface for testing purposes
@@ -25,6 +28,7 @@ type MockLumeraClient struct {
authMod *MockAuthModule
actionMod *MockActionModule
actionMsgMod *MockActionMsgModule
+ bankMod *MockBankModule
supernodeMod *MockSupernodeModule
txMod *MockTxModule
nodeMod *MockNodeModule
@@ -36,6 +40,7 @@ type MockLumeraClient struct {
func NewMockLumeraClient(kr keyring.Keyring, addresses []string) (lumera.Client, error) {
actionMod := &MockActionModule{}
actionMsgMod := &MockActionMsgModule{}
+ bankMod := &MockBankModule{}
supernodeMod := &MockSupernodeModule{addresses: addresses}
txMod := &MockTxModule{}
nodeMod := &MockNodeModule{}
@@ -44,6 +49,7 @@ func NewMockLumeraClient(kr keyring.Keyring, addresses []string) (lumera.Client,
authMod: &MockAuthModule{},
actionMod: actionMod,
actionMsgMod: actionMsgMod,
+ bankMod: bankMod,
supernodeMod: supernodeMod,
txMod: txMod,
nodeMod: nodeMod,
@@ -67,6 +73,11 @@ func (c *MockLumeraClient) ActionMsg() action_msg.Module {
return c.actionMsgMod
}
+// Bank returns the Bank module client
+func (c *MockLumeraClient) Bank() bankmod.Module {
+ return c.bankMod
+}
+
// SuperNode returns the SuperNode module client
func (c *MockLumeraClient) SuperNode() supernode.Module {
return c.supernodeMod
@@ -87,6 +98,15 @@ func (c *MockLumeraClient) Close() error {
return nil
}
+// MockBankModule implements the bank.Module interface for testing
+type MockBankModule struct{}
+
+// Balance returns a positive balance for any address/denom to pass checks by default
+func (m *MockBankModule) Balance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) {
+ // Return >= 1 LUME in micro units to satisfy threshold checks
+ return &banktypes.QueryBalanceResponse{Balance: &sdktypes.Coin{Denom: denom, Amount: sdkmath.NewInt(1_000_000)}}, nil
+}
+
// MockAuthModule implements the auth.Module interface for testing
type MockAuthModule struct{}
@@ -124,8 +144,8 @@ type MockActionMsgModule struct{}
// RequestAction mocks the behavior of requesting an action.
func (m *MockActionMsgModule) RequestAction(ctx context.Context, actionType, metadata, price, expirationTime string) (*sdktx.BroadcastTxResponse, error) {
- // Mock implementation returns success with empty result
- return &sdktx.BroadcastTxResponse{}, nil
+ // Mock implementation returns success with empty result
+ return &sdktx.BroadcastTxResponse{}, nil
}
// FinalizeCascadeAction implements the required method from action_msg.Module interface
diff --git a/profile_cascade.sh b/profile_cascade.sh
new file mode 100755
index 00000000..7fe0af5e
--- /dev/null
+++ b/profile_cascade.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+# Cascade Download Heap Profiling Script
+# Samples heap every 30 seconds during cascade downloads
+
+# Configuration - modify these as needed
+PROFILE_URL="http://localhost:6062/debug/pprof/heap"
+INTERVAL=30
+TIMESTAMP=$(date +%Y%m%d_%H%M%S)
+PROFILE_DIR="profiles_${TIMESTAMP}"
+
+# Allow override via command line
+if [ "$1" != "" ]; then
+ PROFILE_URL="$1"
+fi
+
+echo "=== Cascade Heap Profiling ==="
+echo "Profile URL: $PROFILE_URL"
+echo "Interval: ${INTERVAL}s"
+echo "Output Dir: $PROFILE_DIR"
+echo
+
+# Create profile directory
+mkdir -p "$PROFILE_DIR"
+cd "$PROFILE_DIR"
+
+# Test connection first
+echo "Testing connection to profiling server..."
+if ! curl -s --fail "$PROFILE_URL" > /dev/null; then
+ echo "ERROR: Cannot connect to profiling server at $PROFILE_URL"
+ echo "Make sure your supernode is running on testnet!"
+ exit 1
+fi
+
+echo "✓ Connected to profiling server"
+echo
+
+# Take baseline
+echo "Taking baseline heap snapshot..."
+curl -s -o "heap_00s.prof" "$PROFILE_URL"
+echo "✓ Baseline saved: heap_00s.prof"
+echo
+
+echo "*** NOW START YOUR CASCADE DOWNLOAD ***"
+echo "Press ENTER when download has started..."
+read
+
+echo "Starting heap profiling every ${INTERVAL}s..."
+echo "Press Ctrl+C to stop"
+echo
+
+# Counter for snapshots
+counter=1
+
+# Function to handle cleanup on exit
+cleanup() {
+ echo
+ echo "Profiling stopped. Taking final snapshot..."
+ final_elapsed=$((counter * INTERVAL))
+ curl -s -o "heap_${final_elapsed}s_final.prof" "$PROFILE_URL"
+
+ echo
+ echo "=== Profiling Complete ==="
+ echo "Location: $(pwd)"
+ echo "Files created:"
+ ls -la *.prof
+ echo
+ echo "Analysis commands:"
+ echo "# Compare baseline to final:"
+ echo "go tool pprof -http=:8080 -base heap_00s.prof heap_${final_elapsed}s_final.prof"
+ exit 0
+}
+
+# Set up signal handler
+trap cleanup INT TERM
+
+# Main profiling loop
+while true; do
+ sleep $INTERVAL
+
+ elapsed=$((counter * INTERVAL))
+ minutes=$((elapsed / 60))
+ seconds=$((elapsed % 60))
+
+ timestamp=$(date +%H:%M:%S)
+ filename="heap_${elapsed}s.prof"
+
+ echo "[$timestamp] Taking snapshot $counter (${minutes}m ${seconds}s elapsed)..."
+
+ if curl -s -o "$filename" "$PROFILE_URL"; then
+ size=$(ls -lh "$filename" | awk '{print $5}')
+ echo "✓ Saved: $filename ($size)"
+ else
+ echo "✗ Failed to get snapshot $counter"
+ fi
+
+ ((counter++))
+done
\ No newline at end of file
diff --git a/proto/proto.go b/proto/proto.go
deleted file mode 100644
index 34045007..00000000
--- a/proto/proto.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package proto
-
-const (
- // MetadataKeySessID is unique numeric for every registration process, encompasses for all connections.
- MetadataKeySessID = "sessID"
-)
diff --git a/proto/supernode/service.proto b/proto/supernode/service.proto
new file mode 100644
index 00000000..9725f84a
--- /dev/null
+++ b/proto/supernode/service.proto
@@ -0,0 +1,34 @@
+syntax = "proto3";
+package supernode;
+option go_package = "github.com/LumeraProtocol/supernode/v2/gen/supernode";
+
+import "supernode/status.proto";
+import "google/api/annotations.proto";
+
+// SupernodeService provides status information for all services
+service SupernodeService {
+ rpc GetStatus(StatusRequest) returns (StatusResponse) {
+ option (google.api.http) = {
+ get: "/api/v1/status"
+ };
+ }
+
+ rpc ListServices(ListServicesRequest) returns (ListServicesResponse) {
+ option (google.api.http) = {
+ get: "/api/v1/services"
+ };
+ }
+}
+
+message ListServicesRequest {}
+
+message ListServicesResponse {
+ repeated ServiceInfo services = 1;
+ int32 count = 2;
+}
+
+message ServiceInfo {
+ string name = 1;
+ repeated string methods = 2;
+}
+
diff --git a/proto/supernode/supernode.proto b/proto/supernode/status.proto
similarity index 71%
rename from proto/supernode/supernode.proto
rename to proto/supernode/status.proto
index 50597e90..7cafe908 100644
--- a/proto/supernode/supernode.proto
+++ b/proto/supernode/status.proto
@@ -2,41 +2,13 @@ syntax = "proto3";
package supernode;
option go_package = "github.com/LumeraProtocol/supernode/v2/gen/supernode";
-import "google/api/annotations.proto";
-
-// SupernodeService provides status information for all services
-service SupernodeService {
- rpc GetStatus(StatusRequest) returns (StatusResponse) {
- option (google.api.http) = {
- get: "/api/v1/status"
- };
- }
-
- rpc ListServices(ListServicesRequest) returns (ListServicesResponse) {
- option (google.api.http) = {
- get: "/api/v1/services"
- };
- }
-}
-
+// StatusRequest controls optional metrics in the status response
message StatusRequest {
// Optional: include detailed P2P metrics in the response
// Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true
bool include_p2p_metrics = 1;
}
-message ListServicesRequest {}
-
-message ListServicesResponse {
- repeated ServiceInfo services = 1;
- int32 count = 2;
-}
-
-message ServiceInfo {
- string name = 1;
- repeated string methods = 2;
-}
-
// The StatusResponse represents system status with clear organization
message StatusResponse {
string version = 1; // Supernode version
@@ -46,7 +18,7 @@ message StatusResponse {
message Resources {
message CPU {
double usage_percent = 1; // CPU usage percentage (0-100)
- int32 cores = 2; // Number of CPU cores
+ int32 cores = 2; // Number of CPU cores
}
message Memory {
@@ -154,39 +126,8 @@ message StatusResponse {
repeated BanEntry ban_list = 4;
DatabaseStats database = 5;
DiskStatus disk = 6;
-
- // Last handled BatchStoreData requests (most recent first)
- message RecentBatchStoreEntry {
- int64 time_unix = 1;
- string sender_id = 2;
- string sender_ip = 3;
- int32 keys = 4;
- int64 duration_ms = 5;
- bool ok = 6;
- string error = 7;
- }
-
- // Last handled BatchGetValues requests (most recent first)
- message RecentBatchRetrieveEntry {
- int64 time_unix = 1;
- string sender_id = 2;
- string sender_ip = 3;
- int32 requested = 4;
- int32 found = 5;
- int64 duration_ms = 6;
- string error = 7;
- }
-
- repeated RecentBatchStoreEntry recent_batch_store = 7;
- repeated RecentBatchRetrieveEntry recent_batch_retrieve = 8;
-
- // Per-IP buckets: last 10 per sender IP
- message RecentBatchStoreList { repeated RecentBatchStoreEntry entries = 1; }
- message RecentBatchRetrieveList { repeated RecentBatchRetrieveEntry entries = 1; }
- map recent_batch_store_by_ip = 9;
- map recent_batch_retrieve_by_ip = 10;
}
P2PMetrics p2p_metrics = 9;
-
}
+
diff --git a/sdk/README.md b/sdk/README.md
index b0aecb20..f8385eef 100644
--- a/sdk/README.md
+++ b/sdk/README.md
@@ -221,11 +221,13 @@ if err != nil {
// taskID can be used to track the download progress
```
+Note: If the action's cascade metadata sets `public: true`, the signature may be left empty to allow anonymous download.
+
**Parameters:**
- `ctx context.Context`: Context for the operation
- `actionID string`: ID of the action to download
- `outputDir string`: Directory where the downloaded file will be saved
-- `signature string`: Base64-encoded signature for download authorization
+- `signature string`: Base64-encoded signature for download authorization (leave empty for public cascades)
**Signature Creation for Download:**
The download signature is created by combining the action ID with the creator's address, signing it, and base64 encoding the result.
@@ -286,7 +288,7 @@ if err != nil {
**Returns:**
- `error`: Error if the task doesn't exist or deletion fails
-### GetSupernodeStatus
+### GetSupernodeStatus (Status API)
Retrieves the current status and resource information of a specific supernode.
@@ -303,27 +305,12 @@ if err != nil {
- `supernodeAddress string`: Cosmos address of the supernode
**Returns:**
-- `*supernodeservice.SupernodeStatusresponse`: Status information including CPU usage, memory stats, and active services
-- `error`: Error if the supernode is unreachable or query fails
-
-Include detailed P2P metrics (optional):
-
-By default, peer info and P2P metrics are not returned to keep calls lightweight. To include them, set an option in the context:
-
-```go
-import snsvc "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice"
+- `*supernode.StatusResponse`: Status information including CPU usage, memory stats, active services, and P2P metrics
+- `error`: Error if the supernode is unreachable or the query fails
-// Opt-in via context
-ctxWithMetrics := snsvc.WithIncludeP2PMetrics(ctx)
-status, err := client.GetSupernodeStatus(ctxWithMetrics, "lumera1abc...")
-if err != nil {
- // handle error
-}
-
-// Access optional fields when present
-fmt.Println("Peers:", status.Network.PeersCount)
-fmt.Println("DHT hot path bans:", status.P2PMetrics.DhtMetrics.HotPathBanIncrements)
-```
+Notes:
+- The SDK always requests P2P metrics to ensure `Network.PeersCount` is populated for eligibility checks.
+- Status response is the generated type; no mapping layer in the SDK.
### SubscribeToEvents
diff --git a/sdk/action/client.go b/sdk/action/client.go
index fc3c2d7e..db5a932f 100644
--- a/sdk/action/client.go
+++ b/sdk/action/client.go
@@ -4,8 +4,8 @@ import (
"context"
"fmt"
+ pb "github.com/LumeraProtocol/supernode/v2/gen/supernode"
"github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera"
- "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice"
"github.com/LumeraProtocol/supernode/v2/sdk/config"
"github.com/LumeraProtocol/supernode/v2/sdk/event"
"github.com/LumeraProtocol/supernode/v2/sdk/log"
@@ -26,7 +26,7 @@ type Client interface {
GetTask(ctx context.Context, taskID string) (*task.TaskEntry, bool)
SubscribeToEvents(ctx context.Context, eventType event.EventType, handler event.Handler) error
SubscribeToAllEvents(ctx context.Context, handler event.Handler) error
- GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*supernodeservice.SupernodeStatusresponse, error)
+ GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*pb.StatusResponse, error)
// DownloadCascade downloads cascade to outputDir, filename determined by action ID
DownloadCascade(ctx context.Context, actionID, outputDir, signature string) (string, error)
}
@@ -151,7 +151,7 @@ func (c *ClientImpl) SubscribeToAllEvents(ctx context.Context, handler event.Han
}
// GetSupernodeStatus retrieves the status of a specific supernode by its address
-func (c *ClientImpl) GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*supernodeservice.SupernodeStatusresponse, error) {
+func (c *ClientImpl) GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*pb.StatusResponse, error) {
if supernodeAddress == "" {
c.logger.Error(ctx, "Empty supernode address provided")
return nil, fmt.Errorf("supernode address cannot be empty")
diff --git a/sdk/adapters/lumera/adapter.go b/sdk/adapters/lumera/adapter.go
index 8fe7a1fb..1c20acdd 100644
--- a/sdk/adapters/lumera/adapter.go
+++ b/sdk/adapters/lumera/adapter.go
@@ -13,6 +13,7 @@ import (
lumeraclient "github.com/LumeraProtocol/supernode/v2/pkg/lumera"
"github.com/cosmos/cosmos-sdk/crypto/keyring"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
"github.com/golang/protobuf/proto"
)
@@ -25,6 +26,8 @@ type Client interface {
GetSupernodeWithLatestAddress(ctx context.Context, address string) (*SuperNodeInfo, error)
DecodeCascadeMetadata(ctx context.Context, action Action) (actiontypes.CascadeMetadata, error)
VerifySignature(ctx context.Context, accountAddr string, data []byte, signature []byte) error
+ // GetBalance returns the bank balance for the given address and denom.
+ GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error)
}
// SuperNodeInfo contains supernode information with latest address
@@ -213,6 +216,22 @@ func (a *Adapter) VerifySignature(ctx context.Context, accountAddr string, data,
return nil
}
+// GetBalance fetches the balance for a given address and denom via the underlying lumera client.
+func (a *Adapter) GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) {
+ a.logger.Debug(ctx, "Querying bank balance", "address", address, "denom", denom)
+ resp, err := a.client.Bank().Balance(ctx, address, denom)
+ if err != nil {
+ a.logger.Error(ctx, "Failed to query bank balance", "address", address, "denom", denom, "error", err)
+ return nil, fmt.Errorf("failed to query bank balance: %w", err)
+ }
+ if resp == nil || resp.Balance == nil {
+ a.logger.Error(ctx, "Nil balance response", "address", address, "denom", denom)
+ return nil, fmt.Errorf("nil balance response for %s %s", address, denom)
+ }
+ a.logger.Debug(ctx, "Successfully fetched bank balance", "amount", resp.Balance.Amount.String(), "denom", resp.Balance.Denom)
+ return resp, nil
+}
+
// DecodeCascadeMetadata decodes the raw metadata bytes into CascadeMetadata
func (a *Adapter) DecodeCascadeMetadata(ctx context.Context, action Action) (actiontypes.CascadeMetadata, error) {
if action.ActionType != "ACTION_TYPE_CASCADE" {
diff --git a/sdk/adapters/supernodeservice/adapter.go b/sdk/adapters/supernodeservice/adapter.go
index f9e9e6da..9712915c 100644
--- a/sdk/adapters/supernodeservice/adapter.go
+++ b/sdk/adapters/supernodeservice/adapter.go
@@ -2,7 +2,6 @@ package supernodeservice
import (
"context"
- "encoding/json"
"fmt"
"io"
"os"
@@ -345,30 +344,7 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca
event.KeyTaskID: in.TaskId,
event.KeyActionID: in.ActionID,
}
- // For artefacts stored, parse JSON payload with metrics (new minimal shape)
- if resp.EventType == cascade.SupernodeEventType_ARTEFACTS_STORED {
- var payload map[string]any
- if err := json.Unmarshal([]byte(resp.Message), &payload); err == nil {
- if store, ok := payload["store"].(map[string]any); ok {
- if v, ok := store["duration_ms"].(float64); ok {
- edata[event.KeyStoreDurationMS] = int64(v)
- }
- if v, ok := store["symbols_first_pass"].(float64); ok {
- edata[event.KeyStoreSymbolsFirstPass] = int64(v)
- }
- if v, ok := store["symbols_total"].(float64); ok {
- edata[event.KeyStoreSymbolsTotal] = int64(v)
- }
- if v, ok := store["id_files_count"].(float64); ok {
- edata[event.KeyStoreIDFilesCount] = int64(v)
- }
- if v, ok := store["calls_by_ip"]; ok {
- edata[event.KeyStoreCallsByIP] = v
- }
- }
- }
- }
- in.EventLogger(ctx, toSdkEventWithMessage(resp.EventType, resp.Message), resp.Message, edata)
+ in.EventLogger(ctx, toSdkEvent(resp.EventType), resp.Message, edata)
}
// Optionally capture the final response
@@ -395,18 +371,18 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca
}, nil
}
-func (a *cascadeAdapter) GetSupernodeStatus(ctx context.Context) (SupernodeStatusresponse, error) {
- // Gate P2P metrics via context option to keep API backward compatible
- req := &supernode.StatusRequest{IncludeP2PMetrics: includeP2PMetrics(ctx)}
+func (a *cascadeAdapter) GetSupernodeStatus(ctx context.Context) (*supernode.StatusResponse, error) {
+ // Always include P2P metrics to populate peers count for eligibility checks
+ req := &supernode.StatusRequest{IncludeP2PMetrics: true}
resp, err := a.statusClient.GetStatus(ctx, req)
if err != nil {
a.logger.Error(ctx, "Failed to get supernode status", "error", err)
- return SupernodeStatusresponse{}, fmt.Errorf("failed to get supernode status: %w", err)
+ return nil, fmt.Errorf("failed to get supernode status: %w", err)
}
a.logger.Debug(ctx, "Supernode status retrieved", "status", resp)
- return *toSdkSupernodeStatus(resp), nil
+ return resp, nil
}
// CascadeSupernodeDownload downloads a file from a supernode gRPC stream
@@ -446,6 +422,7 @@ func (a *cascadeAdapter) CascadeSupernodeDownload(
bytesWritten int64
chunkIndex int
startedEmitted bool
+ downloadStart time.Time
)
// 3. Receive streamed responses
@@ -470,46 +447,11 @@ func (a *cascadeAdapter) CascadeSupernodeDownload(
event.KeyEventType: x.Event.EventType,
event.KeyMessage: x.Event.Message,
}
- // Parse detailed metrics for downloaded event if JSON payload provided (new minimal shape)
- if x.Event.EventType == cascade.SupernodeEventType_ARTEFACTS_DOWNLOADED {
- var payload map[string]any
- if err := json.Unmarshal([]byte(x.Event.Message), &payload); err == nil {
- if retrieve, ok := payload["retrieve"].(map[string]any); ok {
- if v, ok := retrieve["found_local"].(float64); ok {
- edata[event.KeyRetrieveFoundLocal] = int64(v)
- }
- if v, ok := retrieve["retrieve_ms"].(float64); ok {
- edata[event.KeyRetrieveMS] = int64(v)
- }
- if v, ok := retrieve["decode_ms"].(float64); ok {
- edata[event.KeyDecodeMS] = int64(v)
- }
- if v, ok := retrieve["calls_by_ip"]; ok {
- edata[event.KeyRetrieveCallsByIP] = v
- }
- // Optional additional retrieve fields
- if v, ok := retrieve["keys"].(float64); ok {
- edata[event.KeyRetrieveKeys] = int64(v)
- }
- if v, ok := retrieve["required"].(float64); ok {
- edata[event.KeyRetrieveRequired] = int64(v)
- }
- if v, ok := retrieve["found_net"].(float64); ok {
- edata[event.KeyRetrieveFoundNet] = int64(v)
- }
- if v, ok := retrieve["target_required_percent"].(float64); ok {
- edata[event.KeyTargetRequiredPercent] = v
- }
- if v, ok := retrieve["target_required_count"].(float64); ok {
- edata[event.KeyTargetRequiredCount] = int64(v)
- }
- if v, ok := retrieve["total_symbols"].(float64); ok {
- edata[event.KeyTotalSymbols] = int64(v)
- }
- }
- }
- }
- in.EventLogger(ctx, toSdkEvent(x.Event.EventType), x.Event.Message, edata)
+ // Avoid blocking Recv loop on event handling; dispatch asynchronously
+ evtType := toSdkEvent(x.Event.EventType)
+ go func(ed event.EventData, et event.EventType, msg string) {
+ in.EventLogger(ctx, et, msg, ed)
+ }(edata, evtType, x.Event.Message)
}
// 3b. Actual data chunk
@@ -520,7 +462,10 @@ func (a *cascadeAdapter) CascadeSupernodeDownload(
}
if !startedEmitted {
if in.EventLogger != nil {
- in.EventLogger(ctx, event.SDKDownloadStarted, "Download started", event.EventData{event.KeyActionID: in.ActionID})
+ // mark start to compute throughput at completion
+ downloadStart = time.Now()
+ // Emit started asynchronously to avoid blocking
+ go in.EventLogger(ctx, event.SDKDownloadStarted, "Download started", event.EventData{event.KeyActionID: in.ActionID})
}
startedEmitted = true
}
@@ -538,7 +483,25 @@ func (a *cascadeAdapter) CascadeSupernodeDownload(
a.logger.Info(ctx, "download complete", "bytes_written", bytesWritten, "path", in.OutputPath, "action_id", in.ActionID)
if in.EventLogger != nil {
- in.EventLogger(ctx, event.SDKDownloadCompleted, "Download completed", event.EventData{event.KeyActionID: in.ActionID, event.KeyOutputPath: in.OutputPath})
+ // Compute metrics if we marked a start
+ var elapsed float64
+ var throughput float64
+ if !downloadStart.IsZero() {
+ elapsed = time.Since(downloadStart).Seconds()
+ mb := float64(bytesWritten) / (1024.0 * 1024.0)
+ if elapsed > 0 {
+ throughput = mb / elapsed
+ }
+ }
+ // Emit completion asynchronously with metrics
+ go in.EventLogger(ctx, event.SDKDownloadCompleted, "Download completed", event.EventData{
+ event.KeyActionID: in.ActionID,
+ event.KeyOutputPath: in.OutputPath,
+ event.KeyBytesTotal: bytesWritten,
+ event.KeyChunks: chunkIndex,
+ event.KeyElapsedSeconds: elapsed,
+ event.KeyThroughputMBS: throughput,
+ })
}
return &CascadeSupernodeDownloadResponse{
Success: true,
@@ -588,185 +551,3 @@ func toSdkEvent(e cascade.SupernodeEventType) event.EventType {
return event.SupernodeUnknown
}
}
-
-// toSdkEventWithMessage extends event mapping using message content for finer granularity
-func toSdkEventWithMessage(e cascade.SupernodeEventType, msg string) event.EventType {
- // Detect finalize simulation pass piggybacked on RQID_VERIFIED
- if e == cascade.SupernodeEventType_RQID_VERIFIED && msg == "finalize action simulation passed" {
- return event.SupernodeFinalizeSimulated
- }
- return toSdkEvent(e)
-}
-
-func toSdkSupernodeStatus(resp *supernode.StatusResponse) *SupernodeStatusresponse {
- result := &SupernodeStatusresponse{}
- result.Version = resp.Version
- result.UptimeSeconds = resp.UptimeSeconds
-
- // Convert Resources data
- if resp.Resources != nil {
- // Convert CPU data
- if resp.Resources.Cpu != nil {
- result.Resources.CPU.UsagePercent = resp.Resources.Cpu.UsagePercent
- result.Resources.CPU.Cores = resp.Resources.Cpu.Cores
- }
-
- // Convert Memory data
- if resp.Resources.Memory != nil {
- result.Resources.Memory.TotalGB = resp.Resources.Memory.TotalGb
- result.Resources.Memory.UsedGB = resp.Resources.Memory.UsedGb
- result.Resources.Memory.AvailableGB = resp.Resources.Memory.AvailableGb
- result.Resources.Memory.UsagePercent = resp.Resources.Memory.UsagePercent
- }
-
- // Convert Storage data
- result.Resources.Storage = make([]StorageInfo, 0, len(resp.Resources.StorageVolumes))
- for _, storage := range resp.Resources.StorageVolumes {
- result.Resources.Storage = append(result.Resources.Storage, StorageInfo{
- Path: storage.Path,
- TotalBytes: storage.TotalBytes,
- UsedBytes: storage.UsedBytes,
- AvailableBytes: storage.AvailableBytes,
- UsagePercent: storage.UsagePercent,
- })
- }
-
- // Copy hardware summary
- result.Resources.HardwareSummary = resp.Resources.HardwareSummary
- }
-
- // Convert RunningTasks data
- result.RunningTasks = make([]ServiceTasks, 0, len(resp.RunningTasks))
- for _, service := range resp.RunningTasks {
- result.RunningTasks = append(result.RunningTasks, ServiceTasks{
- ServiceName: service.ServiceName,
- TaskIDs: service.TaskIds,
- TaskCount: service.TaskCount,
- })
- }
-
- // Convert RegisteredServices data
- result.RegisteredServices = make([]string, len(resp.RegisteredServices))
- copy(result.RegisteredServices, resp.RegisteredServices)
-
- // Convert Network data
- if resp.Network != nil {
- result.Network.PeersCount = resp.Network.PeersCount
- result.Network.PeerAddresses = make([]string, len(resp.Network.PeerAddresses))
- copy(result.Network.PeerAddresses, resp.Network.PeerAddresses)
- }
-
- // Copy rank and IP address
- result.Rank = resp.Rank
- result.IPAddress = resp.IpAddress
-
- // Map optional P2P metrics
- if resp.P2PMetrics != nil {
- // DHT metrics
- if resp.P2PMetrics.DhtMetrics != nil {
- // Store success recent
- for _, p := range resp.P2PMetrics.DhtMetrics.StoreSuccessRecent {
- result.P2PMetrics.DhtMetrics.StoreSuccessRecent = append(result.P2PMetrics.DhtMetrics.StoreSuccessRecent, struct {
- TimeUnix int64
- Requests int32
- Successful int32
- SuccessRate float64
- }{
- TimeUnix: p.TimeUnix,
- Requests: p.Requests,
- Successful: p.Successful,
- SuccessRate: p.SuccessRate,
- })
- }
- // Batch retrieve recent
- for _, p := range resp.P2PMetrics.DhtMetrics.BatchRetrieveRecent {
- result.P2PMetrics.DhtMetrics.BatchRetrieveRecent = append(result.P2PMetrics.DhtMetrics.BatchRetrieveRecent, struct {
- TimeUnix int64
- Keys int32
- Required int32
- FoundLocal int32
- FoundNetwork int32
- DurationMS int64
- }{
- TimeUnix: p.TimeUnix,
- Keys: p.Keys,
- Required: p.Required,
- FoundLocal: p.FoundLocal,
- FoundNetwork: p.FoundNetwork,
- DurationMS: p.DurationMs,
- })
- }
- result.P2PMetrics.DhtMetrics.HotPathBannedSkips = resp.P2PMetrics.DhtMetrics.HotPathBannedSkips
- result.P2PMetrics.DhtMetrics.HotPathBanIncrements = resp.P2PMetrics.DhtMetrics.HotPathBanIncrements
- }
-
- // Network handle metrics
- if resp.P2PMetrics.NetworkHandleMetrics != nil {
- if result.P2PMetrics.NetworkHandleMetrics == nil {
- result.P2PMetrics.NetworkHandleMetrics = map[string]struct {
- Total int64
- Success int64
- Failure int64
- Timeout int64
- }{}
- }
- for k, v := range resp.P2PMetrics.NetworkHandleMetrics {
- result.P2PMetrics.NetworkHandleMetrics[k] = struct {
- Total int64
- Success int64
- Failure int64
- Timeout int64
- }{
- Total: v.Total,
- Success: v.Success,
- Failure: v.Failure,
- Timeout: v.Timeout,
- }
- }
- }
-
- // Conn pool metrics
- if resp.P2PMetrics.ConnPoolMetrics != nil {
- if result.P2PMetrics.ConnPoolMetrics == nil {
- result.P2PMetrics.ConnPoolMetrics = map[string]int64{}
- }
- for k, v := range resp.P2PMetrics.ConnPoolMetrics {
- result.P2PMetrics.ConnPoolMetrics[k] = v
- }
- }
-
- // Ban list
- for _, b := range resp.P2PMetrics.BanList {
- result.P2PMetrics.BanList = append(result.P2PMetrics.BanList, struct {
- ID string
- IP string
- Port uint32
- Count int32
- CreatedAtUnix int64
- AgeSeconds int64
- }{
- ID: b.Id,
- IP: b.Ip,
- Port: b.Port,
- Count: b.Count,
- CreatedAtUnix: b.CreatedAtUnix,
- AgeSeconds: b.AgeSeconds,
- })
- }
-
- // Database
- if resp.P2PMetrics.Database != nil {
- result.P2PMetrics.Database.P2PDBSizeMB = resp.P2PMetrics.Database.P2PDbSizeMb
- result.P2PMetrics.Database.P2PDBRecordsCount = resp.P2PMetrics.Database.P2PDbRecordsCount
- }
-
- // Disk
- if resp.P2PMetrics.Disk != nil {
- result.P2PMetrics.Disk.AllMB = resp.P2PMetrics.Disk.AllMb
- result.P2PMetrics.Disk.UsedMB = resp.P2PMetrics.Disk.UsedMb
- result.P2PMetrics.Disk.FreeMB = resp.P2PMetrics.Disk.FreeMb
- }
- }
-
- return result
-}
diff --git a/sdk/adapters/supernodeservice/options.go b/sdk/adapters/supernodeservice/options.go
deleted file mode 100644
index 547a28c9..00000000
--- a/sdk/adapters/supernodeservice/options.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package supernodeservice
-
-import "context"
-
-// internal context key to toggle P2P metrics in status requests
-type ctxKey string
-
-const ctxKeyIncludeP2P ctxKey = "include_p2p_metrics"
-
-// WithIncludeP2PMetrics returns a child context that requests detailed P2P metrics
-// (and peer info) in status responses.
-func WithIncludeP2PMetrics(ctx context.Context) context.Context {
- return context.WithValue(ctx, ctxKeyIncludeP2P, true)
-}
-
-// WithP2PMetrics allows explicitly setting the include flag.
-func WithP2PMetrics(ctx context.Context, include bool) context.Context {
- return context.WithValue(ctx, ctxKeyIncludeP2P, include)
-}
-
-// includeP2PMetrics reads the flag from context; defaults to false when unset.
-func includeP2PMetrics(ctx context.Context) bool {
- v := ctx.Value(ctxKeyIncludeP2P)
- if b, ok := v.(bool); ok {
- return b
- }
- return false
-}
-
diff --git a/sdk/adapters/supernodeservice/types.go b/sdk/adapters/supernodeservice/types.go
index 4dbdd7b6..89e04cae 100644
--- a/sdk/adapters/supernodeservice/types.go
+++ b/sdk/adapters/supernodeservice/types.go
@@ -1,11 +1,12 @@
package supernodeservice
import (
- "context"
+ "context"
- "google.golang.org/grpc"
+ pb "github.com/LumeraProtocol/supernode/v2/gen/supernode"
+ "google.golang.org/grpc"
- "github.com/LumeraProtocol/supernode/v2/sdk/event"
+ "github.com/LumeraProtocol/supernode/v2/sdk/event"
)
type LoggerFunc func(
@@ -28,93 +29,7 @@ type CascadeSupernodeRegisterResponse struct {
TxHash string
}
-// ServiceTasks contains task information for a specific service
-type ServiceTasks struct {
- ServiceName string
- TaskIDs []string
- TaskCount int32
-}
-
-// StorageInfo contains storage metrics for a specific path
-type StorageInfo struct {
- Path string
- TotalBytes uint64
- UsedBytes uint64
- AvailableBytes uint64
- UsagePercent float64
-}
-
-type SupernodeStatusresponse struct {
- Version string // Supernode version
- UptimeSeconds uint64 // Uptime in seconds
- Resources struct {
- CPU struct {
- UsagePercent float64
- Cores int32
- }
- Memory struct {
- TotalGB float64
- UsedGB float64
- AvailableGB float64
- UsagePercent float64
- }
- Storage []StorageInfo
- HardwareSummary string // Formatted hardware summary
- }
- RunningTasks []ServiceTasks // Services with running tasks
- RegisteredServices []string // All available service names
- Network struct {
- PeersCount int32 // Number of connected peers
- PeerAddresses []string // List of peer addresses
- }
- Rank int32 // Rank in top supernodes list (0 if not in top list)
- IPAddress string // Supernode IP address with port
- // Optional detailed P2P metrics (present when requested)
- P2PMetrics struct {
- DhtMetrics struct {
- StoreSuccessRecent []struct {
- TimeUnix int64
- Requests int32
- Successful int32
- SuccessRate float64
- }
- BatchRetrieveRecent []struct {
- TimeUnix int64
- Keys int32
- Required int32
- FoundLocal int32
- FoundNetwork int32
- DurationMS int64
- }
- HotPathBannedSkips int64
- HotPathBanIncrements int64
- }
- NetworkHandleMetrics map[string]struct {
- Total int64
- Success int64
- Failure int64
- Timeout int64
- }
- ConnPoolMetrics map[string]int64
- BanList []struct {
- ID string
- IP string
- Port uint32
- Count int32
- CreatedAtUnix int64
- AgeSeconds int64
- }
- Database struct {
- P2PDBSizeMB float64
- P2PDBRecordsCount int64
- }
- Disk struct {
- AllMB float64
- UsedMB float64
- FreeMB float64
- }
- }
-}
+// Use generated proto types directly for status
type CascadeSupernodeDownloadRequest struct {
ActionID string
TaskID string
@@ -131,7 +46,7 @@ type CascadeSupernodeDownloadResponse struct {
//go:generate mockery --name=CascadeServiceClient --output=testutil/mocks --outpkg=mocks --filename=cascade_service_mock.go
type CascadeServiceClient interface {
- CascadeSupernodeRegister(ctx context.Context, in *CascadeSupernodeRegisterRequest, opts ...grpc.CallOption) (*CascadeSupernodeRegisterResponse, error)
- GetSupernodeStatus(ctx context.Context) (SupernodeStatusresponse, error)
- CascadeSupernodeDownload(ctx context.Context, in *CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*CascadeSupernodeDownloadResponse, error)
+ CascadeSupernodeRegister(ctx context.Context, in *CascadeSupernodeRegisterRequest, opts ...grpc.CallOption) (*CascadeSupernodeRegisterResponse, error)
+ GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error)
+ CascadeSupernodeDownload(ctx context.Context, in *CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*CascadeSupernodeDownloadResponse, error)
}
diff --git a/sdk/event/keys.go b/sdk/event/keys.go
index 9d68b818..04e27bd3 100644
--- a/sdk/event/keys.go
+++ b/sdk/event/keys.go
@@ -30,26 +30,5 @@ const (
KeyTaskID EventDataKey = "task_id"
KeyActionID EventDataKey = "action_id"
- // Removed legacy cascade storage metrics keys (meta/sym timings and nodes)
-
- // Combined store metrics (metadata + symbols) — new minimal only
- KeyStoreDurationMS EventDataKey = "store_duration_ms"
- // New minimal store metrics
- KeyStoreSymbolsFirstPass EventDataKey = "store_symbols_first_pass"
- KeyStoreSymbolsTotal EventDataKey = "store_symbols_total"
- KeyStoreIDFilesCount EventDataKey = "store_id_files_count"
- KeyStoreCallsByIP EventDataKey = "store_calls_by_ip"
-
- // Download (retrieve) detailed metrics — new minimal only
- KeyRetrieveFoundLocal EventDataKey = "retrieve_found_local"
- KeyRetrieveMS EventDataKey = "retrieve_ms"
- KeyDecodeMS EventDataKey = "decode_ms"
- KeyRetrieveCallsByIP EventDataKey = "retrieve_calls_by_ip"
- // Additional retrieve summary fields
- KeyRetrieveKeys EventDataKey = "retrieve_keys"
- KeyRetrieveRequired EventDataKey = "retrieve_required"
- KeyRetrieveFoundNet EventDataKey = "retrieve_found_net"
- KeyTargetRequiredPercent EventDataKey = "target_required_percent"
- KeyTargetRequiredCount EventDataKey = "target_required_count"
- KeyTotalSymbols EventDataKey = "total_symbols"
+ // Removed legacy cascade storage/retrieve metrics keys
)
diff --git a/sdk/net/client.go b/sdk/net/client.go
index dc8950df..b88fe75b 100644
--- a/sdk/net/client.go
+++ b/sdk/net/client.go
@@ -1,11 +1,12 @@
package net
import (
- "context"
+ "context"
- "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice"
- "google.golang.org/grpc"
- "google.golang.org/grpc/health/grpc_health_v1"
+ pb "github.com/LumeraProtocol/supernode/v2/gen/supernode"
+ "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/health/grpc_health_v1"
)
// SupernodeClient defines the interface for communicating with supernodes
@@ -15,7 +16,7 @@ type SupernodeClient interface {
// HealthCheck performs a health check on the supernode
HealthCheck(ctx context.Context) (*grpc_health_v1.HealthCheckResponse, error)
- GetSupernodeStatus(ctx context.Context) (*supernodeservice.SupernodeStatusresponse, error)
+ GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error)
// Download downloads the cascade action file
Download(ctx context.Context, in *supernodeservice.CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*supernodeservice.CascadeSupernodeDownloadResponse, error)
diff --git a/sdk/net/factory.go b/sdk/net/factory.go
index b9fad9fd..f3486780 100644
--- a/sdk/net/factory.go
+++ b/sdk/net/factory.go
@@ -39,9 +39,10 @@ func NewClientFactory(ctx context.Context, logger log.Logger, keyring keyring.Ke
// Tuned for 1GB max files with 4MB chunks
// Reduce in-flight memory by aligning windows and msg sizes to chunk size.
opts := client.DefaultClientOptions()
- opts.MaxRecvMsgSize = 8 * 1024 * 1024 // 8MB: supports 4MB chunks + overhead
- opts.MaxSendMsgSize = 8 * 1024 * 1024 // 8MB: supports 4MB chunks + overhead
- opts.InitialWindowSize = 4 * 1024 * 1024 // 4MB per-stream window ≈ chunk size
+ opts.MaxRecvMsgSize = 12 * 1024 * 1024 // 12MB: supports 4MB chunks + overhead
+ opts.MaxSendMsgSize = 12 * 1024 * 1024 // 12MB: supports 4MB chunks + overhead
+ // Increase per-stream window to provide headroom for first data chunk + events
+ opts.InitialWindowSize = 12 * 1024 * 1024 // 12MB per-stream window
opts.InitialConnWindowSize = 64 * 1024 * 1024 // 64MB per-connection window
return &ClientFactory{
diff --git a/sdk/net/impl.go b/sdk/net/impl.go
index ab0f7b28..cd6bf10f 100644
--- a/sdk/net/impl.go
+++ b/sdk/net/impl.go
@@ -1,20 +1,21 @@
package net
import (
- "context"
- "fmt"
-
- "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx"
- ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials"
- "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials/alts/conn"
- "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/client"
- "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera"
- "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice"
- "github.com/LumeraProtocol/supernode/v2/sdk/log"
-
- "github.com/cosmos/cosmos-sdk/crypto/keyring"
- "google.golang.org/grpc"
- "google.golang.org/grpc/health/grpc_health_v1"
+ "context"
+ "fmt"
+
+ "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx"
+ ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials"
+ "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials/alts/conn"
+ "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/client"
+ "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera"
+ "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice"
+ "github.com/LumeraProtocol/supernode/v2/sdk/log"
+
+ pb "github.com/LumeraProtocol/supernode/v2/gen/supernode"
+ "github.com/cosmos/cosmos-sdk/crypto/keyring"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/health/grpc_health_v1"
)
// supernodeClient implements the SupernodeClient interface
@@ -128,14 +129,14 @@ func (c *supernodeClient) HealthCheck(ctx context.Context) (*grpc_health_v1.Heal
return resp, nil
}
-func (c *supernodeClient) GetSupernodeStatus(ctx context.Context) (*supernodeservice.SupernodeStatusresponse, error) {
- resp, err := c.cascadeClient.GetSupernodeStatus(ctx)
- if err != nil {
- return nil, fmt.Errorf("failed to get supernode status: %w", err)
- }
+func (c *supernodeClient) GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) {
+ resp, err := c.cascadeClient.GetSupernodeStatus(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get supernode status: %w", err)
+ }
- c.logger.Debug(ctx, "Supernode status retrieved successfully")
- return &resp, nil
+ c.logger.Debug(ctx, "Supernode status retrieved successfully")
+ return resp, nil
}
// Download downloads the cascade action file
diff --git a/sdk/task/download.go b/sdk/task/download.go
index 3e85007a..2c727ae9 100644
--- a/sdk/task/download.go
+++ b/sdk/task/download.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"os"
- "sort"
"time"
"github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera"
@@ -77,51 +76,6 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern
}
}
- // Optionally rank supernodes by available memory to improve success for large files
- // We keep a short timeout per status fetch to avoid delaying downloads.
- type rankedSN struct {
- sn lumera.Supernode
- availGB float64
- hasStatus bool
- }
- ranked := make([]rankedSN, 0, len(supernodes))
- for _, sn := range supernodes {
- ranked = append(ranked, rankedSN{sn: sn})
- }
-
- // Probe supernode status with short timeouts and close clients promptly
- for i := range ranked {
- sn := ranked[i].sn
- // 2s status timeout to keep this pass fast
- stx, cancel := context.WithTimeout(ctx, 2*time.Second)
- client, err := clientFactory.CreateClient(stx, sn)
- if err != nil {
- cancel()
- continue
- }
- status, err := client.GetSupernodeStatus(stx)
- _ = client.Close(stx)
- cancel()
- if err != nil {
- continue
- }
- ranked[i].hasStatus = true
- ranked[i].availGB = status.Resources.Memory.AvailableGB
- }
-
- // Sort: nodes with status first, higher available memory first
- sort.Slice(ranked, func(i, j int) bool {
- if ranked[i].hasStatus != ranked[j].hasStatus {
- return ranked[i].hasStatus && !ranked[j].hasStatus
- }
- return ranked[i].availGB > ranked[j].availGB
- })
-
- // Rebuild the supernodes list in the sorted order
- for i := range ranked {
- supernodes[i] = ranked[i].sn
- }
-
// Try supernodes sequentially, one by one
var lastErr error
for idx, sn := range supernodes {
@@ -146,8 +100,8 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern
continue
}
- // Success; return to caller
- return nil
+ // Success; return to caller
+ return nil
}
if lastErr != nil {
@@ -176,15 +130,15 @@ func (t *CascadeDownloadTask) attemptDownload(
t.LogEvent(ctx, evt, msg, data)
}
- resp, err := client.Download(ctx, req)
- if err != nil {
- return fmt.Errorf("download from %s: %w", sn.CosmosAddress, err)
- }
- if !resp.Success {
- return fmt.Errorf("download rejected by %s: %s", sn.CosmosAddress, resp.Message)
- }
+ resp, err := client.Download(ctx, req)
+ if err != nil {
+ return fmt.Errorf("download from %s: %w", sn.CosmosAddress, err)
+ }
+ if !resp.Success {
+ return fmt.Errorf("download rejected by %s: %s", sn.CosmosAddress, resp.Message)
+ }
- return nil
+ return nil
}
// downloadResult holds the result of a successful download attempt
diff --git a/sdk/task/helpers.go b/sdk/task/helpers.go
index f887aeb2..2ea8bcaa 100644
--- a/sdk/task/helpers.go
+++ b/sdk/task/helpers.go
@@ -3,21 +3,16 @@ package task
import (
"context"
"encoding/base64"
- "errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera"
- snsvc "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice"
- "github.com/LumeraProtocol/supernode/v2/sdk/net"
)
const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit
-var ErrNoPeersConnected = errors.New("no P2P peers connected on available supernodes")
-
// ValidateFileSize checks if a file size is within the allowed 1GB limit
func ValidateFileSize(filePath string) error {
fileInfo, err := os.Stat(filePath)
@@ -105,47 +100,7 @@ func (m *ManagerImpl) validateSignature(ctx context.Context, action lumera.Actio
return nil
}
-// checkSupernodesPeerConnectivity verifies that at least one supernode has P2P peers connected
-func (m *ManagerImpl) checkSupernodesPeerConnectivity(ctx context.Context, blockHeight int64) error {
- // Fetch supernodes for the action's block height
- supernodes, err := m.lumeraClient.GetSupernodes(ctx, blockHeight)
- if err != nil {
- return fmt.Errorf("failed to get supernodes: %w", err)
- }
-
- if len(supernodes) == 0 {
- return fmt.Errorf("no supernodes available for block height %d", blockHeight)
- }
-
- // Check each supernode for peer connectivity
- factoryCfg := net.FactoryConfig{
- LocalCosmosAddress: m.config.Account.LocalCosmosAddress,
- PeerType: m.config.Account.PeerType,
- }
- clientFactory := net.NewClientFactory(ctx, m.logger, m.keyring, m.lumeraClient, factoryCfg)
-
- for _, sn := range supernodes {
- client, err := clientFactory.CreateClient(ctx, sn)
- if err != nil {
- continue // Skip this supernode if we can't connect
- }
-
- // Request peer info and P2P metrics to assess connectivity
- ctxWithMetrics := snsvc.WithIncludeP2PMetrics(ctx)
- status, err := client.GetSupernodeStatus(ctxWithMetrics)
- client.Close(ctx)
- if err != nil {
- continue // Skip this supernode if we can't get status
- }
-
- // Check if this supernode has peers
- if status.Network.PeersCount > 1 {
- return nil // Found at least one supernode with peers
- }
- }
-
- return ErrNoPeersConnected
-}
+// (Removed) Peers connectivity preflight is now enforced during discovery in isServing.
func (m *ManagerImpl) validateDownloadAction(ctx context.Context, actionID string) (lumera.Action, error) {
action, err := m.lumeraClient.GetAction(ctx, actionID)
diff --git a/sdk/task/manager.go b/sdk/task/manager.go
index 052088f3..c5a65bf4 100644
--- a/sdk/task/manager.go
+++ b/sdk/task/manager.go
@@ -107,11 +107,7 @@ func (m *ManagerImpl) CreateCascadeTask(ctx context.Context, filePath string, ac
return "", err
}
- // Check peer connectivity before creating task
- if err := m.checkSupernodesPeerConnectivity(taskCtx, action.Height); err != nil {
- cancel() // Clean up if peer check fails
- return "", err
- }
+ // Peer connectivity is now validated during discovery health checks
taskID := uuid.New().String()[:8]
@@ -280,11 +276,7 @@ func (m *ManagerImpl) CreateDownloadTask(ctx context.Context, actionID string, o
return "", fmt.Errorf("no filename found in cascade metadata")
}
- // Check peer connectivity before creating task
- if err := m.checkSupernodesPeerConnectivity(taskCtx, action.Height); err != nil {
- cancel() // Clean up if peer check fails
- return "", err
- }
+ // Peer connectivity is now validated during discovery health checks
// Ensure the output path includes the correct filename
finalOutputPath := path.Join(outputDir, action.ID, metadata.FileName)
diff --git a/sdk/task/task.go b/sdk/task/task.go
index e359c907..bb402975 100644
--- a/sdk/task/task.go
+++ b/sdk/task/task.go
@@ -6,8 +6,10 @@ import (
"fmt"
"sync"
+ sdkmath "cosmossdk.io/math"
"github.com/LumeraProtocol/supernode/v2/pkg/errgroup"
"github.com/LumeraProtocol/supernode/v2/pkg/logtrace"
+ txmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx"
"github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera"
"github.com/LumeraProtocol/supernode/v2/sdk/config"
"github.com/LumeraProtocol/supernode/v2/sdk/event"
@@ -85,10 +87,6 @@ func (t *BaseTask) fetchSupernodes(ctx context.Context, height int64) (lumera.Su
return nil, errors.New("no supernodes found")
}
- if len(sns) > 10 {
- sns = sns[:10]
- }
-
// Keep only SERVING nodes (done in parallel – keeps latency flat)
healthy := make(lumera.Supernodes, 0, len(sns))
eg, ctx := errgroup.WithContext(ctx)
@@ -126,11 +124,36 @@ func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool {
PeerType: t.config.Account.PeerType,
}).CreateClient(ctx, sn)
if err != nil {
- logtrace.Info(ctx, "Failed to create client for supernode", logtrace.Fields{logtrace.FieldMethod: "isServing"})
+ logtrace.Debug(ctx, "Failed to create client for supernode", logtrace.Fields{logtrace.FieldMethod: "isServing"})
return false
}
defer client.Close(ctx)
+ // First check gRPC health
resp, err := client.HealthCheck(ctx)
- return err == nil && resp.Status == grpc_health_v1.HealthCheckResponse_SERVING
+ if err != nil || resp.Status != grpc_health_v1.HealthCheckResponse_SERVING {
+ return false
+ }
+
+ // Then check P2P peers count via status
+ status, err := client.GetSupernodeStatus(ctx)
+ if err != nil {
+ return false
+ }
+ if status.Network.PeersCount <= 1 {
+ return false
+ }
+
+ denom := txmod.DefaultFeeDenom // base denom (micro), e.g., "ulume"
+ bal, err := t.client.GetBalance(ctx, sn.CosmosAddress, denom)
+ if err != nil || bal == nil || bal.Balance == nil {
+ return false
+ }
+ // Require at least 1 LUME = 10^6 micro (ulume)
+ min := sdkmath.NewInt(1_000_000)
+ if bal.Balance.Amount.LT(min) {
+ return false
+ }
+
+ return true
}
diff --git a/supernode/cmd/service.go b/supernode/cmd/service.go
index d4af1269..8cd8708f 100644
--- a/supernode/cmd/service.go
+++ b/supernode/cmd/service.go
@@ -23,7 +23,7 @@ func RunServices(ctx context.Context, services ...service) error {
if err != nil {
logtrace.Error(ctx, "service stopped with an error", logtrace.Fields{"service": reflect.TypeOf(service).String(), "error": err})
} else {
- logtrace.Info(ctx, "service stopped", logtrace.Fields{"service": reflect.TypeOf(service).String()})
+ logtrace.Debug(ctx, "service stopped", logtrace.Fields{"service": reflect.TypeOf(service).String()})
}
return err
})
diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go
index befaf85d..8c754e47 100644
--- a/supernode/cmd/start.go
+++ b/supernode/cmd/start.go
@@ -3,9 +3,12 @@ package cmd
import (
"context"
"fmt"
+ "net/http"
+ _ "net/http/pprof"
"os"
"os/signal"
"path/filepath"
+ "strings"
"syscall"
"github.com/LumeraProtocol/supernode/v2/p2p"
@@ -43,7 +46,7 @@ The supernode will connect to the Lumera network and begin participating in the
// Log configuration info
cfgFile := filepath.Join(baseDir, DefaultConfigFile)
- logtrace.Info(ctx, "Starting supernode with configuration", logtrace.Fields{"config_file": cfgFile, "keyring_dir": appConfig.GetKeyringDir(), "key_name": appConfig.SupernodeConfig.KeyName})
+ logtrace.Debug(ctx, "Starting supernode with configuration", logtrace.Fields{"config_file": cfgFile, "keyring_dir": appConfig.GetKeyringDir(), "key_name": appConfig.SupernodeConfig.KeyName})
// Initialize keyring
kr, err := initKeyringFromConfig(appConfig)
@@ -58,7 +61,7 @@ The supernode will connect to the Lumera network and begin participating in the
}
// Verify config matches chain registration before starting services
- logtrace.Info(ctx, "Verifying configuration against chain registration", logtrace.Fields{})
+ logtrace.Debug(ctx, "Verifying configuration against chain registration", logtrace.Fields{})
configVerifier := verifier.NewConfigVerifier(appConfig, lumeraClient, kr)
verificationResult, err := configVerifier.VerifyConfig(ctx)
if err != nil {
@@ -73,7 +76,15 @@ The supernode will connect to the Lumera network and begin participating in the
logtrace.Warn(ctx, "Config verification warnings", logtrace.Fields{"summary": verificationResult.Summary()})
}
- logtrace.Info(ctx, "Configuration verification successful", logtrace.Fields{})
+ logtrace.Debug(ctx, "Configuration verification successful", logtrace.Fields{})
+
+ // Set Datadog host to identity and service to latest IP address from chain
+ logtrace.SetDatadogHost(appConfig.SupernodeConfig.Identity)
+ if snInfo, err := lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, appConfig.SupernodeConfig.Identity); err == nil && snInfo != nil {
+ if ip := strings.TrimSpace(snInfo.LatestAddress); ip != "" {
+ logtrace.SetDatadogService(ip)
+ }
+ }
// Initialize RaptorQ store for Cascade processing
rqStore, err := initRQStore(ctx, appConfig)
@@ -81,8 +92,8 @@ The supernode will connect to the Lumera network and begin participating in the
logtrace.Fatal(ctx, "Failed to initialize RaptorQ store", logtrace.Fields{"error": err.Error()})
}
- // Initialize P2P service
- p2pService, err := initP2PService(ctx, appConfig, lumeraClient, kr, rqStore, nil, nil)
+ // Initialize P2P service
+ p2pService, err := initP2PService(ctx, appConfig, lumeraClient, kr, rqStore, nil, nil)
if err != nil {
logtrace.Fatal(ctx, "Failed to initialize P2P service", logtrace.Fields{"error": err.Error()})
}
@@ -94,18 +105,18 @@ The supernode will connect to the Lumera network and begin participating in the
}
// Configure cascade service
- cService := cascadeService.NewCascadeService(
- &cascadeService.Config{
- Config: common.Config{
- SupernodeAccountAddress: appConfig.SupernodeConfig.Identity,
- },
- RqFilesDir: appConfig.GetRaptorQFilesDir(),
- },
- lumeraClient,
- *p2pService,
- codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()),
- rqStore,
- )
+ cService := cascadeService.NewCascadeService(
+ &cascadeService.Config{
+ Config: common.Config{
+ SupernodeAccountAddress: appConfig.SupernodeConfig.Identity,
+ },
+ RqFilesDir: appConfig.GetRaptorQFilesDir(),
+ },
+ lumeraClient,
+ *p2pService,
+ codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()),
+ rqStore,
+ )
// Create cascade action server
cascadeActionServer := cascade.NewCascadeActionServer(cService)
@@ -139,6 +150,25 @@ The supernode will connect to the Lumera network and begin participating in the
return fmt.Errorf("failed to create gateway server: %w", err)
}
+ // Start profiling server on testnet only
+ isTestnet := strings.Contains(strings.ToLower(appConfig.LumeraClientConfig.ChainID), "testnet")
+
+ if isTestnet {
+ profilingAddr := "0.0.0.0:8082"
+
+ logtrace.Debug(ctx, "Starting profiling server", logtrace.Fields{
+ "address": profilingAddr,
+ "chain_id": appConfig.LumeraClientConfig.ChainID,
+ "is_testnet": isTestnet,
+ })
+
+ go func() {
+ if err := http.ListenAndServe(profilingAddr, nil); err != nil {
+ logtrace.Error(ctx, "Profiling server error", logtrace.Fields{"error": err.Error()})
+ }
+ }()
+ }
+
// Start the services
go func() {
if err := RunServices(ctx, grpcServer, cService, *p2pService, gatewayServer); err != nil {
@@ -152,7 +182,7 @@ The supernode will connect to the Lumera network and begin participating in the
// Wait for termination signal
sig := <-sigCh
- logtrace.Info(ctx, "Received signal, shutting down", logtrace.Fields{"signal": sig.String()})
+ logtrace.Debug(ctx, "Received signal, shutting down", logtrace.Fields{"signal": sig.String()})
// Graceful shutdown
if err := supernodeInstance.Stop(ctx); err != nil {
@@ -182,9 +212,9 @@ func initP2PService(ctx context.Context, config *config.Config, lumeraClient lum
// Create P2P config using helper function
p2pConfig := createP2PConfig(config, address.String())
- logtrace.Info(ctx, "Initializing P2P service", logtrace.Fields{"address": p2pConfig.ListenAddress, "port": p2pConfig.Port, "data_dir": p2pConfig.DataDir, "supernode_id": address.String()})
+ logtrace.Debug(ctx, "Initializing P2P service", logtrace.Fields{"address": p2pConfig.ListenAddress, "port": p2pConfig.Port, "data_dir": p2pConfig.DataDir, "supernode_id": address.String()})
- p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst)
+ p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst)
if err != nil {
return nil, fmt.Errorf("failed to initialize p2p service: %w", err)
}
diff --git a/supernode/cmd/supernode.go b/supernode/cmd/supernode.go
index 19a65718..c0740fd0 100644
--- a/supernode/cmd/supernode.go
+++ b/supernode/cmd/supernode.go
@@ -71,13 +71,13 @@ func (s *Supernode) Start(ctx context.Context) error {
return err
}
- logtrace.Info(ctx, "Found valid key in keyring", logtrace.Fields{
+ logtrace.Debug(ctx, "Found valid key in keyring", logtrace.Fields{
"key_name": s.config.SupernodeConfig.KeyName,
"address": address.String(),
})
// Use the P2P service that was passed in via constructor
- logtrace.Info(ctx, "Starting P2P service", logtrace.Fields{})
+ logtrace.Debug(ctx, "Starting P2P service", logtrace.Fields{})
if err := s.p2pService.Run(ctx); err != nil {
return fmt.Errorf("p2p service error: %w", err)
}
@@ -89,7 +89,7 @@ func (s *Supernode) Start(ctx context.Context) error {
func (s *Supernode) Stop(ctx context.Context) error {
// Close the Lumera client connection
if s.lumeraClient != nil {
- logtrace.Info(ctx, "Closing Lumera client", logtrace.Fields{})
+ logtrace.Debug(ctx, "Closing Lumera client", logtrace.Fields{})
if err := s.lumeraClient.Close(); err != nil {
logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{
"error": err.Error(),
@@ -131,7 +131,7 @@ func initRQStore(ctx context.Context, config *config.Config) (rqstore.Store, err
// Create the SQLite file path
rqStoreFile := rqDir + "/rqstore.db"
- logtrace.Info(ctx, "Initializing RaptorQ store", logtrace.Fields{
+ logtrace.Debug(ctx, "Initializing RaptorQ store", logtrace.Fields{
"file_path": rqStoreFile,
})
diff --git a/supernode/node/action/server/cascade/cascade_action_server.go b/supernode/node/action/server/cascade/cascade_action_server.go
index a99fbf0a..6a38b750 100644
--- a/supernode/node/action/server/cascade/cascade_action_server.go
+++ b/supernode/node/action/server/cascade/cascade_action_server.go
@@ -74,7 +74,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er
}
ctx := stream.Context()
- logtrace.Info(ctx, "client streaming request to upload cascade input data received", fields)
+ logtrace.Debug(ctx, "client streaming request to upload cascade input data received", fields)
const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit
@@ -140,7 +140,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er
return fmt.Errorf("file size %d exceeds maximum allowed size of 1GB", totalSize)
}
- logtrace.Info(ctx, "received data chunk", logtrace.Fields{
+ logtrace.Debug(ctx, "received data chunk", logtrace.Fields{
"chunk_size": len(x.Chunk.Data),
"total_size_so_far": totalSize,
})
@@ -148,7 +148,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er
case *pb.RegisterRequest_Metadata:
// Store metadata - this should be the final message
metadata = x.Metadata
- logtrace.Info(ctx, "received metadata", logtrace.Fields{
+ logtrace.Debug(ctx, "received metadata", logtrace.Fields{
"task_id": metadata.TaskId,
"action_id": metadata.ActionId,
})
@@ -162,7 +162,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er
}
fields[logtrace.FieldTaskID] = metadata.GetTaskId()
fields[logtrace.FieldActionID] = metadata.GetActionId()
- logtrace.Info(ctx, "metadata received from action-sdk", fields)
+ logtrace.Debug(ctx, "metadata received from action-sdk", fields)
// Ensure all data is written to disk before calculating hash
if err := tempFile.Sync(); err != nil {
@@ -174,7 +174,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er
hash := hasher.Sum(nil)
hashHex := hex.EncodeToString(hash)
fields[logtrace.FieldHashHex] = hashHex
- logtrace.Info(ctx, "final BLAKE3 hash generated", fields)
+ logtrace.Debug(ctx, "final BLAKE3 hash generated", fields)
targetPath, err := replaceTempDirWithTaskDir(metadata.GetTaskId(), tempFilePath, tempFile)
if err != nil {
@@ -213,7 +213,7 @@ func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) er
return fmt.Errorf("registration failed: %w", err)
}
- logtrace.Info(ctx, "cascade registration completed successfully", fields)
+ logtrace.Debug(ctx, "cascade registration completed successfully", fields)
return nil
}
@@ -225,25 +225,12 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS
}
ctx := stream.Context()
- logtrace.Info(ctx, "download request received from client", fields)
+ logtrace.Debug(ctx, "download request received from client", fields)
task := server.factory.NewCascadeRegistrationTask()
- // Verify signature if provided
- if req.GetSignature() != "" {
- // Cast to concrete type to access helper method
- if cascadeTask, ok := task.(*cascadeService.CascadeRegistrationTask); ok {
- err := cascadeTask.VerifyDownloadSignature(ctx, req.GetActionId(), req.GetSignature())
- if err != nil {
- fields[logtrace.FieldError] = err.Error()
- logtrace.Error(ctx, "signature verification failed", fields)
- return fmt.Errorf("signature verification failed: %w", err)
- }
- } else {
- logtrace.Error(ctx, "unable to cast task to CascadeRegistrationTask", fields)
- return fmt.Errorf("unable to verify signature: task type assertion failed")
- }
- }
+ // Authorization is enforced inside the task based on metadata.Public.
+ // If public, signature is skipped; if private, signature is required.
var restoredFilePath string
var tmpDir string
@@ -254,13 +241,14 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS
if err := task.CleanupDownload(ctx, tmpDir); err != nil {
logtrace.Error(ctx, "error cleaning up the tmp dir", logtrace.Fields{logtrace.FieldError: err.Error()})
} else {
- logtrace.Info(ctx, "tmp dir has been cleaned up", logtrace.Fields{"tmp_dir": tmpDir})
+ logtrace.Debug(ctx, "tmp dir has been cleaned up", logtrace.Fields{"tmp_dir": tmpDir})
}
}
}()
err := task.Download(ctx, &cascadeService.DownloadRequest{
- ActionID: req.GetActionId(),
+ ActionID: req.GetActionId(),
+ Signature: req.GetSignature(),
}, func(resp *cascadeService.DownloadResponse) error {
grpcResp := &pb.DownloadResponse{
ResponseType: &pb.DownloadResponse_Event{
@@ -290,7 +278,7 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS
logtrace.Error(ctx, "no artefact file retrieved", fields)
return fmt.Errorf("no artefact to stream")
}
- logtrace.Info(ctx, "streaming artefact file in chunks", fields)
+ logtrace.Debug(ctx, "streaming artefact file in chunks", fields)
// Open the restored file and stream directly from disk to avoid buffering entire file in memory
f, err := os.Open(restoredFilePath)
@@ -308,12 +296,19 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS
// Calculate optimal chunk size based on file size
chunkSize := calculateOptimalChunkSize(fi.Size())
- logtrace.Info(ctx, "calculated optimal chunk size for download", logtrace.Fields{
+ logtrace.Debug(ctx, "calculated optimal chunk size for download", logtrace.Fields{
"file_size": fi.Size(),
"chunk_size": chunkSize,
})
- // Announce: file is ready to be served to the client
+ // Pre-read first chunk to avoid any delay between SERVE_READY and first data
+ buf := make([]byte, chunkSize)
+ n, readErr := f.Read(buf)
+ if readErr != nil && readErr != io.EOF {
+ return fmt.Errorf("chunked read failed: %w", readErr)
+ }
+
+ // Announce: file is ready to be served to the client (right before first data)
if err := stream.Send(&pb.DownloadResponse{
ResponseType: &pb.DownloadResponse_Event{
Event: &pb.DownloadEvent{
@@ -326,10 +321,27 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS
return err
}
- // Stream the file in fixed-size chunks
- buf := make([]byte, chunkSize)
+ // Send pre-read first chunk if available
+ if n > 0 {
+ if err := stream.Send(&pb.DownloadResponse{
+ ResponseType: &pb.DownloadResponse_Chunk{
+ Chunk: &pb.DataChunk{Data: buf[:n]},
+ },
+ }); err != nil {
+ logtrace.Error(ctx, "failed to stream first chunk", logtrace.Fields{logtrace.FieldError: err.Error()})
+ return err
+ }
+ }
+
+ // If EOF after first read, we're done
+ if readErr == io.EOF {
+ logtrace.Debug(ctx, "completed streaming all chunks", fields)
+ return nil
+ }
+
+ // Continue streaming remaining chunks
for {
- n, readErr := f.Read(buf)
+ n, readErr = f.Read(buf)
if n > 0 {
if err := stream.Send(&pb.DownloadResponse{
ResponseType: &pb.DownloadResponse_Chunk{
@@ -350,6 +362,6 @@ func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeS
// Cleanup is handled in deferred block above
- logtrace.Info(ctx, "completed streaming all chunks", fields)
+ logtrace.Debug(ctx, "completed streaming all chunks", fields)
return nil
}
diff --git a/supernode/node/action/server/cascade/cascade_action_server_test.go b/supernode/node/action/server/cascade/cascade_action_server_test.go
index eca121d8..ff2738b3 100644
--- a/supernode/node/action/server/cascade/cascade_action_server_test.go
+++ b/supernode/node/action/server/cascade/cascade_action_server_test.go
@@ -10,7 +10,7 @@ import (
cascademocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/mocks"
"github.com/stretchr/testify/assert"
- "go.uber.org/mock/gomock"
+ "github.com/golang/mock/gomock"
)
func TestRegister_Success(t *testing.T) {
diff --git a/supernode/node/supernode/gateway/server.go b/supernode/node/supernode/gateway/server.go
index 5440a7f4..7e17e238 100644
--- a/supernode/node/supernode/gateway/server.go
+++ b/supernode/node/supernode/gateway/server.go
@@ -86,7 +86,7 @@ func (s *Server) Run(ctx context.Context) error {
IdleTimeout: 60 * time.Second,
}
- logtrace.Info(ctx, "Starting HTTP gateway server", logtrace.Fields{
+ logtrace.Debug(ctx, "Starting HTTP gateway server", logtrace.Fields{
"address": s.ipAddress,
"port": s.port,
})
@@ -105,7 +105,7 @@ func (s *Server) Stop(ctx context.Context) error {
return nil
}
- logtrace.Info(ctx, "Shutting down HTTP gateway server", nil)
+ logtrace.Debug(ctx, "Shutting down HTTP gateway server", nil)
return s.server.Shutdown(ctx)
}
diff --git a/supernode/node/supernode/server/server.go b/supernode/node/supernode/server/server.go
index 37e8f4dd..774be094 100644
--- a/supernode/node/supernode/server/server.go
+++ b/supernode/node/supernode/server/server.go
@@ -48,8 +48,8 @@ func (server *Server) Run(ctx context.Context) error {
// Set up gRPC logging
logtrace.SetGRPCLogger()
- logtrace.Info(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.config.Identity})
- logtrace.Info(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.config.ListenAddresses})
+ logtrace.Debug(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.config.Identity})
+ logtrace.Debug(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.config.ListenAddresses})
group, ctx := errgroup.WithContext(ctx)
@@ -74,7 +74,7 @@ func (server *Server) Run(ctx context.Context) error {
address := addr // Create a new variable to avoid closure issues
group.Go(func() error {
- logtrace.Info(ctx, "Starting gRPC server", logtrace.Fields{logtrace.FieldModule: "server", "address": address})
+ logtrace.Debug(ctx, "Starting gRPC server", logtrace.Fields{logtrace.FieldModule: "server", "address": address})
return server.grpcServer.Serve(ctx, address, opts)
})
}
diff --git a/supernode/node/supernode/server/status_server.go b/supernode/node/supernode/server/status_server.go
index d90b1e3e..8b061a3b 100644
--- a/supernode/node/supernode/server/status_server.go
+++ b/supernode/node/supernode/server/status_server.go
@@ -174,68 +174,7 @@ func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest)
pbpm.Disk.UsedMb = pm.Disk.UsedMB
pbpm.Disk.FreeMb = pm.Disk.FreeMB
- // Recent batch store
- for _, e := range pm.RecentBatchStore {
- pbpm.RecentBatchStore = append(pbpm.RecentBatchStore, &pb.StatusResponse_P2PMetrics_RecentBatchStoreEntry{
- TimeUnix: e.TimeUnix,
- SenderId: e.SenderID,
- SenderIp: e.SenderIP,
- Keys: int32(e.Keys),
- DurationMs: e.DurationMS,
- Ok: e.OK,
- Error: e.Error,
- })
- }
- // Recent batch retrieve
- for _, e := range pm.RecentBatchRetrieve {
- pbpm.RecentBatchRetrieve = append(pbpm.RecentBatchRetrieve, &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{
- TimeUnix: e.TimeUnix,
- SenderId: e.SenderID,
- SenderIp: e.SenderIP,
- Requested: int32(e.Requested),
- Found: int32(e.Found),
- DurationMs: e.DurationMS,
- Error: e.Error,
- })
- }
-
- // Per-IP buckets
- if pm.RecentBatchStoreByIP != nil {
- pbpm.RecentBatchStoreByIp = map[string]*pb.StatusResponse_P2PMetrics_RecentBatchStoreList{}
- for ip, list := range pm.RecentBatchStoreByIP {
- pbList := &pb.StatusResponse_P2PMetrics_RecentBatchStoreList{}
- for _, e := range list {
- pbList.Entries = append(pbList.Entries, &pb.StatusResponse_P2PMetrics_RecentBatchStoreEntry{
- TimeUnix: e.TimeUnix,
- SenderId: e.SenderID,
- SenderIp: e.SenderIP,
- Keys: int32(e.Keys),
- DurationMs: e.DurationMS,
- Ok: e.OK,
- Error: e.Error,
- })
- }
- pbpm.RecentBatchStoreByIp[ip] = pbList
- }
- }
- if pm.RecentBatchRetrieveByIP != nil {
- pbpm.RecentBatchRetrieveByIp = map[string]*pb.StatusResponse_P2PMetrics_RecentBatchRetrieveList{}
- for ip, list := range pm.RecentBatchRetrieveByIP {
- pbList := &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveList{}
- for _, e := range list {
- pbList.Entries = append(pbList.Entries, &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{
- TimeUnix: e.TimeUnix,
- SenderId: e.SenderID,
- SenderIp: e.SenderIP,
- Requested: int32(e.Requested),
- Found: int32(e.Found),
- DurationMs: e.DurationMS,
- Error: e.Error,
- })
- }
- pbpm.RecentBatchRetrieveByIp[ip] = pbList
- }
- }
+ // Detailed recent per-request lists removed from API
response.P2PMetrics = pbpm
}
diff --git a/supernode/services/cascade/adaptors/mocks/lumera_mock.go b/supernode/services/cascade/adaptors/mocks/lumera_mock.go
index 15a6c901..29cdd48f 100644
--- a/supernode/services/cascade/adaptors/mocks/lumera_mock.go
+++ b/supernode/services/cascade/adaptors/mocks/lumera_mock.go
@@ -1,10 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: lumera.go
-//
-// Generated by this command:
-//
-// mockgen -destination=mocks/lumera_mock.go -package=cascadeadaptormocks -source=lumera.go
-//
// Package cascadeadaptormocks is a generated GoMock package.
package cascadeadaptormocks
@@ -16,14 +11,13 @@ import (
types "github.com/LumeraProtocol/lumera/x/action/v1/types"
types0 "github.com/LumeraProtocol/lumera/x/supernode/v1/types"
tx "github.com/cosmos/cosmos-sdk/types/tx"
- gomock "go.uber.org/mock/gomock"
+ gomock "github.com/golang/mock/gomock"
)
// MockLumeraClient is a mock of LumeraClient interface.
type MockLumeraClient struct {
ctrl *gomock.Controller
recorder *MockLumeraClientMockRecorder
- isgomock struct{}
}
// MockLumeraClientMockRecorder is the mock recorder for MockLumeraClient.
@@ -53,26 +47,11 @@ func (m *MockLumeraClient) FinalizeAction(ctx context.Context, actionID string,
}
// FinalizeAction indicates an expected call of FinalizeAction.
-func (mr *MockLumeraClientMockRecorder) FinalizeAction(ctx, actionID, rqids any) *gomock.Call {
+func (mr *MockLumeraClientMockRecorder) FinalizeAction(ctx, actionID, rqids interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).FinalizeAction), ctx, actionID, rqids)
}
-// SimulateFinalizeAction mocks base method.
-func (m *MockLumeraClient) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*tx.SimulateResponse, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "SimulateFinalizeAction", ctx, actionID, rqids)
- ret0, _ := ret[0].(*tx.SimulateResponse)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// SimulateFinalizeAction indicates an expected call of SimulateFinalizeAction.
-func (mr *MockLumeraClientMockRecorder) SimulateFinalizeAction(ctx, actionID, rqids any) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateFinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).SimulateFinalizeAction), ctx, actionID, rqids)
-}
-
// GetAction mocks base method.
func (m *MockLumeraClient) GetAction(ctx context.Context, actionID string) (*types.QueryGetActionResponse, error) {
m.ctrl.T.Helper()
@@ -83,7 +62,7 @@ func (m *MockLumeraClient) GetAction(ctx context.Context, actionID string) (*typ
}
// GetAction indicates an expected call of GetAction.
-func (mr *MockLumeraClientMockRecorder) GetAction(ctx, actionID any) *gomock.Call {
+func (mr *MockLumeraClientMockRecorder) GetAction(ctx, actionID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAction", reflect.TypeOf((*MockLumeraClient)(nil).GetAction), ctx, actionID)
}
@@ -98,7 +77,7 @@ func (m *MockLumeraClient) GetActionFee(ctx context.Context, dataSize string) (*
}
// GetActionFee indicates an expected call of GetActionFee.
-func (mr *MockLumeraClientMockRecorder) GetActionFee(ctx, dataSize any) *gomock.Call {
+func (mr *MockLumeraClientMockRecorder) GetActionFee(ctx, dataSize interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActionFee", reflect.TypeOf((*MockLumeraClient)(nil).GetActionFee), ctx, dataSize)
}
@@ -113,11 +92,26 @@ func (m *MockLumeraClient) GetTopSupernodes(ctx context.Context, height uint64)
}
// GetTopSupernodes indicates an expected call of GetTopSupernodes.
-func (mr *MockLumeraClientMockRecorder) GetTopSupernodes(ctx, height any) *gomock.Call {
+func (mr *MockLumeraClientMockRecorder) GetTopSupernodes(ctx, height interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopSupernodes", reflect.TypeOf((*MockLumeraClient)(nil).GetTopSupernodes), ctx, height)
}
+// SimulateFinalizeAction mocks base method.
+func (m *MockLumeraClient) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*tx.SimulateResponse, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SimulateFinalizeAction", ctx, actionID, rqids)
+ ret0, _ := ret[0].(*tx.SimulateResponse)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SimulateFinalizeAction indicates an expected call of SimulateFinalizeAction.
+func (mr *MockLumeraClientMockRecorder) SimulateFinalizeAction(ctx, actionID, rqids interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateFinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).SimulateFinalizeAction), ctx, actionID, rqids)
+}
+
// Verify mocks base method.
func (m *MockLumeraClient) Verify(ctx context.Context, creator string, file, sigBytes []byte) error {
m.ctrl.T.Helper()
@@ -127,7 +121,7 @@ func (m *MockLumeraClient) Verify(ctx context.Context, creator string, file, sig
}
// Verify indicates an expected call of Verify.
-func (mr *MockLumeraClientMockRecorder) Verify(ctx, creator, file, sigBytes any) *gomock.Call {
+func (mr *MockLumeraClientMockRecorder) Verify(ctx, creator, file, sigBytes interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockLumeraClient)(nil).Verify), ctx, creator, file, sigBytes)
}
diff --git a/supernode/services/cascade/adaptors/mocks/p2p_mock.go b/supernode/services/cascade/adaptors/mocks/p2p_mock.go
index 4f62a440..ec99d92a 100644
--- a/supernode/services/cascade/adaptors/mocks/p2p_mock.go
+++ b/supernode/services/cascade/adaptors/mocks/p2p_mock.go
@@ -1,10 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: p2p.go
-//
-// Generated by this command:
-//
-// mockgen -destination=mocks/p2p_mock.go -package=cascadeadaptormocks -source=p2p.go
-//
// Package cascadeadaptormocks is a generated GoMock package.
package cascadeadaptormocks
@@ -15,14 +10,13 @@ import (
logtrace "github.com/LumeraProtocol/supernode/v2/pkg/logtrace"
adaptors "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors"
- gomock "go.uber.org/mock/gomock"
+ gomock "github.com/golang/mock/gomock"
)
// MockP2PService is a mock of P2PService interface.
type MockP2PService struct {
ctrl *gomock.Controller
recorder *MockP2PServiceMockRecorder
- isgomock struct{}
}
// MockP2PServiceMockRecorder is the mock recorder for MockP2PService.
@@ -51,7 +45,7 @@ func (m *MockP2PService) StoreArtefacts(ctx context.Context, req adaptors.StoreA
}
// StoreArtefacts indicates an expected call of StoreArtefacts.
-func (mr *MockP2PServiceMockRecorder) StoreArtefacts(ctx, req, f any) *gomock.Call {
+func (mr *MockP2PServiceMockRecorder) StoreArtefacts(ctx, req, f interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreArtefacts", reflect.TypeOf((*MockP2PService)(nil).StoreArtefacts), ctx, req, f)
}
diff --git a/supernode/services/cascade/adaptors/mocks/rq_mock.go b/supernode/services/cascade/adaptors/mocks/rq_mock.go
index 4c53c1dd..f45f2eb5 100644
--- a/supernode/services/cascade/adaptors/mocks/rq_mock.go
+++ b/supernode/services/cascade/adaptors/mocks/rq_mock.go
@@ -1,10 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: rq.go
-//
-// Generated by this command:
-//
-// mockgen -destination=mocks/rq_mock.go -package=cascadeadaptormocks -source=rq.go
-//
// Package cascadeadaptormocks is a generated GoMock package.
package cascadeadaptormocks
@@ -13,15 +8,15 @@ import (
context "context"
reflect "reflect"
+ codec "github.com/LumeraProtocol/supernode/v2/pkg/codec"
adaptors "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors"
- gomock "go.uber.org/mock/gomock"
+ gomock "github.com/golang/mock/gomock"
)
// MockCodecService is a mock of CodecService interface.
type MockCodecService struct {
ctrl *gomock.Controller
recorder *MockCodecServiceMockRecorder
- isgomock struct{}
}
// MockCodecServiceMockRecorder is the mock recorder for MockCodecService.
@@ -51,7 +46,7 @@ func (m *MockCodecService) Decode(ctx context.Context, req adaptors.DecodeReques
}
// Decode indicates an expected call of Decode.
-func (mr *MockCodecServiceMockRecorder) Decode(ctx, req any) *gomock.Call {
+func (mr *MockCodecServiceMockRecorder) Decode(ctx, req interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decode", reflect.TypeOf((*MockCodecService)(nil).Decode), ctx, req)
}
@@ -66,7 +61,25 @@ func (m *MockCodecService) EncodeInput(ctx context.Context, taskID, path string,
}
// EncodeInput indicates an expected call of EncodeInput.
-func (mr *MockCodecServiceMockRecorder) EncodeInput(ctx, taskID, path, dataSize any) *gomock.Call {
+func (mr *MockCodecServiceMockRecorder) EncodeInput(ctx, taskID, path, dataSize interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EncodeInput", reflect.TypeOf((*MockCodecService)(nil).EncodeInput), ctx, taskID, path, dataSize)
}
+
+// PrepareDecode mocks base method.
+func (m *MockCodecService) PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) ([]string, func(int, string, []byte) (string, error), func() error, *codec.Workspace, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PrepareDecode", ctx, actionID, layout)
+ ret0, _ := ret[0].([]string)
+ ret1, _ := ret[1].(func(int, string, []byte) (string, error))
+ ret2, _ := ret[2].(func() error)
+ ret3, _ := ret[3].(*codec.Workspace)
+ ret4, _ := ret[4].(error)
+ return ret0, ret1, ret2, ret3, ret4
+}
+
+// PrepareDecode indicates an expected call of PrepareDecode.
+func (mr *MockCodecServiceMockRecorder) PrepareDecode(ctx, actionID, layout interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareDecode", reflect.TypeOf((*MockCodecService)(nil).PrepareDecode), ctx, actionID, layout)
+}
diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/services/cascade/adaptors/p2p.go
index 116d6810..d1fd6ab9 100644
--- a/supernode/services/cascade/adaptors/p2p.go
+++ b/supernode/services/cascade/adaptors/p2p.go
@@ -13,7 +13,6 @@ import (
"github.com/LumeraProtocol/supernode/v2/p2p"
"github.com/LumeraProtocol/supernode/v2/pkg/logtrace"
- cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics"
"github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore"
"github.com/LumeraProtocol/supernode/v2/pkg/utils"
"github.com/LumeraProtocol/supernode/v2/supernode/services/common/storage"
@@ -39,8 +38,8 @@ type P2PService interface {
// p2pImpl is the default implementation of the P2PService interface.
type p2pImpl struct {
- p2p p2p.Client
- rqStore rqstore.Store
+ p2p p2p.Client
+ rqStore rqstore.Store
}
// NewP2PService returns a concrete implementation of P2PService.
@@ -56,11 +55,7 @@ type StoreArtefactsRequest struct {
}
func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error {
- logtrace.Info(ctx, "About to store artefacts (metadata + symbols)", logtrace.Fields{"taskID": req.TaskID, "id_files": len(req.IDFiles)})
-
- // Enable per-node store RPC capture for this task
- cm.StartStoreCapture(req.TaskID)
- defer cm.StopStoreCapture(req.TaskID)
+ logtrace.Info(ctx, "store: p2p start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir})
start := time.Now()
firstPassSymbols, totalSymbols, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, req.ActionID, req.SymbolsDir, req.IDFiles)
@@ -68,9 +63,18 @@ func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest,
return errors.Wrap(err, "error storing artefacts")
}
dur := time.Since(start).Milliseconds()
- logtrace.Info(ctx, "artefacts have been stored", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total": totalSymbols, "id_files_count": len(req.IDFiles)})
- // Record store summary for later event emission
- cm.SetStoreSummary(req.TaskID, firstPassSymbols, totalSymbols, len(req.IDFiles), dur)
+ // After first-pass, log how many symbols remain on disk
+ remaining := 0
+ if req.SymbolsDir != "" {
+ if keys, werr := walkSymbolTree(req.SymbolsDir); werr == nil {
+ remaining = len(keys)
+ }
+ }
+ logtrace.Info(ctx, "store: first-pass complete", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": dur})
+ if remaining == 0 {
+ logtrace.Info(ctx, "store: dir empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir})
+ }
+ // Metrics collection removed; logs retained
return nil
}
@@ -98,7 +102,8 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action
if targetCount < 1 && totalAvailable > 0 {
targetCount = 1
}
- logtrace.Info(ctx, "first-pass target coverage (symbols)", logtrace.Fields{
+ logtrace.Info(ctx, "store: symbols discovered", logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir})
+ logtrace.Info(ctx, "store: target coverage", logtrace.Fields{
"total_symbols": totalAvailable,
"target_percent": storeSymbolsPercent,
"target_count": targetCount,
@@ -113,8 +118,8 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action
}
sort.Strings(keys) // deterministic order inside the sample
}
-
- logtrace.Info(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)})
+ logtrace.Info(ctx, "store: selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir})
+ logtrace.Info(ctx, "store: sending symbols", logtrace.Fields{"count": len(keys)})
/* stream in fixed-size batches -------------------------------------- */
@@ -150,13 +155,14 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action
payload = append(payload, symBytes...)
// Send as the same data type you use for symbols
+ logtrace.Info(ctx, "store: batch send (first)", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)})
bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout)
- bctx = cm.WithTaskID(bctx, taskID)
err = p.p2p.StoreBatch(bctx, payload, storage.P2PDataRaptorQSymbol, taskID)
cancel()
if err != nil {
return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err)
}
+ logtrace.Info(ctx, "store: batch ok (first)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)})
totalSymbols += len(symBytes)
// No per-RPC metrics propagated from p2p
@@ -167,6 +173,14 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action
return totalSymbols, totalAvailable, fmt.Errorf("delete symbols: %w", err)
}
}
+ // Log remaining symbols in directory after deletion
+ if rem, werr := walkSymbolTree(symbolsDir); werr == nil {
+ if left := len(rem); left > 0 {
+ logtrace.Info(ctx, "store: remaining after first batch", logtrace.Fields{"taskID": taskID, "left": left})
+ } else {
+ logtrace.Info(ctx, "store: dir empty after first batch", logtrace.Fields{"taskID": taskID, "dir": symbolsDir})
+ }
+ }
firstBatchProcessed = true
} else {
@@ -185,12 +199,19 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action
if totalAvailable > 0 {
achievedPct = (float64(totalSymbols) / float64(totalAvailable)) * 100.0
}
- logtrace.Info(ctx, "first-pass achieved coverage (symbols)",
- logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct})
+ logtrace.Info(ctx, "store: coverage", logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct})
if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil {
return totalSymbols, totalAvailable, fmt.Errorf("update first-batch flag: %w", err)
}
+ // Final remaining count after first pass flagged
+ if rem, werr := walkSymbolTree(symbolsDir); werr == nil {
+ if left := len(rem); left > 0 {
+ logtrace.Info(ctx, "store: remaining after first-pass", logtrace.Fields{"taskID": taskID, "left": left, "dir": symbolsDir})
+ } else {
+ logtrace.Info(ctx, "store: directory empty after first-pass", logtrace.Fields{"taskID": taskID, "dir": symbolsDir})
+ }
+ }
return totalSymbols, totalAvailable, nil
@@ -225,7 +246,7 @@ func walkSymbolTree(root string) ([]string, error) {
// storeSymbolsInP2P loads a batch of symbols and stores them via P2P.
// Returns (ratePct, requests, count, error) where `count` is the number of symbols in this batch.
func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (int, error) {
- logtrace.Info(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)})
+ logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"taskID": taskID, "count": len(fileKeys)})
symbols, err := utils.LoadSymbols(root, fileKeys)
if err != nil {
@@ -233,18 +254,23 @@ func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fi
}
symCtx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout)
- symCtx = cm.WithTaskID(symCtx, taskID)
defer cancel()
+ logtrace.Info(ctx, "store: batch send (symbols)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)})
if err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID); err != nil {
return len(symbols), fmt.Errorf("p2p store batch: %w", err)
}
- logtrace.Info(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)})
+ logtrace.Info(ctx, "store: batch ok (symbols)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)})
if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil {
return len(symbols), fmt.Errorf("delete symbols: %w", err)
}
- logtrace.Info(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)})
+ // After deletion, log remaining count in directory
+ left := -1
+ if rem, werr := walkSymbolTree(root); werr == nil {
+ left = len(rem)
+ }
+ logtrace.Debug(ctx, "deleted batch symbols", logtrace.Fields{"taskID": taskID, "count": len(symbols), "symbols_left_on_disk": left})
// No per-RPC metrics propagated from p2p
return len(symbols), nil
diff --git a/supernode/services/cascade/adaptors/rq.go b/supernode/services/cascade/adaptors/rq.go
index 5f4443cf..92e89819 100644
--- a/supernode/services/cascade/adaptors/rq.go
+++ b/supernode/services/cascade/adaptors/rq.go
@@ -11,6 +11,7 @@ import (
//go:generate mockgen -destination=mocks/rq_mock.go -package=cascadeadaptormocks -source=rq.go
type CodecService interface {
EncodeInput(ctx context.Context, taskID string, path string, dataSize int) (EncodeResult, error)
+ PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error)
Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error)
}
@@ -70,7 +71,11 @@ func (c *codecImpl) Decode(ctx context.Context, req DecodeRequest) (DecodeRespon
}
return DecodeResponse{
- FilePath: resp.Path,
+ FilePath: resp.FilePath,
DecodeTmpDir: resp.DecodeTmpDir,
}, nil
}
+
+func (c *codecImpl) PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error) {
+ return
+}
diff --git a/supernode/services/cascade/config.go b/supernode/services/cascade/config.go
index 7a0f1ef2..df6abd1f 100644
--- a/supernode/services/cascade/config.go
+++ b/supernode/services/cascade/config.go
@@ -6,8 +6,8 @@ import (
// Config contains settings for the cascade service
type Config struct {
- common.Config `mapstructure:",squash" json:"-"`
+ common.Config `mapstructure:",squash" json:"-"`
- RaptorQServiceAddress string `mapstructure:"-" json:"-"`
- RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"`
+ RaptorQServiceAddress string `mapstructure:"-" json:"-"`
+ RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"`
}
diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go
index b8220045..6ad40aab 100644
--- a/supernode/services/cascade/download.go
+++ b/supernode/services/cascade/download.go
@@ -14,7 +14,6 @@ import (
"github.com/LumeraProtocol/supernode/v2/pkg/crypto"
"github.com/LumeraProtocol/supernode/v2/pkg/errors"
"github.com/LumeraProtocol/supernode/v2/pkg/logtrace"
- cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics"
"github.com/LumeraProtocol/supernode/v2/pkg/utils"
"github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors"
"github.com/LumeraProtocol/supernode/v2/supernode/services/common"
@@ -24,6 +23,9 @@ const targetRequiredPercent = 17
type DownloadRequest struct {
ActionID string
+ // Signature is required for private downloads. For public cascade
+ // actions (metadata.Public == true), this is ignored.
+ Signature string
}
type DownloadResponse struct {
@@ -33,13 +35,24 @@ type DownloadResponse struct {
DownloadedDir string
}
+// Download retrieves a cascade artefact by action ID.
+//
+// Authorization behavior:
+// - If the cascade metadata has Public = true, signature verification is skipped
+// and the file is downloadable by anyone.
+// - If Public = false, a valid download signature is required.
func (task *CascadeRegistrationTask) Download(
ctx context.Context,
req *DownloadRequest,
send func(resp *DownloadResponse) error,
) (err error) {
+ // Seed correlation ID and origin from actionID for downstream logs
+ if req != nil && req.ActionID != "" {
+ ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID)
+ ctx = logtrace.CtxWithOrigin(ctx, "download")
+ }
fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req}
- logtrace.Info(ctx, "Cascade download request received", fields)
+ logtrace.Info(ctx, "download: request", fields)
// Ensure task status is finalized regardless of outcome
defer func() {
@@ -53,37 +66,64 @@ func (task *CascadeRegistrationTask) Download(
actionDetails, err := task.LumeraClient.GetAction(ctx, req.ActionID)
if err != nil {
- fields[logtrace.FieldError] = err
+ // Ensure error is logged as string for consistency
+ fields[logtrace.FieldError] = err.Error()
return task.wrapErr(ctx, "failed to get action", err, fields)
}
- logtrace.Info(ctx, "Action retrieved", fields)
+ logtrace.Info(ctx, "download: action fetched", fields)
task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send)
if actionDetails.GetAction().State != actiontypes.ActionStateDone {
+ // Return a clearer error message when action is not yet finalized
err = errors.New("action is not in a valid state")
fields[logtrace.FieldError] = "action state is not done yet"
fields[logtrace.FieldActionState] = actionDetails.GetAction().State
- return task.wrapErr(ctx, "action not found", err, fields)
+ return task.wrapErr(ctx, "action not finalized yet", err, fields)
}
- logtrace.Info(ctx, "Action state validated", fields)
+ logtrace.Info(ctx, "download: action state ok", fields)
metadata, err := task.decodeCascadeMetadata(ctx, actionDetails.GetAction().Metadata, fields)
if err != nil {
fields[logtrace.FieldError] = err.Error()
return task.wrapErr(ctx, "error decoding cascade metadata", err, fields)
}
- logtrace.Info(ctx, "Cascade metadata decoded", fields)
+ logtrace.Info(ctx, "download: metadata decoded", fields)
task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send)
+ // Enforce download authorization based on metadata.Public
+ // - If public: skip signature verification; allow anonymous downloads
+ // - If private: require a valid signature
+ if !metadata.Public {
+ if req.Signature == "" {
+ fields[logtrace.FieldError] = "missing signature for private download"
+ // Provide a descriptive message without a fabricated root error
+ return task.wrapErr(ctx, "private cascade requires a download signature", nil, fields)
+ }
+ if err := task.VerifyDownloadSignature(ctx, req.ActionID, req.Signature); err != nil {
+ fields[logtrace.FieldError] = err.Error()
+ return task.wrapErr(ctx, "failed to verify download signature", err, fields)
+ }
+ logtrace.Info(ctx, "download: signature verified", fields)
+ } else {
+ logtrace.Info(ctx, "download: public cascade (no signature)", fields)
+ }
+
// Notify: network retrieval phase begins
task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send)
+ logtrace.Info(ctx, "download: network retrieval start", logtrace.Fields{logtrace.FieldActionID: actionDetails.GetAction().ActionID})
filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send)
if err != nil {
fields[logtrace.FieldError] = err.Error()
+ // Ensure temporary decode directory is cleaned if decode failed after being created
+ if tmpDir != "" {
+ if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil {
+ logtrace.Warn(ctx, "cleanup of tmp dir after error failed", logtrace.Fields{"tmp_dir": tmpDir, logtrace.FieldError: cerr.Error()})
+ }
+ }
return task.wrapErr(ctx, "failed to download artifacts", err, fields)
}
- logtrace.Info(ctx, "File reconstructed and hash verified", fields)
+ logtrace.Debug(ctx, "File reconstructed and hash verified", fields)
// Notify: decode completed, file ready on disk
task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, "Decode completed", filePath, tmpDir, send)
@@ -91,7 +131,7 @@ func (task *CascadeRegistrationTask) Download(
}
func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields, send func(resp *DownloadResponse) error) (string, string, error) {
- logtrace.Info(ctx, "started downloading the artifacts", fields)
+ logtrace.Debug(ctx, "started downloading the artifacts", fields)
var (
layout codec.Layout
@@ -101,15 +141,19 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti
)
for _, indexID := range metadata.RqIdsIds {
+ iStart := time.Now()
+ logtrace.Debug(ctx, "RPC Retrieve index file", logtrace.Fields{"index_id": indexID})
indexFile, err := task.P2PClient.Retrieve(ctx, indexID)
if err != nil || len(indexFile) == 0 {
+ logtrace.Warn(ctx, "Retrieve index file failed or empty", logtrace.Fields{"index_id": indexID, logtrace.FieldError: fmt.Sprintf("%v", err)})
continue
}
+ logtrace.Debug(ctx, "Retrieve index file completed", logtrace.Fields{"index_id": indexID, "bytes": len(indexFile), "ms": time.Since(iStart).Milliseconds()})
// Parse index file to get layout IDs
indexData, err := task.parseIndexFile(indexFile)
if err != nil {
- logtrace.Info(ctx, "failed to parse index file", fields)
+ logtrace.Warn(ctx, "failed to parse index file", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error()})
continue
}
@@ -117,14 +161,14 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti
var netMS, decMS int64
layout, netMS, decMS, layoutAttempts, err = task.retrieveLayoutFromIndex(ctx, indexData, fields)
if err != nil {
- logtrace.Info(ctx, "failed to retrieve layout from index", fields)
+ logtrace.Warn(ctx, "failed to retrieve layout from index", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error(), "attempts": layoutAttempts})
continue
}
layoutFetchMS = netMS
layoutDecodeMS = decMS
if len(layout.Blocks) > 0 {
- logtrace.Info(ctx, "layout file retrieved via index", fields)
+ logtrace.Debug(ctx, "layout file retrieved via index", logtrace.Fields{"index_id": indexID, "attempts": layoutAttempts, "net_ms": layoutFetchMS, "decode_ms": layoutDecodeMS})
break
}
}
@@ -139,6 +183,10 @@ func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, acti
return task.restoreFileFromLayout(ctx, layout, metadata.DataHash, actionID, send)
}
+// restoreFileFromLayout reconstructs the original file from the provided layout
+// and a subset of retrieved symbols. The method deduplicates symbol identifiers
+// before network retrieval to avoid redundant requests and ensure the requested
+// count reflects unique symbols only.
func (task *CascadeRegistrationTask) restoreFileFromLayout(
ctx context.Context,
layout codec.Layout,
@@ -150,9 +198,16 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout(
fields := logtrace.Fields{
logtrace.FieldActionID: actionID,
}
- var allSymbols []string
+ // Deduplicate symbols across blocks to avoid redundant requests
+ symSet := make(map[string]struct{})
for _, block := range layout.Blocks {
- allSymbols = append(allSymbols, block.Symbols...)
+ for _, s := range block.Symbols {
+ symSet[s] = struct{}{}
+ }
+ }
+ allSymbols := make([]string, 0, len(symSet))
+ for s := range symSet {
+ allSymbols = append(allSymbols, s)
}
sort.Strings(allSymbols)
@@ -163,26 +218,32 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout(
if targetRequiredCount < 1 && totalSymbols > 0 {
targetRequiredCount = 1
}
- logtrace.Info(ctx, "Retrieving all symbols for decode", fields)
-
- // Enable retrieve metrics capture for this action
- cm.StartRetrieveCapture(actionID)
- defer cm.StopRetrieveCapture(actionID)
+ logtrace.Info(ctx, "download: plan symbols", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount})
// Measure symbols batch retrieve duration
retrieveStart := time.Now()
- // Tag context with metrics task ID (actionID)
- ctxRetrieve := cm.WithTaskID(ctx, actionID)
- symbols, err := task.P2PClient.BatchRetrieve(ctxRetrieve, allSymbols, totalSymbols, actionID)
+ // Use context as-is; metrics task tagging removed
+ // Retrieve only a fraction of symbols (targetRequiredCount) based on redundancy
+ // The DHT will short-circuit once it finds the required number across the provided keys
+ reqCount := targetRequiredCount
+ if reqCount > totalSymbols {
+ reqCount = totalSymbols
+ }
+ rStart := time.Now()
+ logtrace.Info(ctx, "download: batch retrieve start", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols})
+ symbols, err := task.P2PClient.BatchRetrieve(ctx, allSymbols, reqCount, actionID)
if err != nil {
fields[logtrace.FieldError] = err.Error()
logtrace.Error(ctx, "batch retrieve failed", fields)
return "", "", fmt.Errorf("batch retrieve symbols: %w", err)
}
retrieveMS := time.Since(retrieveStart).Milliseconds()
+ logtrace.Info(ctx, "download: batch retrieve ok", logtrace.Fields{"action_id": actionID, "received": len(symbols), "ms": time.Since(rStart).Milliseconds()})
// Measure decode duration
decodeStart := time.Now()
+ dStart := time.Now()
+ logtrace.Info(ctx, "download: decode start", logtrace.Fields{"action_id": actionID})
decodeInfo, err := task.RQ.Decode(ctx, adaptors.DecodeRequest{
ActionID: actionID,
Symbols: symbols,
@@ -194,16 +255,19 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout(
return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", err)
}
decodeMS := time.Since(decodeStart).Milliseconds()
-
- // Set minimal retrieve summary and emit event strictly from internal collector
- cm.SetRetrieveSummary(actionID, retrieveMS, decodeMS)
- payload := cm.BuildDownloadEventPayloadFromCollector(actionID)
- if retrieve, ok := payload["retrieve"].(map[string]any); ok {
- retrieve["target_required_percent"] = targetRequiredPercent
- retrieve["target_required_count"] = targetRequiredCount
- retrieve["total_symbols"] = totalSymbols
+ logtrace.Info(ctx, "download: decode ok", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath})
+
+ // Emit minimal JSON payload (metrics system removed)
+ minPayload := map[string]any{
+ "retrieve": map[string]any{
+ "retrieve_ms": retrieveMS,
+ "decode_ms": decodeMS,
+ "target_required_percent": targetRequiredPercent,
+ "target_required_count": targetRequiredCount,
+ "total_symbols": totalSymbols,
+ },
}
- if b, err := json.MarshalIndent(payload, "", " "); err == nil {
+ if b, err := json.MarshalIndent(minPayload, "", " "); err == nil {
task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", send)
}
@@ -225,7 +289,17 @@ func (task *CascadeRegistrationTask) restoreFileFromLayout(
fields[logtrace.FieldError] = err.Error()
return "", decodeInfo.DecodeTmpDir, err
}
- logtrace.Info(ctx, "File successfully restored and hash verified", fields)
+ // Log the state of the temporary decode directory
+ if decodeInfo.DecodeTmpDir != "" {
+ if set, derr := utils.ReadDirFilenames(decodeInfo.DecodeTmpDir); derr == nil {
+ if left := len(set); left > 0 {
+ logtrace.Debug(ctx, "Decode tmp directory has files remaining", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir, "left": left})
+ } else {
+ logtrace.Debug(ctx, "Decode tmp directory is empty", logtrace.Fields{"dir": decodeInfo.DecodeTmpDir})
+ }
+ }
+ }
+ logtrace.Info(ctx, "download: file verified", fields)
return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil
}
@@ -267,20 +341,26 @@ func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context
for _, layoutID := range indexData.LayoutIDs {
attempts++
t0 := time.Now()
+ logtrace.Debug(ctx, "RPC Retrieve layout file", logtrace.Fields{"layout_id": layoutID, "attempt": attempts})
layoutFile, err := task.P2PClient.Retrieve(ctx, layoutID)
- totalFetchMS += time.Since(t0).Milliseconds()
+ took := time.Since(t0).Milliseconds()
+ totalFetchMS += took
if err != nil || len(layoutFile) == 0 {
+ logtrace.Warn(ctx, "Retrieve layout file failed or empty", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "ms": took, logtrace.FieldError: fmt.Sprintf("%v", err)})
continue
}
t1 := time.Now()
layout, _, _, err := parseRQMetadataFile(layoutFile)
- totalDecodeMS += time.Since(t1).Milliseconds()
+ decMS := time.Since(t1).Milliseconds()
+ totalDecodeMS += decMS
if err != nil {
+ logtrace.Warn(ctx, "Parse layout file failed", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "decode_ms": decMS, logtrace.FieldError: err.Error()})
continue
}
if len(layout.Blocks) > 0 {
+ logtrace.Debug(ctx, "Layout file retrieved and parsed", logtrace.Fields{"layout_id": layoutID, "attempt": attempts, "net_ms": took, "decode_ms": decMS})
return layout, totalFetchMS, totalDecodeMS, attempts, nil
}
}
@@ -288,15 +368,20 @@ func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context
return codec.Layout{}, totalFetchMS, totalDecodeMS, attempts, errors.New("no valid layout found in index")
}
-func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, actionID string) error {
- if actionID == "" {
- return errors.New("actionID is empty")
+// CleanupDownload removes the temporary directory created during decode.
+// The parameter is a directory path (not an action ID).
+func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, dirPath string) error {
+ if dirPath == "" {
+ return errors.New("directory path is empty")
}
- // For now, we use actionID as the directory path to maintain compatibility
- if err := os.RemoveAll(actionID); err != nil {
- return errors.Errorf("failed to delete download directory: %s, :%s", actionID, err.Error())
+ // For now, we use tmp directory path as provided by decoder
+ logtrace.Debug(ctx, "Cleanup download directory", logtrace.Fields{"dir": dirPath})
+ if err := os.RemoveAll(dirPath); err != nil {
+ logtrace.Warn(ctx, "Cleanup download directory failed", logtrace.Fields{"dir": dirPath, logtrace.FieldError: err.Error()})
+ return errors.Errorf("failed to delete download directory: %s, :%s", dirPath, err.Error())
}
+ logtrace.Debug(ctx, "Cleanup download directory completed", logtrace.Fields{"dir": dirPath})
return nil
}
diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go
index fb8c7ef5..99d3985a 100644
--- a/supernode/services/cascade/helper.go
+++ b/supernode/services/cascade/helper.go
@@ -16,7 +16,6 @@ import (
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode"
"github.com/LumeraProtocol/supernode/v2/pkg/utils"
"github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors"
- cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/golang/protobuf/proto"
@@ -36,7 +35,7 @@ func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID s
if res.GetAction().ActionID == "" {
return nil, task.wrapErr(ctx, "action not found", errors.New(""), f)
}
- logtrace.Info(ctx, "action has been retrieved", f)
+ logtrace.Debug(ctx, "action has been retrieved", f)
return res.GetAction(), nil
}
@@ -46,7 +45,7 @@ func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, b
if err != nil {
return task.wrapErr(ctx, "failed to get top SNs", err, f)
}
- logtrace.Info(ctx, "Fetched Top Supernodes", f)
+ logtrace.Debug(ctx, "Fetched Top Supernodes", f)
if !supernode.Exists(top.Supernodes, task.config.SupernodeAccountAddress) {
// Build information about supernodes for better error context
@@ -54,7 +53,7 @@ func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, b
for i, sn := range top.Supernodes {
addresses[i] = sn.SupernodeAccount
}
- logtrace.Info(ctx, "Supernode not in top list", logtrace.Fields{
+ logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{
"currentAddress": task.config.SupernodeAccountAddress,
"topSupernodes": addresses,
})
@@ -78,7 +77,7 @@ func (task *CascadeRegistrationTask) verifyDataHash(ctx context.Context, dh []by
if string(b64) != expected {
return task.wrapErr(ctx, "data hash doesn't match", errors.New(""), f)
}
- logtrace.Info(ctx, "request data-hash has been matched with the action data-hash", f)
+ logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", f)
return nil
}
@@ -110,7 +109,7 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context.
if err := task.LumeraClient.Verify(ctx, creator, []byte(indexFileB64), creatorSigBytes); err != nil {
return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify creator signature", err, f)
}
- logtrace.Info(ctx, "creator signature successfully verified", f)
+ logtrace.Debug(ctx, "creator signature successfully verified", f)
// Decode index file to get the layout signature
indexFile, err := decodeIndexFile(indexFileB64)
@@ -132,7 +131,7 @@ func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context.
if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil {
return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f)
}
- logtrace.Info(ctx, "layout signature successfully verified", f)
+ logtrace.Debug(ctx, "layout signature successfully verified", f)
return encodedMeta, indexFile.LayoutSignature, nil
}
@@ -175,12 +174,32 @@ func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta
// storeArtefacts persists cascade artefacts (ID files + RaptorQ symbols) via the
// P2P adaptor. P2P does not return metrics; cascade summarizes and emits them.
func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) error {
- return task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{
+ if f == nil {
+ f = logtrace.Fields{}
+ }
+ lf := logtrace.Fields{
+ logtrace.FieldActionID: actionID,
+ logtrace.FieldTaskID: task.ID(),
+ "id_files_count": len(idFiles),
+ "symbols_dir": symbolsDir,
+ }
+ for k, v := range f {
+ lf[k] = v
+ }
+ // Tag the flow as first-pass just before handing over to P2P
+ ctx = logtrace.CtxWithOrigin(ctx, "first_pass")
+ logtrace.Info(ctx, "store: first-pass begin", lf)
+
+ if err := task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{
IDFiles: idFiles,
SymbolsDir: symbolsDir,
TaskID: task.ID(),
ActionID: actionID,
- }, f)
+ }, f); err != nil {
+ // Log and wrap to ensure a proper error line and context
+ return task.wrapErr(ctx, "failed to store artefacts", err, lf)
+ }
+ return nil
}
func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, err error, f logtrace.Fields) error {
@@ -199,24 +218,19 @@ func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, er
// emitArtefactsStored builds a single-line metrics summary and emits the
// SupernodeEventTypeArtefactsStored event while logging the metrics line.
func (task *CascadeRegistrationTask) emitArtefactsStored(
- ctx context.Context,
- fields logtrace.Fields,
- _ codec.Layout,
- send func(resp *RegisterResponse) error,
+ ctx context.Context,
+ fields logtrace.Fields,
+ _ codec.Layout,
+ send func(resp *RegisterResponse) error,
) {
if fields == nil {
fields = logtrace.Fields{}
}
- // Build payload strictly from internal collector (no P2P snapshots)
- payload := cm.BuildStoreEventPayloadFromCollector(task.ID())
-
- b, _ := json.MarshalIndent(payload, "", " ")
- msg := string(b)
- fields["metrics_json"] = msg
- logtrace.Info(ctx, "artefacts have been stored", fields)
+ // Emit a minimal event message (metrics system removed)
+ msg := "Artefacts stored"
+ logtrace.Debug(ctx, "artefacts have been stored", fields)
task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send)
- // No central state to clear; adaptor returns calls inline
}
// extractSignatureAndFirstPart extracts the signature and first part from the encoded data
@@ -279,7 +293,7 @@ func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action
requiredFee := sdk.NewCoin("ulume", math.NewInt(amount))
// Log the calculated fee
- logtrace.Info(ctx, "calculated required fee", logtrace.Fields{
+ logtrace.Debug(ctx, "calculated required fee", logtrace.Fields{
"fee": requiredFee.String(),
"dataBytes": dataSize,
})
@@ -377,6 +391,6 @@ func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context
return task.wrapErr(ctx, "failed to verify download signature", err, fields)
}
- logtrace.Info(ctx, "download signature successfully verified", fields)
+ logtrace.Debug(ctx, "download signature successfully verified", fields)
return nil
}
diff --git a/supernode/services/cascade/mocks/cascade_interfaces_mock.go b/supernode/services/cascade/mocks/cascade_interfaces_mock.go
index 497497c3..44d3189c 100644
--- a/supernode/services/cascade/mocks/cascade_interfaces_mock.go
+++ b/supernode/services/cascade/mocks/cascade_interfaces_mock.go
@@ -1,10 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: interfaces.go
-//
-// Generated by this command:
-//
-// mockgen -destination=mocks/cascade_interfaces_mock.go -package=cascademocks -source=interfaces.go
-//
// Package cascademocks is a generated GoMock package.
package cascademocks
@@ -14,14 +9,13 @@ import (
reflect "reflect"
cascade "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade"
- gomock "go.uber.org/mock/gomock"
+ gomock "github.com/golang/mock/gomock"
)
// MockCascadeServiceFactory is a mock of CascadeServiceFactory interface.
type MockCascadeServiceFactory struct {
ctrl *gomock.Controller
recorder *MockCascadeServiceFactoryMockRecorder
- isgomock struct{}
}
// MockCascadeServiceFactoryMockRecorder is the mock recorder for MockCascadeServiceFactory.
@@ -59,7 +53,6 @@ func (mr *MockCascadeServiceFactoryMockRecorder) NewCascadeRegistrationTask() *g
type MockCascadeTask struct {
ctrl *gomock.Controller
recorder *MockCascadeTaskMockRecorder
- isgomock struct{}
}
// MockCascadeTaskMockRecorder is the mock recorder for MockCascadeTask.
@@ -88,7 +81,7 @@ func (m *MockCascadeTask) CleanupDownload(ctx context.Context, actionID string)
}
// CleanupDownload indicates an expected call of CleanupDownload.
-func (mr *MockCascadeTaskMockRecorder) CleanupDownload(ctx, actionID any) *gomock.Call {
+func (mr *MockCascadeTaskMockRecorder) CleanupDownload(ctx, actionID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupDownload", reflect.TypeOf((*MockCascadeTask)(nil).CleanupDownload), ctx, actionID)
}
@@ -102,7 +95,7 @@ func (m *MockCascadeTask) Download(ctx context.Context, req *cascade.DownloadReq
}
// Download indicates an expected call of Download.
-func (mr *MockCascadeTaskMockRecorder) Download(ctx, req, send any) *gomock.Call {
+func (mr *MockCascadeTaskMockRecorder) Download(ctx, req, send interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockCascadeTask)(nil).Download), ctx, req, send)
}
@@ -116,7 +109,7 @@ func (m *MockCascadeTask) Register(ctx context.Context, req *cascade.RegisterReq
}
// Register indicates an expected call of Register.
-func (mr *MockCascadeTaskMockRecorder) Register(ctx, req, send any) *gomock.Call {
+func (mr *MockCascadeTaskMockRecorder) Register(ctx, req, send interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockCascadeTask)(nil).Register), ctx, req, send)
}
diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go
index dd6e1e77..1e8659f3 100644
--- a/supernode/services/cascade/register.go
+++ b/supernode/services/cascade/register.go
@@ -44,9 +44,14 @@ func (task *CascadeRegistrationTask) Register(
req *RegisterRequest,
send func(resp *RegisterResponse) error,
) (err error) {
+ // Seed correlation ID and origin so logs across layers can be joined and filtered
+ if req != nil && req.ActionID != "" {
+ ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID)
+ ctx = logtrace.CtxWithOrigin(ctx, "first_pass")
+ }
fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req}
- logtrace.Info(ctx, "Cascade registration request received", fields)
+ logtrace.Info(ctx, "register: request", fields)
// Ensure task status and resources are finalized regardless of outcome
defer func() {
@@ -64,7 +69,7 @@ func (task *CascadeRegistrationTask) Register(
if remErr := os.RemoveAll(req.FilePath); remErr != nil {
logtrace.Warn(ctx, "Failed to remove uploaded file", fields)
} else {
- logtrace.Info(ctx, "Uploaded file cleaned up", fields)
+ logtrace.Debug(ctx, "Uploaded file cleaned up", fields)
}
}
}()
@@ -78,14 +83,14 @@ func (task *CascadeRegistrationTask) Register(
fields[logtrace.FieldCreator] = action.Creator
fields[logtrace.FieldStatus] = action.State
fields[logtrace.FieldPrice] = action.Price
- logtrace.Info(ctx, "Action retrieved", fields)
+ logtrace.Info(ctx, "register: action fetched", fields)
task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send)
/* 2. Verify action fee -------------------------------------------------------- */
if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil {
return err
}
- logtrace.Info(ctx, "Action fee verified", fields)
+ logtrace.Info(ctx, "register: fee verified", fields)
task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send)
/* 3. Ensure this super-node is eligible -------------------------------------- */
@@ -93,7 +98,7 @@ func (task *CascadeRegistrationTask) Register(
if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil {
return err
}
- logtrace.Info(ctx, "Top supernode eligibility confirmed", fields)
+ logtrace.Info(ctx, "register: top supernode confirmed", fields)
task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send)
/* 4. Decode cascade metadata -------------------------------------------------- */
@@ -101,14 +106,14 @@ func (task *CascadeRegistrationTask) Register(
if err != nil {
return err
}
- logtrace.Info(ctx, "Cascade metadata decoded", fields)
+ logtrace.Info(ctx, "register: metadata decoded", fields)
task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send)
/* 5. Verify data hash --------------------------------------------------------- */
if err := task.verifyDataHash(ctx, req.DataHash, cascadeMeta.DataHash, fields); err != nil {
return err
}
- logtrace.Info(ctx, "Data hash verified", fields)
+ logtrace.Info(ctx, "register: data hash matched", fields)
task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send)
/* 6. Encode the raw data ------------------------------------------------------ */
@@ -116,7 +121,9 @@ func (task *CascadeRegistrationTask) Register(
if err != nil {
return err
}
- logtrace.Info(ctx, "Input encoded", fields)
+ // Promote to Info and include symbols directory for quick visibility
+ fields["symbols_dir"] = encResp.SymbolsDir
+ logtrace.Info(ctx, "register: input encoded", fields)
task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send)
/* 7. Signature verification + layout decode ---------------------------------- */
@@ -126,7 +133,7 @@ func (task *CascadeRegistrationTask) Register(
if err != nil {
return err
}
- logtrace.Info(ctx, "Signature verified", fields)
+ logtrace.Info(ctx, "register: signature verified", fields)
task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send)
/* 8. Generate RQ-ID files ----------------------------------------------------- */
@@ -134,46 +141,48 @@ func (task *CascadeRegistrationTask) Register(
if err != nil {
return err
}
- logtrace.Info(ctx, "RQID files generated", fields)
+ // Include count of ID files generated for visibility
+ fields["id_files_count"] = len(rqidResp.RedundantMetadataFiles)
+ logtrace.Info(ctx, "register: rqid files generated", fields)
task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send)
/* 9. Consistency checks ------------------------------------------------------- */
if err := verifyIDs(layout, encResp.Metadata); err != nil {
return task.wrapErr(ctx, "failed to verify IDs", err, fields)
}
- logtrace.Info(ctx, "RQIDs verified", fields)
+ logtrace.Info(ctx, "register: rqids validated", fields)
task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send)
/* 10. Simulate finalize to avoid storing artefacts if it would fail ---------- */
if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil {
fields[logtrace.FieldError] = err.Error()
- logtrace.Info(ctx, "Finalize simulation failed", fields)
+ logtrace.Info(ctx, "register: finalize simulation failed", fields)
// Emit explicit simulation failure event for client visibility
task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send)
return task.wrapErr(ctx, "finalize action simulation failed", err, fields)
}
- logtrace.Info(ctx, "Finalize simulation passed", fields)
+ logtrace.Info(ctx, "register: finalize simulation passed", fields)
// Transmit as a standard event so SDK can propagate it (dedicated type)
task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send)
/* 11. Persist artefacts -------------------------------------------------------- */
- // Persist artefacts to the P2P network. P2P interfaces return error only;
- // metrics are summarized at the cascade layer and emitted via event.
+ // Persist artefacts to the P2P network. P2P interfaces return error only;
+ // metrics are summarized at the cascade layer and emitted via event.
if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil {
return err
}
- // Emit compact analytics payload from centralized metrics collector
- task.emitArtefactsStored(ctx, fields, encResp.Metadata, send)
+ // Emit artefacts stored event (metrics payload removed; logs preserved)
+ task.emitArtefactsStored(ctx, fields, encResp.Metadata, send)
resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs)
if err != nil {
fields[logtrace.FieldError] = err.Error()
- logtrace.Info(ctx, "Finalize action error", fields)
+ logtrace.Info(ctx, "register: finalize action error", fields)
return task.wrapErr(ctx, "failed to finalize action", err, fields)
}
txHash := resp.TxResponse.TxHash
fields[logtrace.FieldTxHash] = txHash
- logtrace.Info(ctx, "Action finalized", fields)
+ logtrace.Info(ctx, "register: action finalized", fields)
task.streamEvent(SupernodeEventTypeActionFinalized, "Action finalized", txHash, send)
return nil
diff --git a/supernode/services/cascade/register_test.go b/supernode/services/cascade/register_test.go
index c73b96b7..6f56791a 100644
--- a/supernode/services/cascade/register_test.go
+++ b/supernode/services/cascade/register_test.go
@@ -21,8 +21,8 @@ import (
"github.com/cosmos/gogoproto/proto"
"lukechampine.com/blake3"
+ "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
- "go.uber.org/mock/gomock"
)
func TestCascadeRegistrationTask_Register(t *testing.T) {
@@ -104,10 +104,10 @@ func TestCascadeRegistrationTask_Register(t *testing.T) {
Metadata: codecpkg.Layout{Blocks: []codecpkg.Block{{BlockID: 1, Hash: "abc"}}},
}, nil)
- // 8. Store artefacts (no metrics returned; recorded centrally)
- p2p.EXPECT().
- StoreArtefacts(gomock.Any(), gomock.Any(), gomock.Any()).
- Return(nil)
+ // 8. Store artefacts (no metrics returned; recorded centrally)
+ p2p.EXPECT().
+ StoreArtefacts(gomock.Any(), gomock.Any(), gomock.Any()).
+ Return(nil)
},
expectedError: "",
expectedEvents: 12,
diff --git a/supernode/services/cascade/service.go b/supernode/services/cascade/service.go
index a1d9898b..f88c284b 100644
--- a/supernode/services/cascade/service.go
+++ b/supernode/services/cascade/service.go
@@ -56,11 +56,11 @@ func (service *CascadeService) GetRunningTasks() []string {
// NewCascadeService returns a new CascadeService instance
func NewCascadeService(config *Config, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService {
- return &CascadeService{
- config: config,
- SuperNodeService: base.NewSuperNodeService(p2pClient),
- LumeraClient: adaptors.NewLumeraClient(lumera),
- P2P: adaptors.NewP2PService(p2pClient, rqstore),
- RQ: adaptors.NewCodecService(codec),
- }
+ return &CascadeService{
+ config: config,
+ SuperNodeService: base.NewSuperNodeService(p2pClient),
+ LumeraClient: adaptors.NewLumeraClient(lumera),
+ P2P: adaptors.NewP2PService(p2pClient, rqstore),
+ RQ: adaptors.NewCodecService(codec),
+ }
}
diff --git a/supernode/services/cascade/service_test.go b/supernode/services/cascade/service_test.go
index eaa7bf7f..bc2998ad 100644
--- a/supernode/services/cascade/service_test.go
+++ b/supernode/services/cascade/service_test.go
@@ -8,8 +8,8 @@ import (
"github.com/LumeraProtocol/supernode/v2/supernode/services/cascade"
cascadeadaptormocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors/mocks"
"github.com/LumeraProtocol/supernode/v2/supernode/services/common"
+ "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
- "go.uber.org/mock/gomock"
)
func TestNewCascadeService(t *testing.T) {
diff --git a/supernode/services/common/base/supernode_service.go b/supernode/services/common/base/supernode_service.go
index 1d41715b..424556b0 100644
--- a/supernode/services/common/base/supernode_service.go
+++ b/supernode/services/common/base/supernode_service.go
@@ -52,7 +52,7 @@ func (service *SuperNodeService) RunHelper(ctx context.Context, nodeID string, p
service.Worker = task.NewWorker()
logtrace.Error(ctx, "Service run failed, retrying", logtrace.Fields{logtrace.FieldModule: "supernode", logtrace.FieldError: err.Error()})
} else {
- logtrace.Info(ctx, "Service run completed successfully - closing sn services", logtrace.Fields{logtrace.FieldModule: "supernode"})
+ logtrace.Debug(ctx, "Service run completed successfully - closing sn services", logtrace.Fields{logtrace.FieldModule: "supernode"})
return nil
}
}
diff --git a/supernode/services/common/base/supernode_task.go b/supernode/services/common/base/supernode_task.go
index 937e6013..2908558d 100644
--- a/supernode/services/common/base/supernode_task.go
+++ b/supernode/services/common/base/supernode_task.go
@@ -25,7 +25,7 @@ type SuperNodeTask struct {
func (task *SuperNodeTask) RunHelper(ctx context.Context, clean TaskCleanerFunc) error {
ctx = task.context(ctx)
logtrace.Debug(ctx, "Start task", logtrace.Fields{})
- defer logtrace.Info(ctx, "Task canceled", logtrace.Fields{})
+ defer logtrace.Debug(ctx, "Task canceled", logtrace.Fields{})
defer task.Cancel()
task.SetStatusNotifyFunc(func(status *state.Status) {
diff --git a/supernode/services/common/storage/handler.go b/supernode/services/common/storage/handler.go
index 210dab0f..9e570d03 100644
--- a/supernode/services/common/storage/handler.go
+++ b/supernode/services/common/storage/handler.go
@@ -14,7 +14,6 @@ import (
"github.com/LumeraProtocol/supernode/v2/p2p"
"github.com/LumeraProtocol/supernode/v2/pkg/errors"
"github.com/LumeraProtocol/supernode/v2/pkg/logtrace"
- "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics"
"github.com/LumeraProtocol/supernode/v2/pkg/storage/files"
"github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore"
"github.com/LumeraProtocol/supernode/v2/pkg/utils"
@@ -74,9 +73,7 @@ func (h *StorageHandler) StoreBatch(ctx context.Context, list [][]byte, typ int)
taskID = fmt.Sprintf("%v", val)
}
- logtrace.Info(ctx, "task_id in storeList", logtrace.Fields{logtrace.FieldTaskID: taskID})
- // Add taskID to context for metrics
- ctx = p2pmetrics.WithTaskID(ctx, taskID)
+ logtrace.Debug(ctx, "task_id in storeList", logtrace.Fields{logtrace.FieldTaskID: taskID})
return h.P2PClient.StoreBatch(ctx, list, typ, taskID)
}
@@ -110,7 +107,7 @@ func (h *StorageHandler) StoreRaptorQSymbolsIntoP2P(ctx context.Context, taskID,
sort.Strings(keys) // deterministic order inside the sample
}
- logtrace.Info(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)})
+ logtrace.Debug(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)})
/* stream in fixed-size batches -------------------------------------- */
for start := 0; start < len(keys); {
@@ -128,7 +125,7 @@ func (h *StorageHandler) StoreRaptorQSymbolsIntoP2P(ctx context.Context, taskID,
return fmt.Errorf("update first-batch flag: %w", err)
}
- logtrace.Info(ctx, "finished storing RaptorQ symbols", logtrace.Fields{"curr-time": time.Now().UTC(), "count": len(keys)})
+ logtrace.Debug(ctx, "finished storing RaptorQ symbols", logtrace.Fields{"curr-time": time.Now().UTC(), "count": len(keys)})
return nil
}
@@ -160,26 +157,24 @@ func walkSymbolTree(root string) ([]string, error) {
}
func (h *StorageHandler) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) error {
- logtrace.Info(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)})
+ logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)})
symbols, err := utils.LoadSymbols(root, fileKeys)
if err != nil {
return fmt.Errorf("load symbols: %w", err)
}
- // Add taskID to context for metrics
- ctx = p2pmetrics.WithTaskID(ctx, taskID)
if err := h.P2PClient.StoreBatch(ctx, symbols, P2PDataRaptorQSymbol, taskID); err != nil {
return fmt.Errorf("p2p store batch: %w", err)
}
- logtrace.Info(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)})
+ logtrace.Debug(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)})
if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil {
return fmt.Errorf("delete symbols: %w", err)
}
- logtrace.Info(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)})
+ logtrace.Debug(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)})
return nil
}
diff --git a/supernode/services/common/supernode/service.go b/supernode/services/common/supernode/service.go
index 13d5efe4..1d0b9dd0 100644
--- a/supernode/services/common/supernode/service.go
+++ b/supernode/services/common/supernode/service.go
@@ -54,7 +54,7 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric
logtrace.FieldMethod: "GetStatus",
logtrace.FieldModule: "SupernodeStatusService",
}
- logtrace.Info(ctx, "status request received", fields)
+ logtrace.Debug(ctx, "status request received", fields)
var resp StatusResponse
resp.Version = Version
@@ -218,104 +218,7 @@ func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetric
}
}
- // Recent batch store/retrieve (overall lists)
- if rbs, ok := dhtStats["recent_batch_store_overall"].([]kademlia.RecentBatchStoreEntry); ok {
- for _, e := range rbs {
- metrics.RecentBatchStore = append(metrics.RecentBatchStore, RecentBatchStoreEntry{
- TimeUnix: e.TimeUnix,
- SenderID: e.SenderID,
- SenderIP: e.SenderIP,
- Keys: e.Keys,
- DurationMS: e.DurationMS,
- OK: e.OK,
- Error: e.Error,
- })
- }
- } else if anyList, ok := dhtStats["recent_batch_store_overall"].([]interface{}); ok {
- for _, vi := range anyList {
- if e, ok := vi.(kademlia.RecentBatchStoreEntry); ok {
- metrics.RecentBatchStore = append(metrics.RecentBatchStore, RecentBatchStoreEntry{
- TimeUnix: e.TimeUnix,
- SenderID: e.SenderID,
- SenderIP: e.SenderIP,
- Keys: e.Keys,
- DurationMS: e.DurationMS,
- OK: e.OK,
- Error: e.Error,
- })
- }
- }
- }
- if rbr, ok := dhtStats["recent_batch_retrieve_overall"].([]kademlia.RecentBatchRetrieveEntry); ok {
- for _, e := range rbr {
- metrics.RecentBatchRetrieve = append(metrics.RecentBatchRetrieve, RecentBatchRetrieveEntry{
- TimeUnix: e.TimeUnix,
- SenderID: e.SenderID,
- SenderIP: e.SenderIP,
- Requested: e.Requested,
- Found: e.Found,
- DurationMS: e.DurationMS,
- Error: e.Error,
- })
- }
- } else if anyList, ok := dhtStats["recent_batch_retrieve_overall"].([]interface{}); ok {
- for _, vi := range anyList {
- if e, ok := vi.(kademlia.RecentBatchRetrieveEntry); ok {
- metrics.RecentBatchRetrieve = append(metrics.RecentBatchRetrieve, RecentBatchRetrieveEntry{
- TimeUnix: e.TimeUnix,
- SenderID: e.SenderID,
- SenderIP: e.SenderIP,
- Requested: e.Requested,
- Found: e.Found,
- DurationMS: e.DurationMS,
- Error: e.Error,
- })
- }
- }
- }
-
- // Per-IP buckets
- if byip, ok := dhtStats["recent_batch_store_by_ip"].(map[string][]kademlia.RecentBatchStoreEntry); ok {
- for ip, list := range byip {
- bucket := make([]RecentBatchStoreEntry, 0, len(list))
- for _, e := range list {
- bucket = append(bucket, RecentBatchStoreEntry{
- TimeUnix: e.TimeUnix,
- SenderID: e.SenderID,
- SenderIP: e.SenderIP,
- Keys: e.Keys,
- DurationMS: e.DurationMS,
- OK: e.OK,
- Error: e.Error,
- })
- }
- // initialize map if needed
- if metrics.RecentBatchStoreByIP == nil {
- metrics.RecentBatchStoreByIP = map[string][]RecentBatchStoreEntry{}
- }
- metrics.RecentBatchStoreByIP[ip] = bucket
- }
- }
- if byip, ok := dhtStats["recent_batch_retrieve_by_ip"].(map[string][]kademlia.RecentBatchRetrieveEntry); ok {
- for ip, list := range byip {
- bucket := make([]RecentBatchRetrieveEntry, 0, len(list))
- for _, e := range list {
- bucket = append(bucket, RecentBatchRetrieveEntry{
- TimeUnix: e.TimeUnix,
- SenderID: e.SenderID,
- SenderIP: e.SenderIP,
- Requested: e.Requested,
- Found: e.Found,
- DurationMS: e.DurationMS,
- Error: e.Error,
- })
- }
- if metrics.RecentBatchRetrieveByIP == nil {
- metrics.RecentBatchRetrieveByIP = map[string][]RecentBatchRetrieveEntry{}
- }
- metrics.RecentBatchRetrieveByIP[ip] = bucket
- }
- }
+ // Detailed recent per-request lists removed from API mapping
}
// DHT rolling metrics snapshot is attached at top-level under dht_metrics
diff --git a/supernode/services/common/supernode/types.go b/supernode/services/common/supernode/types.go
index 9a6f0953..e84b954a 100644
--- a/supernode/services/common/supernode/types.go
+++ b/supernode/services/common/supernode/types.go
@@ -60,16 +60,12 @@ type NetworkInfo struct {
// P2PMetrics mirrors the proto P2P metrics for status API
type P2PMetrics struct {
- DhtMetrics DhtMetrics
- NetworkHandleMetrics map[string]HandleCounters
- ConnPoolMetrics map[string]int64
- BanList []BanEntry
- Database DatabaseStats
- Disk DiskStatus
- RecentBatchStore []RecentBatchStoreEntry
- RecentBatchRetrieve []RecentBatchRetrieveEntry
- RecentBatchStoreByIP map[string][]RecentBatchStoreEntry
- RecentBatchRetrieveByIP map[string][]RecentBatchRetrieveEntry
+ DhtMetrics DhtMetrics
+ NetworkHandleMetrics map[string]HandleCounters
+ ConnPoolMetrics map[string]int64
+ BanList []BanEntry
+ Database DatabaseStats
+ Disk DiskStatus
}
type StoreSuccessPoint struct {
@@ -122,25 +118,7 @@ type DiskStatus struct {
FreeMB float64
}
-type RecentBatchStoreEntry struct {
- TimeUnix int64
- SenderID string
- SenderIP string
- Keys int
- DurationMS int64
- OK bool
- Error string
-}
-
-type RecentBatchRetrieveEntry struct {
- TimeUnix int64
- SenderID string
- SenderIP string
- Requested int
- Found int
- DurationMS int64
- Error string
-}
+// Removed: recent per-request lists from public API
// TaskProvider interface defines the contract for services to provide
// their running task information to the status service
diff --git a/supernode/services/verifier/verifier.go b/supernode/services/verifier/verifier.go
index 867bd966..68a2ae77 100644
--- a/supernode/services/verifier/verifier.go
+++ b/supernode/services/verifier/verifier.go
@@ -75,7 +75,7 @@ func (cv *ConfigVerifier) VerifyConfig(ctx context.Context) (*VerificationResult
// Check 5: Verify all required ports are available
cv.checkPortsAvailable(result)
- logtrace.Info(ctx, "Config verification completed", logtrace.Fields{
+ logtrace.Debug(ctx, "Config verification completed", logtrace.Fields{
"valid": result.IsValid(),
"errors": len(result.Errors),
"warnings": len(result.Warnings),
diff --git a/tests/integration/p2p/p2p_integration_test.go b/tests/integration/p2p/p2p_integration_test.go
index bce71f58..478711d2 100644
--- a/tests/integration/p2p/p2p_integration_test.go
+++ b/tests/integration/p2p/p2p_integration_test.go
@@ -108,7 +108,7 @@ func TestP2PBasicIntegration(t *testing.T) {
// Add debug logging
log.Printf("Storing batch with keys: %v", expectedKeys)
- err := services[0].StoreBatch(ctx, batchData, 0, taskID)
+ err := services[0].StoreBatch(ctx, batchData, 0, taskID)
require.NoError(t, err)
// Add immediate verification
@@ -203,7 +203,8 @@ func SetupTestP2PNodes(t *testing.T, ctx context.Context) ([]p2p.Client, []*rqst
require.NoError(t, err, "failed to create rqstore for node %d: %v", i, err)
rqStores = append(rqStores, rqStore)
- service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil)
+ // Disable metrics in integration tests by default
+ service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil)
require.NoError(t, err, "failed to create p2p service for node %d: %v", i, err)
// Start P2P service
diff --git a/tests/system/go.mod b/tests/system/go.mod
index 15b8212d..e6eb3bba 100644
--- a/tests/system/go.mod
+++ b/tests/system/go.mod
@@ -95,6 +95,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.2.4 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/mock v1.6.0 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/flatbuffers v1.12.1 // indirect
diff --git a/tests/system/go.sum b/tests/system/go.sum
index 9ff0158a..6e9c0112 100644
--- a/tests/system/go.sum
+++ b/tests/system/go.sum
@@ -803,6 +803,7 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U=
@@ -888,6 +889,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -933,6 +935,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1025,6 +1028,7 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=