diff --git a/.github/actions/setup-env/action.yml b/.github/actions/setup-env/action.yml index 1b44339d..8ed19f6c 100644 --- a/.github/actions/setup-env/action.yml +++ b/.github/actions/setup-env/action.yml @@ -1,7 +1,11 @@ name: Setup Environment description: Sets up Go (dynamically from go.mod) and installs system dependencies -inputs: {} +inputs: + bust_lumera_retag: + description: "One-time: remove lumera v1.8.0 sums after retag" + required: false + default: 'false' outputs: go-version: description: "Go version parsed from go.mod" @@ -29,6 +33,18 @@ runs: sudo apt-get update sudo apt-get install -y libwebp-dev make + - name: One-time reset retagged lumera checksums + if: ${{ inputs.bust_lumera_retag == 'true' }} + shell: bash + run: | + echo "Busting go.sum entries for github.com/LumeraProtocol/lumera v1.8.0 (one-time)" + # Remove stale checksums in all local modules + find . -name 'go.sum' -maxdepth 3 -print0 | xargs -0 -I{} sed -i \ + '/github.com\/LumeraProtocol\/lumera v1.8.0/d' {} + # Clear module/build caches to avoid cached zips + go clean -modcache || true + rm -rf "$(go env GOCACHE)" || true + - name: Set Go Private Modules shell: bash run: | diff --git a/.github/workflows/build&release.yml b/.github/workflows/build&release.yml index ead3e013..0416c958 100644 --- a/.github/workflows/build&release.yml +++ b/.github/workflows/build&release.yml @@ -27,14 +27,20 @@ jobs: - name: Setup Go and dependencies uses: ./.github/actions/setup-env + with: + bust_lumera_retag: 'true' - name: Build binaries run: | + # Ensure module metadata is up to date + go mod tidy # Build supernode CGO_ENABLED=1 go build -trimpath -o /tmp/supernode ./supernode # Build sn-manager cd sn-manager + # Ensure sn-manager module metadata is up to date + go mod tidy CGO_ENABLED=0 go build -trimpath -o /tmp/sn-manager . 
echo "✅ Build successful" @@ -68,6 +74,8 @@ jobs: - name: Setup Go and dependencies uses: ./.github/actions/setup-env + with: + bust_lumera_retag: 'true' - name: Prepare Release Variables id: vars @@ -82,7 +90,12 @@ jobs: echo "binary_name=supernode-linux-amd64" >> $GITHUB_OUTPUT - name: Build Release Version + env: + DD_API_KEY: ${{ secrets.DD_API_KEY }} + DD_SITE: ${{ secrets.DD_SITE }} run: | + # Ensure module metadata is up to date + go mod tidy mkdir -p release # Build supernode @@ -94,12 +107,17 @@ jobs: -ldflags="-s -w \ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.Version=${{ steps.vars.outputs.version }} \ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.GitCommit=${{ steps.vars.outputs.git_commit }} \ - -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=${{ steps.vars.outputs.build_time }}" \ + -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=${{ steps.vars.outputs.build_time }} \ + -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.MinVer=${{ vars.MIN_VER }} \ + -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDAPIKey=${DD_API_KEY} \ + -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDSite=${DD_SITE}" \ -o release/supernode \ ./supernode # Build sn-manager cd sn-manager + # Ensure sn-manager module metadata is up to date + go mod tidy CGO_ENABLED=0 \ GOOS=linux \ GOARCH=amd64 \ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index bad0f6ee..c8b03728 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -16,7 +16,11 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - name: Setup Go and system deps - uses: ./.github/actions/setup-env + uses: ./.github/actions/setup-env + with: + bust_lumera_retag: 'true' + - name: Go mod tidy + run: go mod tidy - name: Run unit tests run: go test $(go list ./... 
| grep -v '/tests') -v @@ -30,7 +34,12 @@ jobs: uses: actions/checkout@v4 - name: Setup Go and system deps - uses: ./.github/actions/setup-env + uses: ./.github/actions/setup-env + with: + bust_lumera_retag: 'true' + + - name: Go mod tidy + run: go mod tidy - name: Run integration tests run: go test -v ./tests/integration/... @@ -44,7 +53,12 @@ jobs: uses: actions/checkout@v4 - name: Setup Go and system deps - uses: ./.github/actions/setup-env + uses: ./.github/actions/setup-env + with: + bust_lumera_retag: 'true' + + - name: Go mod tidy + run: go mod tidy - name: Install Lumera @@ -75,4 +89,4 @@ jobs: # run: make setup-supernodes # - name: Run sn-manager e2e tests - # run: make test-sn-manager \ No newline at end of file + # run: make test-sn-manager diff --git a/.gitignore b/.gitignore index 685adc58..5fe0570a 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,7 @@ *.so *.dylib *.idea/ - +*.zip # Test binary, built with `go test -c` *.test @@ -21,11 +21,12 @@ CLAUDE.md # Go workspace file go.work go.work.sum -*tests/system/testnet +tests/system/testnet +tests/system/**/supernode-data* +tests/system/data +tests/system/1 # env file .env /data /release -/tests/system/data -tests/system/**/supernode-data* AGENTS.md \ No newline at end of file diff --git a/Makefile b/Makefile index 01272fbf..2a171245 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,8 @@ -.PHONY: build build-release build-sncli build-sn-manager +.PHONY: build build-sncli build-sn-manager .PHONY: install-lumera setup-supernodes system-test-setup install-deps .PHONY: gen-cascade gen-supernode .PHONY: test-e2e test-unit test-integration test-system +.PHONY: release # Build variables VERSION ?= $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev") @@ -9,9 +10,13 @@ GIT_COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") BUILD_TIME ?= $(shell date -u '+%Y-%m-%d_%H:%M:%S') # Linker flags for version information +# Optional minimum peer version for DHT gating can be 
provided via MIN_VER env/make var LDFLAGS = -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.Version=$(VERSION) \ -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.GitCommit=$(GIT_COMMIT) \ - -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=$(BUILD_TIME) + -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.BuildTime=$(BUILD_TIME) \ + -X github.com/LumeraProtocol/supernode/v2/supernode/cmd.MinVer=$(MIN_VER) \ + -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDAPIKey=$(DD_API_KEY) \ + -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDSite=$(DD_SITE) # Linker flags for sn-manager SN_MANAGER_LDFLAGS = -X main.Version=$(VERSION) \ @@ -20,11 +25,8 @@ SN_MANAGER_LDFLAGS = -X main.Version=$(VERSION) \ build: @mkdir -p release - CGO_ENABLED=1 \ - GOOS=linux \ - GOARCH=amd64 \ - echo "Building supernode..." - go build \ + @echo "Building supernode..." + CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build \ -trimpath \ -ldflags="-s -w $(LDFLAGS)" \ -o release/supernode-linux-amd64 \ @@ -96,7 +98,7 @@ gen-supernode: --grpc-gateway_out=gen \ --grpc-gateway_opt=paths=source_relative \ --openapiv2_out=gen \ - proto/supernode/supernode.proto + proto/supernode/service.proto proto/supernode/status.proto # Define the paths SUPERNODE_SRC=supernode/main.go @@ -113,14 +115,17 @@ SETUP_SCRIPT=tests/scripts/setup-supernodes.sh # Install Lumera # Optional: specify lumera binary path to skip download LUMERAD_BINARY ?= +# Derive default Lumera version from go.mod (strip pseudo-version suffix if present) +LUMERA_DEFAULT_VERSION := $(shell awk '/github.com\/LumeraProtocol\/lumera[[:space:]]+v/ {print $$2; exit}' go.mod | sed 's/-.*//') # Optional: specify installation mode (latest-release, latest-tag, or vX.Y.Z) -INSTALL_MODE ?=latest-tag +INSTALL_MODE ?= $(if $(LUMERA_DEFAULT_VERSION),$(LUMERA_DEFAULT_VERSION),latest-release) install-lumera: @echo "Installing Lumera..." 
@chmod +x tests/scripts/install-lumera.sh @sudo LUMERAD_BINARY="$(LUMERAD_BINARY)" tests/scripts/install-lumera.sh $(INSTALL_MODE) @echo "PtTDUHythfRfXHh63yzyiGDid4TZj2P76Zd,18749999981413" > ~/claims.csv + # Setup supernode environments setup-supernodes: @echo "Setting up all supernode environments..." @@ -146,3 +151,30 @@ test-cascade: test-sn-manager: @echo "Running sn-manager e2e tests..." @cd tests/system && go test -tags=system_test -v -run '^TestSNManager' . + + + +# Release command: push branch, tag, and push tag with auto-increment - this is for testing only (including releases) setup a new remote upstream or rename the script +release: + @echo "Getting current branch..." + $(eval CURRENT_BRANCH := $(shell git branch --show-current)) + @echo "Current branch: $(CURRENT_BRANCH)" + + @echo "Getting latest tag..." + $(eval LATEST_TAG := $(shell git tag -l "v*" | sort -V | tail -n1)) + $(eval NEXT_TAG := $(shell \ + if [ -z "$(LATEST_TAG)" ]; then \ + echo "v2.5.0"; \ + else \ + echo "$(LATEST_TAG)" | sed 's/^v//' | awk -F. '{print "v" $$1 "." $$2 "." $$3+1}'; \ + fi)) + @echo "Next tag: $(NEXT_TAG)" + + @echo "Pushing branch to upstream..." + git push upstream $(CURRENT_BRANCH) -f + + @echo "Creating and pushing tag $(NEXT_TAG)..." 
+ git tag $(NEXT_TAG) + git push upstream $(NEXT_TAG) + + @echo "Release complete: $(NEXT_TAG) pushed to upstream" diff --git a/README.md b/README.md index 6e152ba3..4b4da332 100644 --- a/README.md +++ b/README.md @@ -54,19 +54,12 @@ message StatusResponse { string hardware_summary = 4; // Formatted hardware summary (e.g., "8 cores / 32GB RAM") } - message ServiceTasks { - string service_name = 1; - repeated string task_ids = 2; - int32 task_count = 3; - } - message Network { int32 peers_count = 1; // Number of connected peers in P2P network repeated string peer_addresses = 2; // List of connected peer addresses (format: "ID@IP:Port") } Resources resources = 3; - repeated ServiceTasks running_tasks = 4; // Services with currently running tasks repeated string registered_services = 5; // All registered/available services Network network = 6; // P2P network information int32 rank = 7; // Rank in the top supernodes list (0 if not in top list) diff --git a/cmd/sncli/cli/cmd_get_status.go b/cmd/sncli/cli/cmd_get_status.go index 9603089b..303d20cd 100644 --- a/cmd/sncli/cli/cmd_get_status.go +++ b/cmd/sncli/cli/cmd_get_status.go @@ -16,16 +16,9 @@ func (c *CLI) GetSupernodeStatus() error { fmt.Println("Supernode Status:") fmt.Printf(" Version: %s\n", resp.Version) fmt.Printf(" Uptime: %d seconds\n", resp.UptimeSeconds) - fmt.Printf(" CPU Usage: %.2f%% (%d cores)\n", resp.Resources.CPU.UsagePercent, resp.Resources.CPU.Cores) + fmt.Printf(" CPU Usage: %.2f%% (%d cores)\n", resp.Resources.Cpu.UsagePercent, resp.Resources.Cpu.Cores) fmt.Printf(" Memory: %.2fGB used / %.2fGB total (%.2f%%)\n", - resp.Resources.Memory.UsedGB, resp.Resources.Memory.TotalGB, resp.Resources.Memory.UsagePercent) - - if len(resp.RunningTasks) > 0 { - fmt.Println(" Running Tasks:") - for _, service := range resp.RunningTasks { - fmt.Printf(" - %s (Tasks: %d)\n", service.ServiceName, service.TaskCount) - } - } + resp.Resources.Memory.UsedGb, resp.Resources.Memory.TotalGb, 
resp.Resources.Memory.UsagePercent) if len(resp.RegisteredServices) > 0 { fmt.Println(" Registered Services:") @@ -38,8 +31,8 @@ func (c *CLI) GetSupernodeStatus() error { if resp.Rank > 0 { fmt.Printf(" Rank: %d\n", resp.Rank) } - if resp.IPAddress != "" { - fmt.Printf(" IP Address: %s\n", resp.IPAddress) + if resp.IpAddress != "" { + fmt.Printf(" IP Address: %s\n", resp.IpAddress) } return nil diff --git a/cmd/sncli/go.mod b/cmd/sncli/go.mod index ef7bb7e0..009f0840 100644 --- a/cmd/sncli/go.mod +++ b/cmd/sncli/go.mod @@ -1,108 +1,116 @@ module github.com/LumeraProtocol/supernode/v2/cmd/sncli -go 1.24.1 +go 1.25.1 replace ( github.com/LumeraProtocol/supernode/v2 => ../.. github.com/LumeraProtocol/supernode/v2/supernode => ../../supernode - github.com/bytedance/sonic => github.com/bytedance/sonic v1.14.0 - github.com/bytedance/sonic/loader => github.com/bytedance/sonic/loader v0.3.0 + github.com/cosmos/cosmos-sdk => github.com/cosmos/cosmos-sdk v0.50.14 ) require ( - github.com/BurntSushi/toml v1.4.0 - github.com/LumeraProtocol/lumera v1.7.0 - github.com/LumeraProtocol/supernode/v2 v2.1.0 - github.com/cosmos/cosmos-sdk v0.50.14 - github.com/spf13/cobra v1.9.1 - google.golang.org/grpc v1.71.0 - google.golang.org/protobuf v1.36.6 + github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c + github.com/LumeraProtocol/lumera v1.8.0 + github.com/LumeraProtocol/supernode/v2 v2.3.88 + github.com/cosmos/cosmos-sdk v0.53.0 + github.com/spf13/cobra v1.10.1 + google.golang.org/grpc v1.76.0 + google.golang.org/protobuf v1.36.10 ) require ( - cosmossdk.io/api v0.9.0 // indirect - cosmossdk.io/collections v0.4.0 // indirect + cosmossdk.io/api v0.9.2 // indirect + cosmossdk.io/collections v1.3.0 // indirect cosmossdk.io/core v0.11.3 // indirect - cosmossdk.io/depinject v1.1.0 // indirect + cosmossdk.io/depinject v1.2.0 // indirect cosmossdk.io/errors v1.0.2 // indirect - cosmossdk.io/log v1.5.0 // indirect + cosmossdk.io/log v1.6.0 // indirect cosmossdk.io/math v1.5.3 // 
indirect - cosmossdk.io/store v1.1.1 // indirect - cosmossdk.io/x/tx v0.13.7 // indirect + cosmossdk.io/schema v1.1.0 // indirect + cosmossdk.io/store v1.1.2 // indirect + cosmossdk.io/x/tx v0.14.0 // indirect + cosmossdk.io/x/upgrade v0.2.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect - github.com/DataDog/datadog-go v3.2.0+incompatible // indirect - github.com/DataDog/zstd v1.5.5 // indirect + github.com/DataDog/datadog-go v4.8.3+incompatible // indirect + github.com/DataDog/zstd v1.5.7 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/bgentry/speakeasy v0.2.0 // indirect github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce // indirect - github.com/bytedance/sonic v1.12.3 // indirect + github.com/bytedance/gopkg v0.1.3 // indirect + github.com/bytedance/sonic v1.14.1 // indirect github.com/bytedance/sonic/loader v0.3.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cloudwego/base64x v0.1.5 // indirect - github.com/cockroachdb/errors v1.11.3 // indirect - github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect - github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v1.1.2 // indirect - github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect + github.com/cockroachdb/errors v1.12.0 // indirect + github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a // indirect + github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect + github.com/cockroachdb/pebble v1.1.5 // indirect + github.com/cockroachdb/redact v1.1.6 
// indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/cometbft/cometbft v0.38.15 // indirect + github.com/cometbft/cometbft v0.38.18 // indirect github.com/cometbft/cometbft-db v0.14.1 // indirect github.com/cosmos/btcutil v1.0.5 // indirect - github.com/cosmos/cosmos-db v1.1.1 // indirect + github.com/cosmos/cosmos-db v1.1.2 // indirect github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect github.com/cosmos/gogogateway v1.2.0 // indirect github.com/cosmos/gogoproto v1.7.0 // indirect - github.com/cosmos/iavl v1.2.2 // indirect + github.com/cosmos/iavl v1.2.4 // indirect + github.com/cosmos/ibc-go/v10 v10.3.0 // indirect github.com/cosmos/ics23/go v0.11.0 // indirect github.com/cosmos/ledger-cosmos-go v0.14.0 // indirect - github.com/danieljoos/wincred v1.2.1 // indirect + github.com/danieljoos/wincred v1.2.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect - github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/desertbit/timer v1.0.1 // indirect github.com/dgraph-io/badger/v4 v4.2.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/dvsekhvalnov/jose2go v1.7.0 // indirect github.com/emicklei/dot v1.6.2 // indirect - github.com/fatih/color v1.16.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/getsentry/sentry-go v0.32.0 // indirect github.com/go-errors/errors v1.5.1 // 
indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.4 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/flatbuffers v1.12.1 // indirect + github.com/google/flatbuffers v24.3.25+incompatible // indirect github.com/google/go-cmp v0.7.0 // indirect + github.com/google/orderedcode v0.0.1 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-metrics v0.5.3 // indirect - github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-plugin v1.6.3 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - github.com/hdevalence/ed25519consensus v0.1.0 // indirect - 
github.com/huandu/skiplist v1.2.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/yamux v0.1.2 // indirect + github.com/hdevalence/ed25519consensus v0.2.0 // indirect + github.com/huandu/skiplist v1.2.1 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/improbable-eng/grpc-web v0.15.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -110,16 +118,15 @@ require ( github.com/jmoiron/sqlx v1.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/linxGnu/grocksdb v1.9.8 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-sqlite3 v1.14.24 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/minio/highwayhash v1.0.3 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mtibben/percent v0.2.1 // indirect @@ -127,27 +134,26 @@ require ( github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect github.com/oklog/run v1.1.0 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - 
github.com/prometheus/client_golang v1.20.5 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.63.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/rs/cors v1.11.1 // indirect - github.com/rs/zerolog v1.33.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/rs/zerolog v1.34.0 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sasha-s/go-deadlock v0.3.5 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect - github.com/spf13/viper v1.19.0 // indirect - github.com/stretchr/testify v1.10.0 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spf13/viper v1.21.0 // indirect + github.com/stretchr/testify v1.11.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tendermint/go-amino v0.16.0 // indirect @@ -155,28 +161,28 @@ require ( github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect - go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 // indirect + go.etcd.io/bbolt v1.4.0-alpha.1 // indirect go.opencensus.io v0.24.0 // indirect - go.uber.org/mock v0.5.2 // indirect + go.uber.org/mock v0.6.0 // indirect go.uber.org/multierr v1.11.0 // 
indirect go.uber.org/ratelimit v0.3.1 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/arch v0.3.0 // indirect - golang.org/x/crypto v0.36.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.38.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect - google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/arch v0.15.0 // indirect + golang.org/x/crypto v0.42.0 // indirect + golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect + golang.org/x/net v0.44.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/term v0.35.0 // indirect + golang.org/x/text v0.29.0 // indirect + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - gotest.tools/v3 v3.5.1 // indirect - lukechampine.com/blake3 v1.4.0 // indirect - nhooyr.io/websocket v1.8.10 // indirect + gotest.tools/v3 v3.5.2 // indirect + lukechampine.com/blake3 v1.4.1 // indirect + nhooyr.io/websocket v1.8.17 // indirect pgregory.net/rapid v1.2.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/cmd/sncli/go.sum b/cmd/sncli/go.sum index 95b01d4a..332e380f 100644 --- a/cmd/sncli/go.sum +++ b/cmd/sncli/go.sum @@ -1,48 +1,52 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod 
h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.6.0 h1:5x+d6b5zdezZ7gmLWD1m/xNjnaQ2YDhmIz/HH3doy1g= -cloud.google.com/go/auth v0.6.0/go.mod h1:b4acV+jLQDyjwm4OXHYjNvRi4jvGBzHWJRtJcy+2P4g= -cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= -cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= -cloud.google.com/go/compute v1.27.1 h1:0WbBLIPNANheCRZ4h8QhgzjN53KMutbiVBOLtPiVzBU= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= -cloud.google.com/go/iam v1.1.9 h1:oSkYLVtVme29uGYrOcKcvJRht7cHJpYD09GM9JaR0TE= -cloud.google.com/go/iam v1.1.9/go.mod h1:Nt1eDWNYH9nGQg3d/mY7U1hvfGmsaG9o/kLGoLoLXjQ= -cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= -cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= -cosmossdk.io/api v0.9.0 h1:QYs9APeSlDNGbsBOBFjp3jXgGd4hnEPnnku3+W3tT4Y= -cosmossdk.io/api v0.9.0/go.mod h1:pLkU/NSqYHWxyN7XftVt8iD7oldKJzqMZgzeiOmT2nk= -cosmossdk.io/client/v2 v2.0.0-beta.5 h1:0LVv3nEByn//hFDIrYLs2WvsEU3HodOelh4SDHnA/1I= -cosmossdk.io/client/v2 v2.0.0-beta.5/go.mod h1:4p0P6o0ro+FizakJUYS9SeM94RNbv0thLmkHRw5o5as= -cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= -cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= +cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= +cloud.google.com/go v0.120.0/go.mod 
h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= +cloud.google.com/go/auth v0.16.0 h1:Pd8P1s9WkcrBE2n/PhAwKsdrR35V3Sg2II9B+ndM3CU= +cloud.google.com/go/auth v0.16.0/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute v1.37.0 h1:XxtZlXYkZXub3LNaLu90TTemcFqIU1yZ4E4q9VlR39A= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= +cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= +cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= +cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= +cosmossdk.io/client/v2 v2.0.0-beta.8.0.20250402172810-41e3e9d004a1 h1:nlMUeKu6CGrO7Gxt5S31qT3g27CHmBJHsZPjqHApVTI= +cosmossdk.io/client/v2 v2.0.0-beta.8.0.20250402172810-41e3e9d004a1/go.mod h1:xgv0ejeOk5yeDraPW5tv+PfBkCDt4yYa/+u45MyP+bM= +cosmossdk.io/collections v1.3.0 h1:RUY23xXBy/bu5oSHZ5y+mkJRyA4ZboKDO4Yvx4+g2uc= +cosmossdk.io/collections v1.3.0/go.mod h1:cqVpBMDGEYhuNmNSXIOmqpnQ7Eav43hpJIetzLuEkns= cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= cosmossdk.io/core v0.11.3/go.mod h1:9rL4RE1uDt5AJ4Tg55sYyHWXA16VmpHgbe0PbJc6N2Y= -cosmossdk.io/depinject v1.1.0 
h1:wLan7LG35VM7Yo6ov0jId3RHWCGRhe8E8bsuARorl5E= -cosmossdk.io/depinject v1.1.0/go.mod h1:kkI5H9jCGHeKeYWXTqYdruogYrEeWvBQCw1Pj4/eCFI= +cosmossdk.io/depinject v1.2.0 h1:6NW/FSK1IkWTrX7XxUpBmX1QMBozpEI9SsWkKTBc5zw= +cosmossdk.io/depinject v1.2.0/go.mod h1:pvitjtUxZZZTQESKNS9KhGjWVslJZxtO9VooRJYyPjk= cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= -cosmossdk.io/log v1.5.0 h1:dVdzPJW9kMrnAYyMf1duqacoidB9uZIl+7c6z0mnq0g= -cosmossdk.io/log v1.5.0/go.mod h1:Tr46PUJjiUthlwQ+hxYtUtPn4D/oCZXAkYevBeh5+FI= +cosmossdk.io/log v1.6.0 h1:SJIOmJ059wi1piyRgNRXKXhlDXGqnB5eQwhcZKv2tOk= +cosmossdk.io/log v1.6.0/go.mod h1:5cXXBvfBkR2/BcXmosdCSLXllvgSjphrrDVdfVRmBGM= cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= -cosmossdk.io/store v1.1.1 h1:NA3PioJtWDVU7cHHeyvdva5J/ggyLDkyH0hGHl2804Y= -cosmossdk.io/store v1.1.1/go.mod h1:8DwVTz83/2PSI366FERGbWSH7hL6sB7HbYp8bqksNwM= +cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= +cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI= +cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o= +cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A= cosmossdk.io/x/circuit v0.1.1 h1:KPJCnLChWrxD4jLwUiuQaf5mFD/1m7Omyo7oooefBVQ= cosmossdk.io/x/circuit v0.1.1/go.mod h1:B6f/urRuQH8gjt4eLIXfZJucrbreuYrKh5CSjaOxr+Q= cosmossdk.io/x/evidence v0.1.1 h1:Ks+BLTa3uftFpElLTDp9L76t2b58htjVbSZ86aoK/E4= cosmossdk.io/x/evidence v0.1.1/go.mod h1:OoDsWlbtuyqS70LY51aX8FBTvguQqvFrt78qL7UzeNc= cosmossdk.io/x/feegrant v0.1.1 h1:EKFWOeo/pup0yF0svDisWWKAA9Zags6Zd0P3nRvVvw8= cosmossdk.io/x/feegrant v0.1.1/go.mod h1:2GjVVxX6G2fta8LWj7pC/ytHjryA6MHAJroBWHFNiEQ= -cosmossdk.io/x/nft v0.1.1 h1:pslAVS8P5NkW080+LWOamInjDcq+v2GSCo+BjN9sxZ8= -cosmossdk.io/x/nft v0.1.1/go.mod 
h1:Kac6F6y2gsKvoxU+fy8uvxRTi4BIhLOor2zgCNQwVgY= -cosmossdk.io/x/tx v0.13.7 h1:8WSk6B/OHJLYjiZeMKhq7DK7lHDMyK0UfDbBMxVmeOI= -cosmossdk.io/x/tx v0.13.7/go.mod h1:V6DImnwJMTq5qFjeGWpXNiT/fjgE4HtmclRmTqRVM3w= -cosmossdk.io/x/upgrade v0.1.4 h1:/BWJim24QHoXde8Bc64/2BSEB6W4eTydq0X/2f8+g38= -cosmossdk.io/x/upgrade v0.1.4/go.mod h1:9v0Aj+fs97O+Ztw+tG3/tp5JSlrmT7IcFhAebQHmOPo= +cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA= +cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM= +cosmossdk.io/x/upgrade v0.2.0 h1:ZHy0xny3wBCSLomyhE06+UmQHWO8cYlVYjfFAJxjz5g= +cosmossdk.io/x/upgrade v0.2.0/go.mod h1:DXDtkvi//TrFyHWSOaeCZGBoiGAE6Rs8/0ABt2pcDD0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= @@ -50,25 +54,40 @@ github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMb github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= -github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= 
+github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/CosmWasm/wasmd v0.53.0 h1:kdaoAi20bIb4VCsxw9pRaT2g5PpIp82Wqrr9DRVN9ao= -github.com/CosmWasm/wasmd v0.53.0/go.mod h1:FJl/aWjdpGof3usAMFQpDe07Rkx77PUzp0cygFMOvtw= -github.com/CosmWasm/wasmvm/v2 v2.1.2 h1:GkJ5bAsRlLHfIQVg/FY1VHwLyBwlCjAhDea0B8L+e20= -github.com/CosmWasm/wasmvm/v2 v2.1.2/go.mod h1:bMhLQL4Yp9CzJi9A83aR7VO9wockOsSlZbT4ztOl6bg= -github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/CosmWasm/wasmd v0.55.0-ibc2.0 h1:9bH+QDnSGxmZhjSykLYGtW4sltzGFFVm10Awk683q2Y= +github.com/CosmWasm/wasmd v0.55.0-ibc2.0/go.mod h1:c9l+eycjUB2zNVLIGjAXd7QrFEbxVTEa1Fh1Mx74VwQ= +github.com/CosmWasm/wasmvm/v3 v3.0.0-ibc2.0 h1:QoagSm5iYuRSPYDxgRxsa6hVfDppUp4+bOwY7bDuMO0= +github.com/CosmWasm/wasmvm/v3 v3.0.0-ibc2.0/go.mod h1:oknpb1bFERvvKcY7vHRp1F/Y/z66xVrsl7n9uWkOAlM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= -github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= +github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= +github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.7.0 h1:F5zgRBnCtgGfdMB6jz01PFWIzbS8VjQfCu1H9OYt3BU= -github.com/LumeraProtocol/lumera v1.7.0/go.mod h1:c1M+sjewuCvxw+pznwlspUzenDJI8Y+suKB3RFKS2Wo= +github.com/LumeraProtocol/lumera v1.8.0 h1:0t5/6qOSs9wKti7utPAWo9Jq8wk2X+L/eEaH8flk/Hc= +github.com/LumeraProtocol/lumera v1.8.0/go.mod h1:38uAZxxleZyXaWKbqOQKwjw7CSX92lTxdF+B7c4SRPw= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/adlio/schema v1.3.6 h1:k1/zc2jNfeiZBA5aFTRy37jlBIuCkXCm0XmvpzCKI9I= +github.com/adlio/schema v1.3.6/go.mod 
h1:qkxwLgPBd1FgLRHYVCmQT/rrBr3JH38J9LjmVzWNudg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -81,12 +100,13 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.44.224 h1:09CiaaF35nRmxrzWZ2uRq5v6Ghg/d2RiPjZnSgtt+RQ= -github.com/aws/aws-sdk-go v1.44.224/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.49.0 h1:g9BkW1fo9GqKfwg2+zCD+TW/D36Ux+vtfJ8guF4AYmY= +github.com/aws/aws-sdk-go v1.49.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= @@ -98,10 +118,10 @@ github.com/beorn7/perks v1.0.1/go.mod 
h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= -github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bgentry/speakeasy v0.2.0 h1:tgObeVOf8WAvtuAX6DhJ4xks4CFNwPDZiqzGqIHE51E= +github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4= +github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= @@ -117,13 +137,16 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/protocompile v0.14.0 h1:z3DW4IvXE5G/uTOnSQn+qwQQxvhckkTWLS/0No/o7KU= -github.com/bufbuild/protocompile v0.14.0/go.mod 
h1:N6J1NYzkspJo3ZwyL4Xjvli86XOj1xq4qAasUFxGups= -github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= -github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= +github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= +github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= +github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM= +github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w= +github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc= github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -141,8 +164,8 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= -github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= 
+github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= @@ -150,36 +173,40 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= -github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= -github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce 
h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= -github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= -github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= -github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= -github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/errors v1.12.0 h1:d7oCs6vuIMUQRVbi6jWWWEJZahLCfJpnJSVobd1/sUo= +github.com/cockroachdb/errors v1.12.0/go.mod h1:SvzfYNNBshAVbZ8wzNc/UPK3w1vf0dKDUP41ucAIf7g= +github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a h1:f52TdbU4D5nozMAhO9TvTJ2ZMCXtN4VIAmfrrZ0JXQ4= +github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 h1:ASDL+UJcILMqgNeV5jiqR4j+sTuvQNHdf2chuKj1M5k= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506/go.mod h1:Mw7HqKr2kdtu6aYGn3tPmAftiP3QPX63LdK/zcariIo= +github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/redact v1.1.6 h1:zXJBwDZ84xJNlHl1rMyCojqyIxv+7YUpQiJLQ7n4314= +github.com/cockroachdb/redact v1.1.6/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod 
h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/cometbft/cometbft v0.38.15 h1:5veFd8k1uXM27PBg9sMO3hAfRJ3vbh4OmmLf6cVrqXg= -github.com/cometbft/cometbft v0.38.15/go.mod h1:+wh6ap6xctVG+JOHwbl8pPKZ0GeqdPYqISu7F4b43cQ= +github.com/cometbft/cometbft v0.38.18 h1:1ZHYMdu0S75YxFM13LlPXnOwiIpUW5z9TKMQtTIALpw= +github.com/cometbft/cometbft v0.38.18/go.mod h1:PlOQgf3jQorep+g6oVnJgtP65TJvBJoLiXjGaMdNxBE= github.com/cometbft/cometbft-db v0.14.1 h1:SxoamPghqICBAIcGpleHbmoPqy+crij/++eZz3DlerQ= github.com/cometbft/cometbft-db v0.14.1/go.mod h1:KHP1YghilyGV/xjD5DP3+2hyigWx0WTp9X+0Gnx0RxQ= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= -github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNCM= -github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw= +github.com/cosmos/cosmos-db v1.1.2 h1:KZm4xLlPp6rLkyIOmPOhh+XDK9oH1++pNH/csLdX0Dk= +github.com/cosmos/cosmos-db v1.1.2/go.mod h1:dMg2gav979Ig2N076POEw4CEKbCsieaOfDWSfFZxs8M= github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod 
h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= github.com/cosmos/cosmos-sdk v0.50.14 h1:G8CtGHFWbExa+ZpVOVAb4kFmko/R30igsYOwyzRMtgY= @@ -191,12 +218,12 @@ github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= github.com/cosmos/gogoproto v1.7.0 h1:79USr0oyXAbxg3rspGh/m4SWNyoz/GLaAh0QlCe2fro= github.com/cosmos/gogoproto v1.7.0/go.mod h1:yWChEv5IUEYURQasfyBW5ffkMHR/90hiHgbNgrtp4j0= -github.com/cosmos/iavl v1.2.2 h1:qHhKW3I70w+04g5KdsdVSHRbFLgt3yY3qTMd4Xa4rC8= -github.com/cosmos/iavl v1.2.2/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= -github.com/cosmos/ibc-go/modules/capability v1.0.1 h1:ibwhrpJ3SftEEZRxCRkH0fQZ9svjthrX2+oXdZvzgGI= -github.com/cosmos/ibc-go/modules/capability v1.0.1/go.mod h1:rquyOV262nGJplkumH+/LeYs04P3eV8oB7ZM4Ygqk4E= -github.com/cosmos/ibc-go/v8 v8.5.1 h1:3JleEMKBjRKa3FeTKt4fjg22za/qygLBo7mDkoYTNBs= -github.com/cosmos/ibc-go/v8 v8.5.1/go.mod h1:P5hkAvq0Qbg0h18uLxDVA9q1kOJ0l36htMsskiNwXbo= +github.com/cosmos/iavl v1.2.4 h1:IHUrG8dkyueKEY72y92jajrizbkZKPZbMmG14QzsEkw= +github.com/cosmos/iavl v1.2.4/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= +github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v10 v10.1.0 h1:epKcbFAeWRRw1i1jZnYzLIEm9sgUPaL1RftuRjjUKGw= +github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v10 v10.1.0/go.mod h1:S4ZQwf5/LhpOi8JXSAese/6QQDk87nTdicJPlZ5q9UQ= +github.com/cosmos/ibc-go/v10 v10.3.0 h1:w5DkHih8qn15deAeFoTk778WJU+xC1krJ5kDnicfUBc= +github.com/cosmos/ibc-go/v10 v10.3.0/go.mod h1:CthaR7n4d23PJJ7wZHegmNgbVcLXCQql7EwHrAXnMtw= github.com/cosmos/ics23/go v0.11.0 h1:jk5skjT0TqX5e5QJbEnwXIS2yI2vnmLOgpQPeM5RtnU= github.com/cosmos/ics23/go v0.11.0/go.mod h1:A8OjxPE67hHST4Icw94hOxxFEJMBG031xIGF/JHNIY0= github.com/cosmos/ledger-cosmos-go v0.14.0 h1:WfCHricT3rPbkPSVKRH+L4fQGKYHuGOK9Edpel8TYpE= @@ -205,35 +232,42 @@ github.com/cpuguy83/go-md2man/v2 
v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= -github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= +github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= +github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= -github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 
h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/desertbit/timer v1.0.1 h1:yRpYNn5Vaaj6QXecdLMPMJsW81JLiI1eokUft5nBmeo= +github.com/desertbit/timer v1.0.1/go.mod h1:htRrYeY5V/t4iu1xCJ5XsQvp4xve8QulXXctAzxqcwE= github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= +github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod 
h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= -github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvsekhvalnov/jose2go v1.7.0 h1:bnQc8+GMnidJZA8zc6lLEAb4xNrIqHwO+9TzqvtQZPo= +github.com/dvsekhvalnov/jose2go v1.7.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -247,11 +281,18 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= 
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/ethereum/go-ethereum v1.15.11 h1:JK73WKeu0WC0O1eyX+mdQAVHUV+UR1a9VB/domDngBU= +github.com/ethereum/go-ethereum v1.15.11/go.mod h1:mf8YiHIb0GR4x4TipcvBUPxJLw1mFdmxzoDi11sDRoI= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= @@ -263,16 +304,18 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/getsentry/sentry-go v0.27.0 
h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= -github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/getsentry/sentry-go v0.32.0 h1:YKs+//QmwE3DcYtfKRH8/KyOOF/I6Qnx7qYGNHCGmCY= +github.com/getsentry/sentry-go v0.32.0/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI= +github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -286,8 +329,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -299,6 +342,8 @@ github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpv github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= @@ -316,8 +361,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= -github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= 
+github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -346,20 +391,21 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= -github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= +github.com/google/flatbuffers 
v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -373,16 +419,16 @@ github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= -github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= +github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -403,6 +449,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= 
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -412,19 +460,19 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.7.5 h1:dT58k9hQ/vbxNMwoI5+xFYAJuv6152UNvdHokfI5wE4= -github.com/hashicorp/go-getter v1.7.5/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-getter v1.7.8 h1:mshVHx1Fto0/MydBekWan5zUipGq7jO0novchgMmSiY= +github.com/hashicorp/go-getter v1.7.8/go.mod h1:2c6CboOEb9jG6YvmC9xdD+tyAFsrUaJPedwXDGr0TM4= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE= -github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= +github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror 
v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= +github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= @@ -432,11 +480,12 @@ github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoD github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= 
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -444,22 +493,24 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= -github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= +github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod 
h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= -github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= -github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/huandu/skiplist v1.2.1 h1:dTi93MgjwErA/8idWTzIw4Y1kZsMWx35fmI2c8Rij7w= +github.com/huandu/skiplist v1.2.1/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/orderedmap v0.3.0 h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJn+Ichc= +github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJDkXXS7VoV7XGE= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -487,6 +538,7 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -501,9 +553,8 @@ 
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -514,6 +565,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -529,8 +582,9 @@ github.com/manifoldco/promptui v0.9.0/go.mod 
h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GW github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -558,8 +612,6 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -605,11 +657,15 @@ github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= -github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= +github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -618,6 +674,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod 
h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -625,8 +683,8 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw= github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= @@ -640,6 +698,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= 
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -650,8 +710,9 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -666,14 +727,16 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.9.1/go.mod 
h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= +github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -688,16 +751,14 @@ github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod 
h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= -github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= @@ -709,25 +770,30 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= 
+github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= -github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -747,10 +813,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= @@ -772,47 +836,51 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 h1:qxen9oVGzDdIRP6ejyAJc760RwW4SnVDiTYTzwnXuxo= -go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5/go.mod 
h1:eW0HG9/oHQhvRCvb1/pIXW4cOvtDqeQK+XSi3TnwaXY= +go.etcd.io/bbolt v1.4.0-alpha.1 h1:3yrqQzbRRPFPdOMWS/QQIVxVnzSkAZQYeWlZFv1kbj4= +go.etcd.io/bbolt v1.4.0-alpha.1/go.mod h1:S/Z/Nm3iuOnyO1W4XuFfPci51Gj6F1Hv0z8hisyYYOw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 
h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 
h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= -go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -826,8 +894,10 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 
-golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= -golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw= +golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -838,13 +908,13 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp 
v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0= +golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -886,13 +956,13 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod 
h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -900,8 +970,9 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -936,9 +1007,11 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -949,29 +1022,29 @@ golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys 
v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= 
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -997,9 +1070,11 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.186.0 h1:n2OPp+PPXX0Axh4GuSsL5QL8xQCTb2oDwyzPnQvqUug= -google.golang.org/api v0.186.0/go.mod h1:hvRbBmgoje49RV3xqVXrmP6w93n6ehGgIVPYrGtBFFc= +google.golang.org/api v0.229.0 h1:p98ymMtqeJ5i3lIBMj5MpR9kzIIgzpHHh8vQ+vgAzx8= +google.golang.org/api v0.229.0/go.mod h1:wyDfmq5g1wYJWn29O22FDWN48P7Xcz0xz+LBpptYvB0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1014,12 +1089,12 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto 
v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 h1:6whtk83KtD3FkGrVb2hFXuQ+ZMbCNdakARIn/aHMmG8= -google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094/go.mod h1:Zs4wYw8z1zr6RNF4cwYb31mvN/EGaKAdQjNCF3DW6K4= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU= +google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1038,8 +1113,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.36.0/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= -google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= +google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1054,8 +1129,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1067,8 +1142,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod 
h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1087,19 +1160,19 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= -lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= +lukechampine.com/blake3 
v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +nhooyr.io/websocket v1.8.17 h1:KEVeLJkUywCKVsnLIDlD/5gtayKp8VoCkksHCGGfT9Y= +nhooyr.io/websocket v1.8.17/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/docs/cascade-performance.md b/docs/cascade-performance.md deleted file mode 100644 index 1cecf566..00000000 --- a/docs/cascade-performance.md +++ /dev/null @@ -1,191 +0,0 @@ -# Cascade Downloads & Performance: Concepts, Limits, and Tuning - -This document explains how Cascade encoding/decoding works, the performance and memory factors involved, and practical configuration guidance. It consolidates the “blocks and symbols” primer and expands it with deeper operational tuning, error references, and code pointers — in a concise, professional format. - -## Overview - -- Cascade uses RaptorQ forward error correction to split a file into blocks and symbols that can be stored/fetched from a P2P network. -- Decoding requires enough symbols to reconstruct each block; integrity is verified with hashes recorded in the layout. -- Performance and reliability are driven by four main levers: block size, redundancy, concurrency, and memory headroom. 
Batching and ordering in the store path, and supernode selection in the download path, also matter. - -## Current Defaults (Implementation) - -- RaptorQ (codec) - - Block cap: 256 MB (encode‑time upper bound per block) - - Decode concurrency: 1 - - Memory headroom: 20% of detected RAM - - Symbol size: ~65,535 bytes - - Redundancy: 5 - -- Store path (foreground adaptor) - - Batch size: 2,500 files per batch (≈156 MiB typical at default symbol size) - - Downsampling: if total files > 2,500, take 10% sorted prefix for initial store - - Per‑batch P2P store timeout: 5 minutes - -- Store path (background worker) - - Batch size: 1,000 files per batch (≈62.5 MiB typical) - -- Download path - - SDK per‑supernode download deadline: 10 minutes - - Supernode ranking: status probe ~2 seconds per node; sorted by available memory (desc) - - P2P exec timeouts (per RPC): - - FindValue: 5s - - BatchFindValues: 60s - - BatchGetValues: 75s - - StoreData: 10s - - BatchStoreData: 75s - - Replicate: 90s - -- Upload constraints - - Max file size: 1 GB (enforced in SDK and server) - - Adaptive upload chunk size: ~64 KB → 4 MB based on file size - -## Core Concepts - -- Block: A contiguous segment of the original file. Think of it as a “chapter”. -- Symbol: A small piece produced by RaptorQ for a block. You only need “enough” symbols to reconstruct the block. -- Layout: Metadata that lists all blocks (block_id, size, original offset, per‑block hash) and the symbol IDs belonging to each block. - -Encode (upload): -- Choose a block size; RaptorQ creates symbols per block; symbols + layout are stored. - -Decode (download): -- Fetch symbols from the network; reconstruct each block independently; write each block back at its original offset; verify hashes; stream the file. - -Key facts: -- Symbols never mix across blocks. -- Peak memory during decode scales roughly with the chosen block size (plus overhead). 
- -## File Size Limits & Upload Chunking - -- Maximum file size: 1 GB (enforced both in SDK and server handlers). -- Adaptive upload chunk size: ~64 KB → 4 MB depending on total file size for throughput vs memory stability. - -## Encoding/Decoding Workflow (high level) - -1) SDK uploads file to a supernode (gRPC stream). Server writes to a temporary file, validates size and integrity. -2) Server encodes with RaptorQ: produces a symbols directory and a layout JSON. -3) Server stores artefacts: layout/ID files and symbols into P2P in batches. -4) Later, SDK requests download; supernode fetches symbols progressively and decodes to reconstruct the file; integrity is verified. - -## Contexts & Timeouts (download path) - -- SDK: wraps the download RPC with a 10‑minute deadline. -- Server: uses that context; P2P layer applies per‑RPC timeouts (e.g., 5s for single key FindValue, ~75s for BatchGetValues), with internal early cancellation once enough symbols are found. -- RaptorQ: uses the same context for logging; no additional deadline inside decode. - -## Memory Model - -- Decoder memory is primarily a function of block size and concurrency. -- Headroom percentage reduces the usable memory budget to leave safety buffer for the OS and other processes. -- Example formula: usable_memory ≈ TotalRAM × (1 − headroom%). - -## Configuration Levers - -The implementation uses simple fixed constants for safety and predictability. You can adjust them and rebuild. - -1) Block Size Cap (`targetBlockMB`, encode‑time) -- What: Upper bound on block size. Actual used size = min(recommended_by_codec, cap). -- Effect: Smaller cap lowers peak decode memory (more blocks, more symbols/keys). Larger cap reduces block count (faster on big machines) but raises peak memory. -- Current default: 256 MB (good balance on well-provisioned machines). Only affects newly encoded artefacts. - -2) Redundancy (`defaultRedundancy`, encode‑time) -- What: Extra protection (more symbols) to tolerate missing data. 
-- Effect: Higher redundancy improves recoverability but costs more storage and network I/O. Does not materially change peak memory. -- Current default: 5 (good real‑world trade‑off). - -3) Concurrency (`fixedConcurrency`, decode‑time) -- What: Number of RaptorQ decode workers. -- Effect: Higher is faster but multiplies memory; lower is safer and predictable. -- Current default: 1 (safe default for wide environments). - -4) Headroom (`headroomPct`, decode‑time) -- What: Percentage of detected RAM left unused by the RaptorQ processor. -- Effect: More headroom = safer under load; less headroom = more memory available to decode. -- Current default: 20% (conservative and robust for shared hosts). - -## Batching Strategy (store path) - -Why batching matters: -- Store batches are loaded wholly into memory before sending to P2P. -- A fixed “files‑per‑batch” limit gives variable memory usage because symbol files can differ slightly in size. - -Current defaults: -- Foreground adaptor: `loadSymbolsBatchSize = 2500` → ≈ 2,500 × 65,535 B ≈ 156 MiB per batch (typical). -- Background worker: `loadSymbolsBatchSize = 1000` → ≈ 62.5 MiB per batch. - -Byte‑budget alternative (conceptual, not implemented): -- Cap the total bytes per batch (e.g., 128–256 MiB), with a secondary cap on file count. -- Benefits: predictable peak memory; better throughput on small symbols; avoids spikes on larger ones. - -## Ordering for Throughput (store path) - -- We sort relative file paths before batching (e.g., `block_0/...`, `block_1/...`) to improve filesystem locality and reduce disk seeks. This favors speed. -- Trade‑off: If a process stops mid‑way, earlier blocks (lexicographically smaller) are more likely stored than later ones. For fairness across blocks at partial completion, interleaving could be used at some CPU cost. - -## Supernode Selection (download path) - -- The SDK ranks supernodes by available memory (fast 2s status probe per node) and attempts downloads in that order. 
-- This increases the chances of successful decode for large files. - -## Defaults & Suggested Settings - -1 GB files (general) -- Block cap: 256 MB (≈4 blocks) -- Concurrency: 1 -- Headroom: 20% -- Redundancy: 5 - -Large‑memory machines (performance‑leaning) -- Block cap: 256 MB (or 512 MB) to reduce block count and increase throughput. -- Concurrency: 1–2. -- Headroom: 15–20% depending on other workloads. -- Redundancy: 5 (or 6 in sparse networks). - -Small‑memory machines -- Block cap: 64–128 MB -- Concurrency: 1 -- Headroom: 20% -- Redundancy: 5 - -## Error Reference - -- memory limit exceeded - - The decoder exceeded its memory budget. Reduce block size or concurrency, increase RAM, or lower headroom. - -- hash mismatch for block X - - Data reconstructed for the block did not match the expected hash. Often indicates wrong/corrupt symbols; can also occur when decoding fails mid‑way under memory pressure. Re‑fetching or re‑encoding may be required. - -- insufficient symbols - - Not enough valid symbols were available; the retriever will fetch more. - -- gRPC Internal on download stream - - The supernode returned an error during decode (e.g., memory failure). The SDK will try the next supernode. - -## Code Pointers - -- Block cap, headroom, concurrency (RaptorQ): `pkg/codec/raptorq.go` -- Store batching (foreground path): `supernode/services/cascade/adaptors/p2p.go` -- Store batching (background worker): `p2p/kademlia/rq_symbols.go` -- Batch symbol loading / deletion: `pkg/utils/utils.go` (LoadSymbols, DeleteSymbols) -- Supernode ranking by memory (download): `sdk/task/download.go` -- File size cap & adaptive upload chunking: SDK and server sides (`sdk/adapters/supernodeservice/adapter.go`, `supernode/node/action/server/cascade/cascade_action_server.go`) - -## Notes & Scope - -- Changing block size only affects new encodes; existing artefacts keep their original layout. 
-- Tuning should reflect your fleet: prefer safety defaults for heterogeneous environments; be aggressive only on known large‑RAM hosts. - -## FAQ - -- Why might a smaller file decode but a larger file fail? - - Peak memory grows with data size and chosen block size. A smaller file may fit within the decoder’s memory budget on a given machine, while a larger one may exceed it. Smaller blocks and/or more RAM resolve this. - -- Does changing block size affect old files? - - No. It only affects newly encoded content. Existing artefacts retain their original layout. - -- Will smaller blocks slow things down? - - Slightly, due to more pieces and network lookups. For constrained machines, the reliability gain outweighs the small performance cost. - -- What’s the best block size? - - There’s no single best value. 128 MB is a solid default. Use 64 MB for smaller machines and 256–512 MB for large servers when maximizing throughput. diff --git a/docs/cascade-store-artifacts.md b/docs/cascade-store-artifacts.md deleted file mode 100644 index 880f5418..00000000 --- a/docs/cascade-store-artifacts.md +++ /dev/null @@ -1,164 +0,0 @@ -# Cascade Artefacts Storage Flow - -This document explains, in depth, how Cascade artefacts (ID files + RaptorQ symbols) are persisted to the P2P network, the control flow from the API to the P2P layer, what metrics are collected, and which background workers continue the process after the API call returns. - -## Scope & Terminology - -- Artefacts: The data produced for a Cascade action that must be stored on the network. - - ID files (a.k.a. redundant metadata files): compact metadata payloads derived from the layout/index. - - Symbols: RaptorQ-encoded chunks of the input file. -- Request IDs and files are generated during the registration flow; storing starts after validation and simulation succeed. - -## High‑Level Sequence - -1) Client calls `Register` with input file and action metadata. 
-2) The service verifies the action, fee, eligibility, signature and layout consistency, then encodes the input into RaptorQ symbols. -3) Finalize simulation is performed on chain to ensure the action can finalize. -4) If simulation passes, artefacts are persisted: - - ID files are stored first as a single batch. - - Symbols are stored in batches; a first pass may downsample for large directories. - - A background worker continues storing the remainder (no sampling) after the call returns. -5) Action is finalized on chain and control returns to the caller. - -Code reference: -- `supernode/services/cascade/register.go` (Register flow, steps 1–11) -- `supernode/services/cascade/helper.go` (wrappers and helpers) -- `supernode/services/cascade/adaptors/p2p.go` (P2P adaptor for storage) -- `p2p/p2p.go`, `p2p/kademlia/dht.go`, `p2p/kademlia/rq_symbols.go` (P2P and Kademlia implementation) - -## Register Flow Up To Storage - -Register performs the following (simplified): - -- Fetches and validates the on‑chain action. -- Verifies fee and that this node is in the top supernodes for the block height. -- Decodes cascade metadata and verifies that the uploaded data hash matches the ticket. -- Encodes the input using RaptorQ; produces `SymbolsDir` and `Metadata` (layout). -- Verifies layout signature (creator), generates RQ‑ID files and validates IDs. -- Simulates finalize (chain dry‑run). If simulation fails, the call returns with an error (no storage). -- Calls `storeArtefacts(...)` to persist artefacts to P2P. - -Events are streamed throughout via `send(*RegisterResponse)`, including when artefacts are stored and when the action is finalized. - -## The storeArtefacts Wrapper - -Function: `supernode/services/cascade/helper.go::storeArtefacts` - -- Thin pass‑through that packages a `StoreArtefactsRequest` and forwards to the P2P adaptor (`task.P2P.StoreArtefacts`). -- Parameters: - - `IDFiles [][]byte`: the redundant metadata files to store. 
- - `SymbolsDir string`: filesystem directory where symbols were written. - - `TaskID string` and `ActionID string`: identifiers for logging and DB association. - -Returns `StoreArtefactsMetrics` with separate metrics for metadata and symbols plus an aggregated view. - -## P2P Adaptor: StoreArtefacts - -Implementation: `supernode/services/cascade/adaptors/p2p.go` - -1) Store metadata (ID files) using `p2p.Client.StoreBatch(...)`: - - Returns `metaRatePct` and `metaRequests` (count of per‑node RPCs attempted during this batch store). - -2) Store symbols using `storeCascadeSymbols(...)`: - - Records the symbol directory in a small SQLite store: `rqStore.StoreSymbolDirectory(taskID, symbolsDir)`. - - Walks `symbolsDir` to list symbol files. If there are more than 2,500 symbols, downsamples to 10% for this first pass (random sample, sorted deterministically afterward). - - Streams symbols in fixed‑size batches of 2,500 files: - - Each batch loads files, calls `p2p.Client.StoreBatch(...)` with a 5‑minute timeout, and deletes successfully uploaded files. - - Marks “first batch stored” for this action: `rqStore.UpdateIsFirstBatchStored(actionID)`. - - Returns `(symRatePct, symCount, symRequests)`. - -3) Aggregation and return: - - Computes item‑weighted aggregate success rate across metadata and symbols: `aggRate = (metaRate*metaCount + symRate*symCount) / (metaCount + symCount)`. - - Total requests = `metaRequests + symRequests`. - - Returns `StoreArtefactsMetrics` with all fields populated. - -Notes: -- This adaptor only performs a first pass of symbol storage. For large directories it may downsample; the background worker completes the remaining symbols later (see Background Worker section). - -## P2P Client and DHT: StoreBatch - -`p2p.Client.StoreBatch` proxies to `DHT.StoreBatch`: - -- Local persist first: `store.StoreBatch(ctx, values, typ, true)` ensures local DB/storage contains the items. 
-- Network store: `DHT.IterateBatchStore(ctx, values, typ, taskID)`: - - For each value, compute its Blake3 hash; compute the top‑K closest nodes from the routing table. - - Build a node→items map and invoke `batchStoreNetwork(...)` with bounded concurrency (a goroutine per node, limited via a semaphore; all joined before returning). - - Tally per‑node RPC attempts (requests) and successes to compute `successRatePct`. - - If the measured rate is below `minimumDataStoreSuccessRate` (75%), return an error along with `(ratePct, requests)`. - - Otherwise, return `(ratePct, requests, nil)`. - -Important distinctions: -- `requests` is the number of per‑node RPCs attempted; it is not the number of items in the batch. -- Success rate is based on successful node acknowledgements divided by `requests`. - -## Metrics & Events - -Returned metrics (from `StoreArtefacts`): - -- Metadata: `MetaRate` (%), `MetaRequests`, `MetaCount`. -- Symbols: `SymRate` (%), `SymRequests`, `SymCount`. -- Aggregate: `AggregatedRate` (item‑weighted), `TotalRequests`. - -`Register` logs and emits a single event line summarizing these metrics via `emitArtefactsStored(...)`, then proceeds to finalize the action on chain. - -## Background Worker (Symbols Continuation) - -Started in DHT `run()` when P2P service starts: - -- Function: `p2p/kademlia/rq_symbols.go::startStoreSymbolsWorker` -- Every 30 seconds: - - Queries `rq_symbols_dir` for rows where `is_first_batch_stored = TRUE` and `is_completed = FALSE`. - - For each directory, scans and stores ALL remaining symbols (no sampling) in 1,000‑file batches using the same `StoreBatch` API. - - Deletes files after successful upload. - - Marks the directory as completed: `rqstore.SetIsCompleted(txid)`. - -Effectively, the API call performs a first pass, and the background worker ensures eventual completion. - -## Storage Bookkeeping (SQLite) - -Table: `rq_symbols_dir` - -- Columns: - - `txid TEXT PRIMARY KEY` — action/task identifier. 
- - `dir TEXT NOT NULL` — filesystem path to the symbols directory. - - `is_first_batch_stored BOOLEAN NOT NULL DEFAULT FALSE` — set true after first pass completes. - - `is_completed BOOLEAN NOT NULL DEFAULT FALSE` — set true after the background worker completes. - - `created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP`. - -APIs: -- `StoreSymbolDirectory(txid, dir)` — insert entry when first pass starts. -- `UpdateIsFirstBatchStored(txid)` — mark first pass completion. -- `GetToDoStoreSymbolDirs()` — list txids/dirs awaiting background completion. -- `SetIsCompleted(txid)` — mark directory as fully processed. - -## Timeouts, Limits, and Knobs - -- First‑pass symbol batches: 2,500 items; per‑batch timeout: 5 minutes. -- Sampling threshold: if symbol count > 2,500, downsample to 10% for first pass. -- DHT minimum success rate: 75% — batch returns error if not met. -- Background worker batch size: 1,000; runs every 30 seconds; no sampling. - -These values can be tuned in: -- `supernode/services/cascade/adaptors/p2p.go` (batching, sampling for first pass). -- `p2p/kademlia/rq_symbols.go` (background worker interval and batch size). -- `p2p/kademlia/dht.go` (minimum success rate, internal concurrencies). - -## Error Handling & Return Semantics - -- If finalize simulation fails: Register returns an error before any storage. -- If metadata store fails: `StoreArtefacts` returns error; Register wraps and returns. -- If symbol first pass fails: same; background worker does not start because `is_first_batch_stored` is not set. -- If the network success rate is below the threshold: DHT returns an error; adaptor propagates it. -- File I/O errors (load/delete) abort the corresponding batch with a wrapped error. - -## Concurrency Model - -- Within `StoreArtefacts` → `DHT.StoreBatch`, network calls are concurrent (goroutines per node) but **joined before return**. There is no detached goroutine in the first pass. 
-- The only long‑running background activity is the P2P‑level worker (`startStoreSymbolsWorker`) launched when the P2P service starts, not by the API call itself. - -## Cleanup Behavior - -- First pass deletes uploaded symbol files per batch (`utils.DeleteSymbols`) after a successful store batch. -- Background worker also deletes files after each batch store. -- The uploaded raw input file is removed by `Register` in a `defer` block regardless of outcome. - diff --git a/docs/p2p-metrics-capture.md b/docs/p2p-metrics-capture.md deleted file mode 100644 index 6cbafebf..00000000 --- a/docs/p2p-metrics-capture.md +++ /dev/null @@ -1,186 +0,0 @@ -# P2P Metrics Capture — What Each Field Means and Where It’s Collected - -This guide explains every field we emit in Cascade events, how it is measured, and exactly where it is captured in the code. - -The design is minimal by intent: -- Metrics are collected only for the first pass of Register (store) and for the active Download operation. -- P2P APIs return errors only; per‑RPC details are captured via a small metrics package (`pkg/p2pmetrics`). -- No aggregation; we only group raw RPC attempts by IP. - ---- - -## Store (Register) Event - -Event payload shape - -```json -{ - "store": { - "duration_ms": 9876, - "symbols_first_pass": 220, - "symbols_total": 1200, - "id_files_count": 14, - "success_rate_pct": 82.5, - "calls_by_ip": { - "10.0.0.5": [ - {"ip": "10.0.0.5", "address": "A:4445", "keys": 100, "success": true, "duration_ms": 120}, - {"ip": "10.0.0.5", "address": "A:4445", "keys": 120, "success": false, "error": "timeout", "duration_ms": 300} - ] - } - } -} -``` - -### Fields - -- `store.duration_ms` - - Meaning: End‑to‑end elapsed time of the first‑pass store phase (Register’s storage section only). - - Where captured: `supernode/services/cascade/adaptors/p2p.go` - - A `time.Now()` timestamp is taken just before the first‑pass store function and measured on return. 
- -- `store.symbols_first_pass` - - Meaning: Number of symbols sent during the Register first pass (across the combined first batch and any immediate first‑pass symbol batches). - - Where captured: `supernode/services/cascade/adaptors/p2p.go` via `p2pmetrics.SetStoreSummary(...)` using the value returned by `storeCascadeSymbolsAndData`. - -- `store.symbols_total` - - Meaning: Total symbols available in the symbol directory (before sampling). Used to contextualize the first‑pass coverage. - - Where captured: Computed in `storeCascadeSymbolsAndData` and included in `SetStoreSummary`. - -- `store.id_files_count` - - Meaning: Number of redundant metadata files (ID files) sent in the first combined batch. - - Where captured: `len(req.IDFiles)` in `StoreArtefacts`, passed to `SetStoreSummary`. - -- `store.calls_by_ip` - - Meaning: All raw network store RPC attempts grouped by the node IP. - - Each array entry is a single RPC attempt with: - - `ip` — Node IP (fallback to `address` if missing). - - `address` — Node string `IP:port`. - - `keys` — Number of items in that RPC attempt (metadata + first symbols for the first combined batch, symbols for subsequent batches within the first pass). - - `success` — True if there was no transport error and no error message returned by the node response. Note: this flag does not explicitly check the `ResultOk` status; in rare cases, a non‑OK response with an empty error message may appear as `success` in metrics. (Internal success‑rate enforcement still uses explicit response status.) - - `error` — Any error string captured; omitted when success. - - `duration_ms` — RPC duration in milliseconds. - - `noop` — Present and `true` when no store payload was sent to the node (empty batch for that node). Such entries are recorded as `success=true`, `keys=0`, with no `error`. 
- - Where captured: - - Emission point (P2P): `p2p/kademlia/dht.go::IterateBatchStore(...)` - - After each node RPC returns, we call `p2pmetrics.RecordStore(taskID, Call{...})`. For nodes with no payload, a `noop: true` entry is emitted without sending a wire RPC. - - `taskID` is read from the context via `p2pmetrics.TaskIDFromContext(ctx)`. - - Grouping: `pkg/p2pmetrics/metrics.go` - - `StartStoreCapture(taskID)` enables capture; `StopStoreCapture(taskID)` disables it. - - Calls are grouped by `ip` (fallback to `address`) without further aggregation. - -- `store.success_rate_pct` - - Meaning: First‑pass store success rate computed from captured per‑RPC outcomes: successful responses divided by total recorded store RPC attempts, expressed as a percentage. - - Where captured: Computed in `pkg/p2pmetrics/metrics.go::BuildStoreEventPayloadFromCollector` from `calls_by_ip` data. - -### First‑Pass Success Threshold - -- Internal enforcement only: if DHT first‑pass success rate is below 75%, `IterateBatchStore` returns an error. -- We also emit `store.success_rate_pct` for analytics; the threshold only affects control flow (errors), not the emitted metric. -- Code: `p2p/kademlia/dht.go::IterateBatchStore`. - -### Scope Limits - -- Background worker (which continues storing remaining symbols) is NOT captured — we don’t set a metrics task ID on those paths. - ---- - -## Download Event - -Event payload shape - -```json -{ - "retrieve": { - "found_local": 42, - "retrieve_ms": 2000, - "decode_ms": 8000, - "calls_by_ip": { - "10.0.0.7": [ - {"ip": "10.0.0.7", "address": "B:4445", "keys": 13, "success": true, "duration_ms": 90} - ] - } - } -} -``` - -### Fields - -- `retrieve.found_local` - - Meaning: Number of items retrieved from local storage before any network calls. - - Where captured: `p2p/kademlia/dht.go::BatchRetrieve(...)` - - After `fetchAndAddLocalKeys`, we call `p2pmetrics.ReportFoundLocal(taskID, int(foundLocalCount))`. 
- - `taskID` is read from context with `p2pmetrics.TaskIDFromContext(ctx)`. - -- `retrieve.retrieve_ms` - - Meaning: Time spent in network batch‑retrieve. - - Where captured: `supernode/services/cascade/download.go` - - Timestamp before `BatchRetrieve`, measured after it returns. - -- `retrieve.decode_ms` - - Meaning: Time spent decoding symbols and reconstructing the file. - - Where captured: `supernode/services/cascade/download.go` - - Timestamp before decode, measured after it returns. - -- `retrieve.calls_by_ip` - - Meaning: All raw per‑RPC retrieve attempts grouped by node IP. - - Each array entry is a single RPC attempt with: - - `ip`, `address` — Identifiers as available. - - `keys` — Number of symbols returned by that node in that call. - - `success` — True if the RPC completed without error (even if `keys == 0`). Transport/status errors remain `success=false` with an `error` message. - - `error` — Error string when the RPC failed; omitted otherwise. - - `duration_ms` — RPC duration in milliseconds. - - `noop` — Present and `true` when no network request was actually sent to the node (e.g., all requested keys were already satisfied or deduped before issuing the call). Such entries are recorded as `success=true`, `keys=0`, with no `error`. - - Where captured: - - Emission point (P2P): `p2p/kademlia/dht.go::iterateBatchGetValues(...)` - - Each node attempt records a `p2pmetrics.RecordRetrieve(taskID, Call{...})`. For attempts where no network RPC is sent, a `noop: true` entry is emitted. - - `taskID` is extracted from context using `p2pmetrics.TaskIDFromContext(ctx)`. - - Grouping: `pkg/p2pmetrics/metrics.go` (same grouping/fallback as store). - -### Scope Limits - -- Metrics are captured only for the active Download call (context is tagged in `download.go`). - ---- - -## Context Tagging (Task ID) - -- We use an explicit, metrics‑only context key defined in `pkg/p2pmetrics` to tag P2P calls with a task ID. - - Setters: `p2pmetrics.WithTaskID(ctx, id)`. 
- - Getters: `p2pmetrics.TaskIDFromContext(ctx)`. -- Where it is set: - - Store (first pass): `supernode/services/cascade/adaptors/p2p.go` wraps `StoreBatch` calls. - - Download: `supernode/services/cascade/download.go` wraps `BatchRetrieve` call. - ---- - -## Building and Emitting Events - -- Store - - `supernode/services/cascade/helper.go::emitArtefactsStored(...)` - - Builds `store` payload via `p2pmetrics.BuildStoreEventPayloadFromCollector(taskID)`. - - Includes `success_rate_pct` (first‑pass store success rate computed from captured per‑RPC outcomes) in addition to the minimal fields. - - Emits the event. - -- Download - - `supernode/services/cascade/download.go` - - Builds `retrieve` payload via `p2pmetrics.BuildDownloadEventPayloadFromCollector(actionID)`. - - Emits the event. - ---- - -## Quick File Map - -- Capture + grouping: `supernode/pkg/p2pmetrics/metrics.go` -- Store adaptor: `supernode/supernode/services/cascade/adaptors/p2p.go` -- Store event: `supernode/supernode/services/cascade/helper.go` -- Download flow: `supernode/supernode/services/cascade/download.go` -- DHT store calls: `supernode/p2p/kademlia/dht.go::IterateBatchStore` -- DHT retrieve calls: `supernode/p2p/kademlia/dht.go::BatchRetrieve` and `iterateBatchGetValues` - ---- - -## Notes - -- No P2P stats/snapshots are used to build events. -- No aggregation is performed; we only group raw RPC attempts by IP. -- First‑pass success rate is enforced internally (75% threshold) but not emitted as a metric. diff --git a/gen/dupedetection/dd-server.pb.go b/gen/dupedetection/dd-server.pb.go deleted file mode 100644 index 69e63a6d..00000000 --- a/gen/dupedetection/dd-server.pb.go +++ /dev/null @@ -1,1263 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.35.1 -// protoc v3.12.4 -// source: dd-server.proto - -package dupedetection - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type RarenessScoreRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ImageFilepath string `protobuf:"bytes,1,opt,name=image_filepath,json=imageFilepath,proto3" json:"image_filepath,omitempty"` - PastelBlockHashWhenRequestSubmitted string `protobuf:"bytes,2,opt,name=pastel_block_hash_when_request_submitted,json=pastelBlockHashWhenRequestSubmitted,proto3" json:"pastel_block_hash_when_request_submitted,omitempty"` - PastelBlockHeightWhenRequestSubmitted string `protobuf:"bytes,3,opt,name=pastel_block_height_when_request_submitted,json=pastelBlockHeightWhenRequestSubmitted,proto3" json:"pastel_block_height_when_request_submitted,omitempty"` - UtcTimestampWhenRequestSubmitted string `protobuf:"bytes,4,opt,name=utc_timestamp_when_request_submitted,json=utcTimestampWhenRequestSubmitted,proto3" json:"utc_timestamp_when_request_submitted,omitempty"` - PastelIdOfSubmitter string `protobuf:"bytes,5,opt,name=pastel_id_of_submitter,json=pastelIdOfSubmitter,proto3" json:"pastel_id_of_submitter,omitempty"` - PastelIdOfRegisteringSupernode_1 string `protobuf:"bytes,6,opt,name=pastel_id_of_registering_supernode_1,json=pastelIdOfRegisteringSupernode1,proto3" json:"pastel_id_of_registering_supernode_1,omitempty"` - PastelIdOfRegisteringSupernode_2 string `protobuf:"bytes,7,opt,name=pastel_id_of_registering_supernode_2,json=pastelIdOfRegisteringSupernode2,proto3" 
json:"pastel_id_of_registering_supernode_2,omitempty"` - PastelIdOfRegisteringSupernode_3 string `protobuf:"bytes,8,opt,name=pastel_id_of_registering_supernode_3,json=pastelIdOfRegisteringSupernode3,proto3" json:"pastel_id_of_registering_supernode_3,omitempty"` - IsPastelOpenapiRequest bool `protobuf:"varint,9,opt,name=is_pastel_openapi_request,json=isPastelOpenapiRequest,proto3" json:"is_pastel_openapi_request,omitempty"` - OpenApiGroupIdString string `protobuf:"bytes,10,opt,name=open_api_group_id_string,json=openApiGroupIdString,proto3" json:"open_api_group_id_string,omitempty"` - CollectionNameString string `protobuf:"bytes,11,opt,name=collection_name_string,json=collectionNameString,proto3" json:"collection_name_string,omitempty"` -} - -func (x *RarenessScoreRequest) Reset() { - *x = RarenessScoreRequest{} - mi := &file_dd_server_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RarenessScoreRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RarenessScoreRequest) ProtoMessage() {} - -func (x *RarenessScoreRequest) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RarenessScoreRequest.ProtoReflect.Descriptor instead. 
-func (*RarenessScoreRequest) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{0} -} - -func (x *RarenessScoreRequest) GetImageFilepath() string { - if x != nil { - return x.ImageFilepath - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelBlockHashWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHashWhenRequestSubmitted - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelBlockHeightWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHeightWhenRequestSubmitted - } - return "" -} - -func (x *RarenessScoreRequest) GetUtcTimestampWhenRequestSubmitted() string { - if x != nil { - return x.UtcTimestampWhenRequestSubmitted - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfSubmitter() string { - if x != nil { - return x.PastelIdOfSubmitter - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfRegisteringSupernode_1() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_1 - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfRegisteringSupernode_2() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_2 - } - return "" -} - -func (x *RarenessScoreRequest) GetPastelIdOfRegisteringSupernode_3() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_3 - } - return "" -} - -func (x *RarenessScoreRequest) GetIsPastelOpenapiRequest() bool { - if x != nil { - return x.IsPastelOpenapiRequest - } - return false -} - -func (x *RarenessScoreRequest) GetOpenApiGroupIdString() string { - if x != nil { - return x.OpenApiGroupIdString - } - return "" -} - -func (x *RarenessScoreRequest) GetCollectionNameString() string { - if x != nil { - return x.CollectionNameString - } - return "" -} - -type ImageRarenessScoreReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PastelBlockHashWhenRequestSubmitted string 
`protobuf:"bytes,1,opt,name=pastel_block_hash_when_request_submitted,json=pastelBlockHashWhenRequestSubmitted,proto3" json:"pastel_block_hash_when_request_submitted,omitempty"` - PastelBlockHeightWhenRequestSubmitted string `protobuf:"bytes,2,opt,name=pastel_block_height_when_request_submitted,json=pastelBlockHeightWhenRequestSubmitted,proto3" json:"pastel_block_height_when_request_submitted,omitempty"` - UtcTimestampWhenRequestSubmitted string `protobuf:"bytes,3,opt,name=utc_timestamp_when_request_submitted,json=utcTimestampWhenRequestSubmitted,proto3" json:"utc_timestamp_when_request_submitted,omitempty"` - PastelIdOfSubmitter string `protobuf:"bytes,4,opt,name=pastel_id_of_submitter,json=pastelIdOfSubmitter,proto3" json:"pastel_id_of_submitter,omitempty"` - PastelIdOfRegisteringSupernode_1 string `protobuf:"bytes,5,opt,name=pastel_id_of_registering_supernode_1,json=pastelIdOfRegisteringSupernode1,proto3" json:"pastel_id_of_registering_supernode_1,omitempty"` - PastelIdOfRegisteringSupernode_2 string `protobuf:"bytes,6,opt,name=pastel_id_of_registering_supernode_2,json=pastelIdOfRegisteringSupernode2,proto3" json:"pastel_id_of_registering_supernode_2,omitempty"` - PastelIdOfRegisteringSupernode_3 string `protobuf:"bytes,7,opt,name=pastel_id_of_registering_supernode_3,json=pastelIdOfRegisteringSupernode3,proto3" json:"pastel_id_of_registering_supernode_3,omitempty"` - IsPastelOpenapiRequest bool `protobuf:"varint,8,opt,name=is_pastel_openapi_request,json=isPastelOpenapiRequest,proto3" json:"is_pastel_openapi_request,omitempty"` - ImageFilePath string `protobuf:"bytes,9,opt,name=image_file_path,json=imageFilePath,proto3" json:"image_file_path,omitempty"` - DupeDetectionSystemVersion string `protobuf:"bytes,10,opt,name=dupe_detection_system_version,json=dupeDetectionSystemVersion,proto3" json:"dupe_detection_system_version,omitempty"` - IsLikelyDupe bool `protobuf:"varint,11,opt,name=is_likely_dupe,json=isLikelyDupe,proto3" json:"is_likely_dupe,omitempty"` - 
IsRareOnInternet bool `protobuf:"varint,12,opt,name=is_rare_on_internet,json=isRareOnInternet,proto3" json:"is_rare_on_internet,omitempty"` - OverallRarenessScore float32 `protobuf:"fixed32,13,opt,name=overall_rareness_score,json=overallRarenessScore,proto3" json:"overall_rareness_score,omitempty"` - PctOfTop_10MostSimilarWithDupeProbAbove_25Pct float32 `protobuf:"fixed32,14,opt,name=pct_of_top_10_most_similar_with_dupe_prob_above_25pct,json=pctOfTop10MostSimilarWithDupeProbAbove25pct,proto3" json:"pct_of_top_10_most_similar_with_dupe_prob_above_25pct,omitempty"` - PctOfTop_10MostSimilarWithDupeProbAbove_33Pct float32 `protobuf:"fixed32,15,opt,name=pct_of_top_10_most_similar_with_dupe_prob_above_33pct,json=pctOfTop10MostSimilarWithDupeProbAbove33pct,proto3" json:"pct_of_top_10_most_similar_with_dupe_prob_above_33pct,omitempty"` - PctOfTop_10MostSimilarWithDupeProbAbove_50Pct float32 `protobuf:"fixed32,16,opt,name=pct_of_top_10_most_similar_with_dupe_prob_above_50pct,json=pctOfTop10MostSimilarWithDupeProbAbove50pct,proto3" json:"pct_of_top_10_most_similar_with_dupe_prob_above_50pct,omitempty"` - RarenessScoresTableJsonCompressedB64 string `protobuf:"bytes,17,opt,name=rareness_scores_table_json_compressed_b64,json=rarenessScoresTableJsonCompressedB64,proto3" json:"rareness_scores_table_json_compressed_b64,omitempty"` - InternetRareness *InternetRareness `protobuf:"bytes,18,opt,name=internet_rareness,json=internetRareness,proto3" json:"internet_rareness,omitempty"` - OpenNsfwScore float32 `protobuf:"fixed32,19,opt,name=open_nsfw_score,json=openNsfwScore,proto3" json:"open_nsfw_score,omitempty"` - AlternativeNsfwScores *AltNsfwScores `protobuf:"bytes,20,opt,name=alternative_nsfw_scores,json=alternativeNsfwScores,proto3" json:"alternative_nsfw_scores,omitempty"` - ImageFingerprintOfCandidateImageFile []float64 `protobuf:"fixed64,21,rep,packed,name=image_fingerprint_of_candidate_image_file,json=imageFingerprintOfCandidateImageFile,proto3" 
json:"image_fingerprint_of_candidate_image_file,omitempty"` - CollectionNameString string `protobuf:"bytes,22,opt,name=collection_name_string,json=collectionNameString,proto3" json:"collection_name_string,omitempty"` - HashOfCandidateImageFile string `protobuf:"bytes,23,opt,name=hash_of_candidate_image_file,json=hashOfCandidateImageFile,proto3" json:"hash_of_candidate_image_file,omitempty"` - OpenApiGroupIdString string `protobuf:"bytes,24,opt,name=open_api_group_id_string,json=openApiGroupIdString,proto3" json:"open_api_group_id_string,omitempty"` - GroupRarenessScore float32 `protobuf:"fixed32,25,opt,name=group_rareness_score,json=groupRarenessScore,proto3" json:"group_rareness_score,omitempty"` - CandidateImageThumbnailWebpAsBase64String string `protobuf:"bytes,26,opt,name=candidate_image_thumbnail_webp_as_base64_string,json=candidateImageThumbnailWebpAsBase64String,proto3" json:"candidate_image_thumbnail_webp_as_base64_string,omitempty"` - DoesNotImpactTheFollowingCollectionStrings string `protobuf:"bytes,27,opt,name=does_not_impact_the_following_collection_strings,json=doesNotImpactTheFollowingCollectionStrings,proto3" json:"does_not_impact_the_following_collection_strings,omitempty"` - IsInvalidSenseRequest bool `protobuf:"varint,28,opt,name=is_invalid_sense_request,json=isInvalidSenseRequest,proto3" json:"is_invalid_sense_request,omitempty"` - InvalidSenseRequestReason string `protobuf:"bytes,29,opt,name=invalid_sense_request_reason,json=invalidSenseRequestReason,proto3" json:"invalid_sense_request_reason,omitempty"` - SimilarityScoreToFirstEntryInCollection float32 `protobuf:"fixed32,30,opt,name=similarity_score_to_first_entry_in_collection,json=similarityScoreToFirstEntryInCollection,proto3" json:"similarity_score_to_first_entry_in_collection,omitempty"` - CpProbability float32 `protobuf:"fixed32,31,opt,name=cp_probability,json=cpProbability,proto3" json:"cp_probability,omitempty"` - ChildProbability float32 
`protobuf:"fixed32,32,opt,name=child_probability,json=childProbability,proto3" json:"child_probability,omitempty"` - ImageFingerprintSetChecksum string `protobuf:"bytes,33,opt,name=image_fingerprint_set_checksum,json=imageFingerprintSetChecksum,proto3" json:"image_fingerprint_set_checksum,omitempty"` -} - -func (x *ImageRarenessScoreReply) Reset() { - *x = ImageRarenessScoreReply{} - mi := &file_dd_server_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ImageRarenessScoreReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ImageRarenessScoreReply) ProtoMessage() {} - -func (x *ImageRarenessScoreReply) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ImageRarenessScoreReply.ProtoReflect.Descriptor instead. 
-func (*ImageRarenessScoreReply) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{1} -} - -func (x *ImageRarenessScoreReply) GetPastelBlockHashWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHashWhenRequestSubmitted - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelBlockHeightWhenRequestSubmitted() string { - if x != nil { - return x.PastelBlockHeightWhenRequestSubmitted - } - return "" -} - -func (x *ImageRarenessScoreReply) GetUtcTimestampWhenRequestSubmitted() string { - if x != nil { - return x.UtcTimestampWhenRequestSubmitted - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfSubmitter() string { - if x != nil { - return x.PastelIdOfSubmitter - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfRegisteringSupernode_1() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_1 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfRegisteringSupernode_2() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_2 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetPastelIdOfRegisteringSupernode_3() string { - if x != nil { - return x.PastelIdOfRegisteringSupernode_3 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetIsPastelOpenapiRequest() bool { - if x != nil { - return x.IsPastelOpenapiRequest - } - return false -} - -func (x *ImageRarenessScoreReply) GetImageFilePath() string { - if x != nil { - return x.ImageFilePath - } - return "" -} - -func (x *ImageRarenessScoreReply) GetDupeDetectionSystemVersion() string { - if x != nil { - return x.DupeDetectionSystemVersion - } - return "" -} - -func (x *ImageRarenessScoreReply) GetIsLikelyDupe() bool { - if x != nil { - return x.IsLikelyDupe - } - return false -} - -func (x *ImageRarenessScoreReply) GetIsRareOnInternet() bool { - if x != nil { - return x.IsRareOnInternet - } - return false -} - -func (x *ImageRarenessScoreReply) GetOverallRarenessScore() float32 
{ - if x != nil { - return x.OverallRarenessScore - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetPctOfTop_10MostSimilarWithDupeProbAbove_25Pct() float32 { - if x != nil { - return x.PctOfTop_10MostSimilarWithDupeProbAbove_25Pct - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetPctOfTop_10MostSimilarWithDupeProbAbove_33Pct() float32 { - if x != nil { - return x.PctOfTop_10MostSimilarWithDupeProbAbove_33Pct - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetPctOfTop_10MostSimilarWithDupeProbAbove_50Pct() float32 { - if x != nil { - return x.PctOfTop_10MostSimilarWithDupeProbAbove_50Pct - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetRarenessScoresTableJsonCompressedB64() string { - if x != nil { - return x.RarenessScoresTableJsonCompressedB64 - } - return "" -} - -func (x *ImageRarenessScoreReply) GetInternetRareness() *InternetRareness { - if x != nil { - return x.InternetRareness - } - return nil -} - -func (x *ImageRarenessScoreReply) GetOpenNsfwScore() float32 { - if x != nil { - return x.OpenNsfwScore - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetAlternativeNsfwScores() *AltNsfwScores { - if x != nil { - return x.AlternativeNsfwScores - } - return nil -} - -func (x *ImageRarenessScoreReply) GetImageFingerprintOfCandidateImageFile() []float64 { - if x != nil { - return x.ImageFingerprintOfCandidateImageFile - } - return nil -} - -func (x *ImageRarenessScoreReply) GetCollectionNameString() string { - if x != nil { - return x.CollectionNameString - } - return "" -} - -func (x *ImageRarenessScoreReply) GetHashOfCandidateImageFile() string { - if x != nil { - return x.HashOfCandidateImageFile - } - return "" -} - -func (x *ImageRarenessScoreReply) GetOpenApiGroupIdString() string { - if x != nil { - return x.OpenApiGroupIdString - } - return "" -} - -func (x *ImageRarenessScoreReply) GetGroupRarenessScore() float32 { - if x != nil { - return x.GroupRarenessScore - } - return 0 -} - -func (x *ImageRarenessScoreReply) 
GetCandidateImageThumbnailWebpAsBase64String() string { - if x != nil { - return x.CandidateImageThumbnailWebpAsBase64String - } - return "" -} - -func (x *ImageRarenessScoreReply) GetDoesNotImpactTheFollowingCollectionStrings() string { - if x != nil { - return x.DoesNotImpactTheFollowingCollectionStrings - } - return "" -} - -func (x *ImageRarenessScoreReply) GetIsInvalidSenseRequest() bool { - if x != nil { - return x.IsInvalidSenseRequest - } - return false -} - -func (x *ImageRarenessScoreReply) GetInvalidSenseRequestReason() string { - if x != nil { - return x.InvalidSenseRequestReason - } - return "" -} - -func (x *ImageRarenessScoreReply) GetSimilarityScoreToFirstEntryInCollection() float32 { - if x != nil { - return x.SimilarityScoreToFirstEntryInCollection - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetCpProbability() float32 { - if x != nil { - return x.CpProbability - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetChildProbability() float32 { - if x != nil { - return x.ChildProbability - } - return 0 -} - -func (x *ImageRarenessScoreReply) GetImageFingerprintSetChecksum() string { - if x != nil { - return x.ImageFingerprintSetChecksum - } - return "" -} - -type InternetRareness struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RareOnInternetSummaryTableAsJsonCompressedB64 string `protobuf:"bytes,1,opt,name=rare_on_internet_summary_table_as_json_compressed_b64,json=rareOnInternetSummaryTableAsJsonCompressedB64,proto3" json:"rare_on_internet_summary_table_as_json_compressed_b64,omitempty"` - RareOnInternetGraphJsonCompressedB64 string `protobuf:"bytes,2,opt,name=rare_on_internet_graph_json_compressed_b64,json=rareOnInternetGraphJsonCompressedB64,proto3" json:"rare_on_internet_graph_json_compressed_b64,omitempty"` - AlternativeRareOnInternetDictAsJsonCompressedB64 string 
`protobuf:"bytes,3,opt,name=alternative_rare_on_internet_dict_as_json_compressed_b64,json=alternativeRareOnInternetDictAsJsonCompressedB64,proto3" json:"alternative_rare_on_internet_dict_as_json_compressed_b64,omitempty"` - MinNumberOfExactMatchesInPage uint32 `protobuf:"varint,4,opt,name=min_number_of_exact_matches_in_page,json=minNumberOfExactMatchesInPage,proto3" json:"min_number_of_exact_matches_in_page,omitempty"` - EarliestAvailableDateOfInternetResults string `protobuf:"bytes,5,opt,name=earliest_available_date_of_internet_results,json=earliestAvailableDateOfInternetResults,proto3" json:"earliest_available_date_of_internet_results,omitempty"` -} - -func (x *InternetRareness) Reset() { - *x = InternetRareness{} - mi := &file_dd_server_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *InternetRareness) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InternetRareness) ProtoMessage() {} - -func (x *InternetRareness) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InternetRareness.ProtoReflect.Descriptor instead. 
-func (*InternetRareness) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{2} -} - -func (x *InternetRareness) GetRareOnInternetSummaryTableAsJsonCompressedB64() string { - if x != nil { - return x.RareOnInternetSummaryTableAsJsonCompressedB64 - } - return "" -} - -func (x *InternetRareness) GetRareOnInternetGraphJsonCompressedB64() string { - if x != nil { - return x.RareOnInternetGraphJsonCompressedB64 - } - return "" -} - -func (x *InternetRareness) GetAlternativeRareOnInternetDictAsJsonCompressedB64() string { - if x != nil { - return x.AlternativeRareOnInternetDictAsJsonCompressedB64 - } - return "" -} - -func (x *InternetRareness) GetMinNumberOfExactMatchesInPage() uint32 { - if x != nil { - return x.MinNumberOfExactMatchesInPage - } - return 0 -} - -func (x *InternetRareness) GetEarliestAvailableDateOfInternetResults() string { - if x != nil { - return x.EarliestAvailableDateOfInternetResults - } - return "" -} - -type AltNsfwScores struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Drawings float32 `protobuf:"fixed32,1,opt,name=drawings,proto3" json:"drawings,omitempty"` - Hentai float32 `protobuf:"fixed32,2,opt,name=hentai,proto3" json:"hentai,omitempty"` - Neutral float32 `protobuf:"fixed32,3,opt,name=neutral,proto3" json:"neutral,omitempty"` - Porn float32 `protobuf:"fixed32,4,opt,name=porn,proto3" json:"porn,omitempty"` - Sexy float32 `protobuf:"fixed32,5,opt,name=sexy,proto3" json:"sexy,omitempty"` -} - -func (x *AltNsfwScores) Reset() { - *x = AltNsfwScores{} - mi := &file_dd_server_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *AltNsfwScores) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AltNsfwScores) ProtoMessage() {} - -func (x *AltNsfwScores) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[3] - if x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AltNsfwScores.ProtoReflect.Descriptor instead. -func (*AltNsfwScores) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{3} -} - -func (x *AltNsfwScores) GetDrawings() float32 { - if x != nil { - return x.Drawings - } - return 0 -} - -func (x *AltNsfwScores) GetHentai() float32 { - if x != nil { - return x.Hentai - } - return 0 -} - -func (x *AltNsfwScores) GetNeutral() float32 { - if x != nil { - return x.Neutral - } - return 0 -} - -func (x *AltNsfwScores) GetPorn() float32 { - if x != nil { - return x.Porn - } - return 0 -} - -func (x *AltNsfwScores) GetSexy() float32 { - if x != nil { - return x.Sexy - } - return 0 -} - -type GetStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetStatusRequest) Reset() { - *x = GetStatusRequest{} - mi := &file_dd_server_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetStatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetStatusRequest) ProtoMessage() {} - -func (x *GetStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetStatusRequest.ProtoReflect.Descriptor instead. 
-func (*GetStatusRequest) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{4} -} - -type TaskCount struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - MaxConcurrent int32 `protobuf:"varint,1,opt,name=max_concurrent,json=maxConcurrent,proto3" json:"max_concurrent,omitempty"` - Executing int32 `protobuf:"varint,2,opt,name=executing,proto3" json:"executing,omitempty"` - WaitingInQueue int32 `protobuf:"varint,3,opt,name=waiting_in_queue,json=waitingInQueue,proto3" json:"waiting_in_queue,omitempty"` - Succeeded int32 `protobuf:"varint,4,opt,name=succeeded,proto3" json:"succeeded,omitempty"` - Failed int32 `protobuf:"varint,5,opt,name=failed,proto3" json:"failed,omitempty"` - Cancelled int32 `protobuf:"varint,6,opt,name=cancelled,proto3" json:"cancelled,omitempty"` -} - -func (x *TaskCount) Reset() { - *x = TaskCount{} - mi := &file_dd_server_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskCount) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskCount) ProtoMessage() {} - -func (x *TaskCount) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskCount.ProtoReflect.Descriptor instead. 
-func (*TaskCount) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{5} -} - -func (x *TaskCount) GetMaxConcurrent() int32 { - if x != nil { - return x.MaxConcurrent - } - return 0 -} - -func (x *TaskCount) GetExecuting() int32 { - if x != nil { - return x.Executing - } - return 0 -} - -func (x *TaskCount) GetWaitingInQueue() int32 { - if x != nil { - return x.WaitingInQueue - } - return 0 -} - -func (x *TaskCount) GetSucceeded() int32 { - if x != nil { - return x.Succeeded - } - return 0 -} - -func (x *TaskCount) GetFailed() int32 { - if x != nil { - return x.Failed - } - return 0 -} - -func (x *TaskCount) GetCancelled() int32 { - if x != nil { - return x.Cancelled - } - return 0 -} - -type TaskMetrics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AverageTaskWaitTimeSecs float32 `protobuf:"fixed32,1,opt,name=average_task_wait_time_secs,json=averageTaskWaitTimeSecs,proto3" json:"average_task_wait_time_secs,omitempty"` - MaxTaskWaitTimeSecs float32 `protobuf:"fixed32,2,opt,name=max_task_wait_time_secs,json=maxTaskWaitTimeSecs,proto3" json:"max_task_wait_time_secs,omitempty"` - AverageTaskExecutionTimeSecs float32 `protobuf:"fixed32,3,opt,name=average_task_execution_time_secs,json=averageTaskExecutionTimeSecs,proto3" json:"average_task_execution_time_secs,omitempty"` - AverageTaskVirtualMemoryUsageBytes int64 `protobuf:"varint,4,opt,name=average_task_virtual_memory_usage_bytes,json=averageTaskVirtualMemoryUsageBytes,proto3" json:"average_task_virtual_memory_usage_bytes,omitempty"` - AverageTaskRssMemoryUsageBytes int64 `protobuf:"varint,5,opt,name=average_task_rss_memory_usage_bytes,json=averageTaskRssMemoryUsageBytes,proto3" json:"average_task_rss_memory_usage_bytes,omitempty"` - PeakTaskRssMemoryUsageBytes int64 `protobuf:"varint,6,opt,name=peak_task_rss_memory_usage_bytes,json=peakTaskRssMemoryUsageBytes,proto3" json:"peak_task_rss_memory_usage_bytes,omitempty"` - 
PeakTaskVmsMemoryUsageBytes int64 `protobuf:"varint,7,opt,name=peak_task_vms_memory_usage_bytes,json=peakTaskVmsMemoryUsageBytes,proto3" json:"peak_task_vms_memory_usage_bytes,omitempty"` -} - -func (x *TaskMetrics) Reset() { - *x = TaskMetrics{} - mi := &file_dd_server_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *TaskMetrics) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TaskMetrics) ProtoMessage() {} - -func (x *TaskMetrics) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TaskMetrics.ProtoReflect.Descriptor instead. -func (*TaskMetrics) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{6} -} - -func (x *TaskMetrics) GetAverageTaskWaitTimeSecs() float32 { - if x != nil { - return x.AverageTaskWaitTimeSecs - } - return 0 -} - -func (x *TaskMetrics) GetMaxTaskWaitTimeSecs() float32 { - if x != nil { - return x.MaxTaskWaitTimeSecs - } - return 0 -} - -func (x *TaskMetrics) GetAverageTaskExecutionTimeSecs() float32 { - if x != nil { - return x.AverageTaskExecutionTimeSecs - } - return 0 -} - -func (x *TaskMetrics) GetAverageTaskVirtualMemoryUsageBytes() int64 { - if x != nil { - return x.AverageTaskVirtualMemoryUsageBytes - } - return 0 -} - -func (x *TaskMetrics) GetAverageTaskRssMemoryUsageBytes() int64 { - if x != nil { - return x.AverageTaskRssMemoryUsageBytes - } - return 0 -} - -func (x *TaskMetrics) GetPeakTaskRssMemoryUsageBytes() int64 { - if x != nil { - return x.PeakTaskRssMemoryUsageBytes - } - return 0 -} - -func (x *TaskMetrics) GetPeakTaskVmsMemoryUsageBytes() int64 { - if x != nil { - return x.PeakTaskVmsMemoryUsageBytes - } - return 0 -} - -type GetStatusResponse struct { - state 
protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - TaskCount *TaskCount `protobuf:"bytes,2,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` - TaskMetrics *TaskMetrics `protobuf:"bytes,3,opt,name=task_metrics,json=taskMetrics,proto3" json:"task_metrics,omitempty"` -} - -func (x *GetStatusResponse) Reset() { - *x = GetStatusResponse{} - mi := &file_dd_server_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetStatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetStatusResponse) ProtoMessage() {} - -func (x *GetStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_dd_server_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetStatusResponse.ProtoReflect.Descriptor instead. 
-func (*GetStatusResponse) Descriptor() ([]byte, []int) { - return file_dd_server_proto_rawDescGZIP(), []int{7} -} - -func (x *GetStatusResponse) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *GetStatusResponse) GetTaskCount() *TaskCount { - if x != nil { - return x.TaskCount - } - return nil -} - -func (x *GetStatusResponse) GetTaskMetrics() *TaskMetrics { - if x != nil { - return x.TaskMetrics - } - return nil -} - -var File_dd_server_proto protoreflect.FileDescriptor - -var file_dd_server_proto_rawDesc = []byte{ - 0x0a, 0x0f, 0x64, 0x64, 0x2d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0d, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x8a, 0x06, 0x0a, 0x14, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, - 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6d, 0x61, - 0x67, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, - 0x12, 0x55, 0x0a, 0x28, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x23, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, - 0x61, 0x73, 0x68, 0x57, 0x68, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, - 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x59, 0x0a, 0x2a, 0x70, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x77, - 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x25, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x57, 0x68, - 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, - 0x65, 0x64, 0x12, 0x4e, 0x0a, 0x24, 0x75, 0x74, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x20, 0x75, 0x74, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x57, 0x68, - 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, - 0x65, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, - 0x6f, 0x66, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x13, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x53, 0x75, - 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x31, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, - 0x66, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x31, 0x12, 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, - 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x32, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, - 
0x6e, 0x6f, 0x64, 0x65, 0x32, 0x12, 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, - 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x33, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x33, 0x12, 0x39, 0x0a, 0x19, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x69, 0x73, 0x50, 0x61, 0x73, 0x74, 0x65, - 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x36, 0x0a, 0x18, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x5f, 0x69, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x14, 0x6f, 0x70, 0x65, 0x6e, 0x41, 0x70, 0x69, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, - 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x22, 0xcd, 0x12, - 0x0a, 0x17, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, - 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x55, 0x0a, 0x28, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x77, - 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x23, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x57, 0x68, 0x65, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, - 0x12, 0x59, 0x0a, 0x2a, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x25, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x57, 0x68, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x4e, 0x0a, 0x24, 0x75, - 0x74, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x77, 0x68, 0x65, - 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, - 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x20, 0x75, 0x74, 0x63, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x57, 0x68, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x70, - 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, - 0x12, 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, - 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x31, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, - 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, 0x65, 
0x67, 0x69, 0x73, 0x74, - 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x31, 0x12, - 0x4d, 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, - 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x32, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, - 0x61, 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x32, 0x12, 0x4d, - 0x0a, 0x24, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x33, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1f, 0x70, 0x61, - 0x73, 0x74, 0x65, 0x6c, 0x49, 0x64, 0x4f, 0x66, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x33, 0x12, 0x39, 0x0a, - 0x19, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x16, 0x69, 0x73, 0x50, 0x61, 0x73, 0x74, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, - 0x12, 0x41, 0x0a, 0x1d, 0x64, 0x75, 0x70, 0x65, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x64, 0x75, 0x70, 0x65, 0x44, 0x65, 0x74, - 0x65, 0x63, 
0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x6b, 0x65, 0x6c, 0x79, - 0x5f, 0x64, 0x75, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x4c, - 0x69, 0x6b, 0x65, 0x6c, 0x79, 0x44, 0x75, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x69, 0x73, 0x5f, - 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x52, 0x61, 0x72, 0x65, 0x4f, 0x6e, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6f, 0x76, 0x65, 0x72, - 0x61, 0x6c, 0x6c, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x73, 0x63, 0x6f, - 0x72, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x6c, - 0x6c, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x6a, - 0x0a, 0x35, 0x70, 0x63, 0x74, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x6f, 0x70, 0x5f, 0x31, 0x30, 0x5f, - 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x5f, 0x77, 0x69, 0x74, - 0x68, 0x5f, 0x64, 0x75, 0x70, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x5f, 0x61, 0x62, 0x6f, 0x76, - 0x65, 0x5f, 0x32, 0x35, 0x70, 0x63, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x02, 0x52, 0x2b, 0x70, - 0x63, 0x74, 0x4f, 0x66, 0x54, 0x6f, 0x70, 0x31, 0x30, 0x4d, 0x6f, 0x73, 0x74, 0x53, 0x69, 0x6d, - 0x69, 0x6c, 0x61, 0x72, 0x57, 0x69, 0x74, 0x68, 0x44, 0x75, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x62, - 0x41, 0x62, 0x6f, 0x76, 0x65, 0x32, 0x35, 0x70, 0x63, 0x74, 0x12, 0x6a, 0x0a, 0x35, 0x70, 0x63, - 0x74, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x6f, 0x70, 0x5f, 0x31, 0x30, 0x5f, 0x6d, 0x6f, 0x73, 0x74, - 0x5f, 0x73, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x75, - 0x70, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x5f, 0x61, 0x62, 0x6f, 0x76, 0x65, 0x5f, 0x33, 0x33, - 0x70, 0x63, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 
0x02, 0x52, 0x2b, 0x70, 0x63, 0x74, 0x4f, 0x66, - 0x54, 0x6f, 0x70, 0x31, 0x30, 0x4d, 0x6f, 0x73, 0x74, 0x53, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, - 0x57, 0x69, 0x74, 0x68, 0x44, 0x75, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x62, 0x41, 0x62, 0x6f, 0x76, - 0x65, 0x33, 0x33, 0x70, 0x63, 0x74, 0x12, 0x6a, 0x0a, 0x35, 0x70, 0x63, 0x74, 0x5f, 0x6f, 0x66, - 0x5f, 0x74, 0x6f, 0x70, 0x5f, 0x31, 0x30, 0x5f, 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x6d, - 0x69, 0x6c, 0x61, 0x72, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x75, 0x70, 0x65, 0x5f, 0x70, - 0x72, 0x6f, 0x62, 0x5f, 0x61, 0x62, 0x6f, 0x76, 0x65, 0x5f, 0x35, 0x30, 0x70, 0x63, 0x74, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x02, 0x52, 0x2b, 0x70, 0x63, 0x74, 0x4f, 0x66, 0x54, 0x6f, 0x70, 0x31, - 0x30, 0x4d, 0x6f, 0x73, 0x74, 0x53, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x57, 0x69, 0x74, 0x68, - 0x44, 0x75, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x62, 0x41, 0x62, 0x6f, 0x76, 0x65, 0x35, 0x30, 0x70, - 0x63, 0x74, 0x12, 0x57, 0x0a, 0x29, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x73, - 0x63, 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, - 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x24, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, - 0x63, 0x6f, 0x72, 0x65, 0x73, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4a, 0x73, 0x6f, 0x6e, 0x43, 0x6f, - 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, 0x12, 0x4c, 0x0a, 0x11, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, - 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x52, - 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, - 0x74, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 
0x70, 0x65, - 0x6e, 0x5f, 0x6e, 0x73, 0x66, 0x77, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x13, 0x20, 0x01, - 0x28, 0x02, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x4e, 0x73, 0x66, 0x77, 0x53, 0x63, 0x6f, 0x72, - 0x65, 0x12, 0x54, 0x0a, 0x17, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x6e, 0x73, 0x66, 0x77, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x14, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x41, 0x6c, 0x74, 0x4e, 0x73, 0x66, 0x77, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x73, - 0x52, 0x15, 0x61, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x73, 0x66, - 0x77, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x57, 0x0a, 0x29, 0x69, 0x6d, 0x61, 0x67, 0x65, - 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x6f, 0x66, 0x5f, - 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, - 0x66, 0x69, 0x6c, 0x65, 0x18, 0x15, 0x20, 0x03, 0x28, 0x01, 0x52, 0x24, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4f, 0x66, 0x43, 0x61, - 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6c, 0x65, - 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x14, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x3e, 0x0a, 0x1c, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x6f, - 0x66, 0x5f, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x68, 0x61, - 0x73, 0x68, 0x4f, 0x66, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, - 0x67, 0x65, 0x46, 0x69, 
0x6c, 0x65, 0x12, 0x36, 0x0a, 0x18, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x61, - 0x70, 0x69, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6f, 0x70, 0x65, 0x6e, 0x41, 0x70, - 0x69, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x30, - 0x0a, 0x14, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, - 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x02, 0x52, 0x12, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, - 0x12, 0x62, 0x0a, 0x2f, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6d, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x68, 0x75, 0x6d, 0x62, 0x6e, 0x61, 0x69, 0x6c, 0x5f, 0x77, 0x65, - 0x62, 0x70, 0x5f, 0x61, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x36, 0x34, 0x5f, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x29, 0x63, 0x61, 0x6e, 0x64, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x54, 0x68, 0x75, 0x6d, 0x62, 0x6e, 0x61, - 0x69, 0x6c, 0x57, 0x65, 0x62, 0x70, 0x41, 0x73, 0x42, 0x61, 0x73, 0x65, 0x36, 0x34, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x12, 0x64, 0x0a, 0x30, 0x64, 0x6f, 0x65, 0x73, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x69, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x74, 0x68, 0x65, 0x5f, 0x66, 0x6f, 0x6c, 0x6c, - 0x6f, 0x77, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x2a, - 0x64, 0x6f, 0x65, 0x73, 0x4e, 0x6f, 0x74, 0x49, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x54, 0x68, 0x65, - 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x73, - 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x73, 
0x65, 0x6e, 0x73, 0x65, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x73, - 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x53, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x73, - 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x19, 0x69, 0x6e, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x53, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, - 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x2d, 0x73, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x69, - 0x74, 0x79, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x72, 0x73, - 0x74, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x02, 0x52, 0x27, 0x73, 0x69, 0x6d, - 0x69, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x46, 0x69, - 0x72, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x49, 0x6e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0d, 0x63, 0x70, - 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x18, 0x20, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x72, 0x6f, - 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x1e, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x65, - 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x21, 0x20, 0x01, 0x28, 0x09, - 
0x52, 0x1b, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, - 0x6e, 0x74, 0x53, 0x65, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x22, 0xf7, 0x03, - 0x0a, 0x10, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, - 0x73, 0x73, 0x12, 0x6c, 0x0a, 0x35, 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x2d, 0x72, 0x61, 0x72, 0x65, 0x4f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, - 0x74, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x73, 0x4a, - 0x73, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, - 0x12, 0x58, 0x0a, 0x2a, 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, - 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x24, 0x72, 0x61, 0x72, 0x65, 0x4f, 0x6e, 0x49, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x65, 0x74, 0x47, 0x72, 0x61, 0x70, 0x68, 0x4a, 0x73, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, 0x12, 0x72, 0x0a, 0x38, 0x61, 0x6c, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x72, 0x61, 0x72, 0x65, 0x5f, 0x6f, - 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x5f, 0x64, 0x69, 0x63, 0x74, 0x5f, - 0x61, 0x73, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x65, 0x64, 0x5f, 0x62, 0x36, 0x34, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x30, 0x61, 0x6c, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 
0x69, 0x76, 0x65, 0x52, 0x61, 0x72, 0x65, 0x4f, 0x6e, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x44, 0x69, 0x63, 0x74, 0x41, 0x73, 0x4a, 0x73, 0x6f, - 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x36, 0x34, 0x12, 0x4a, - 0x0a, 0x23, 0x6d, 0x69, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, - 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x69, 0x6e, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1d, 0x6d, 0x69, 0x6e, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x45, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x73, 0x49, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x12, 0x5b, 0x0a, 0x2b, 0x65, 0x61, - 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, - 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x26, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, - 0x6c, 0x65, 0x44, 0x61, 0x74, 0x65, 0x4f, 0x66, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x0d, 0x41, 0x6c, 0x74, 0x4e, - 0x73, 0x66, 0x77, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x72, 0x61, - 0x77, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x64, 0x72, 0x61, - 0x77, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x6e, 0x74, 0x61, 0x69, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x06, 0x68, 0x65, 0x6e, 0x74, 0x61, 0x69, 0x12, 0x18, 0x0a, - 0x07, 0x6e, 0x65, 0x75, 0x74, 0x72, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x07, - 0x6e, 0x65, 0x75, 0x74, 0x72, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x6e, 0x12, 
0x12, 0x0a, 0x04, 0x73, - 0x65, 0x78, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x04, 0x73, 0x65, 0x78, 0x79, 0x22, - 0x12, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0xce, 0x01, 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x43, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x61, 0x69, 0x74, 0x69, 0x6e, - 0x67, 0x5f, 0x69, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0e, 0x77, 0x61, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, - 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, - 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x63, 0x61, 0x6e, 0x63, 0x65, - 0x6c, 0x6c, 0x65, 0x64, 0x22, 0xf9, 0x03, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x12, 0x3c, 0x0a, 0x1b, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, - 0x65, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x17, 0x61, 0x76, 0x65, 0x72, 0x61, - 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, - 0x63, 0x73, 
0x12, 0x34, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x77, - 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x02, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x54, 0x61, 0x73, 0x6b, 0x57, 0x61, 0x69, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x73, 0x12, 0x46, 0x0a, 0x20, 0x61, 0x76, 0x65, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x02, 0x52, 0x1c, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x73, - 0x12, 0x53, 0x0a, 0x27, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, - 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x22, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x69, - 0x72, 0x74, 0x75, 0x61, 0x6c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x23, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x73, 0x73, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, - 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x1e, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, - 0x73, 0x73, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x12, 0x45, 0x0a, 0x20, 0x70, 0x65, 0x61, 0x6b, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x72, 0x73, 0x73, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 
0x20, 0x01, 0x28, 0x03, 0x52, 0x1b, 0x70, 0x65, - 0x61, 0x6b, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x73, 0x73, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, - 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x20, 0x70, 0x65, 0x61, - 0x6b, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x76, 0x6d, 0x73, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, - 0x79, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x1b, 0x70, 0x65, 0x61, 0x6b, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x6d, 0x73, - 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x22, 0xa5, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x37, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x09, - 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x0c, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x73, - 0x6b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x32, 0xc8, 0x01, 0x0a, 0x13, 0x44, 0x75, 0x70, - 0x65, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x12, 0x61, 0x0a, 0x12, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, - 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x23, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 
0x65, 0x74, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, - 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x64, 0x75, - 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x52, 0x61, 0x72, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x1f, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x20, 0x2e, 0x64, 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x64, - 0x75, 0x70, 0x65, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_dd_server_proto_rawDescOnce sync.Once - file_dd_server_proto_rawDescData = file_dd_server_proto_rawDesc -) - -func file_dd_server_proto_rawDescGZIP() []byte { - file_dd_server_proto_rawDescOnce.Do(func() { - file_dd_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_dd_server_proto_rawDescData) - }) - return file_dd_server_proto_rawDescData -} - -var file_dd_server_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_dd_server_proto_goTypes = []any{ - (*RarenessScoreRequest)(nil), // 0: dupedetection.RarenessScoreRequest - (*ImageRarenessScoreReply)(nil), // 1: dupedetection.ImageRarenessScoreReply - (*InternetRareness)(nil), // 2: 
dupedetection.InternetRareness - (*AltNsfwScores)(nil), // 3: dupedetection.AltNsfwScores - (*GetStatusRequest)(nil), // 4: dupedetection.GetStatusRequest - (*TaskCount)(nil), // 5: dupedetection.TaskCount - (*TaskMetrics)(nil), // 6: dupedetection.TaskMetrics - (*GetStatusResponse)(nil), // 7: dupedetection.GetStatusResponse -} -var file_dd_server_proto_depIdxs = []int32{ - 2, // 0: dupedetection.ImageRarenessScoreReply.internet_rareness:type_name -> dupedetection.InternetRareness - 3, // 1: dupedetection.ImageRarenessScoreReply.alternative_nsfw_scores:type_name -> dupedetection.AltNsfwScores - 5, // 2: dupedetection.GetStatusResponse.task_count:type_name -> dupedetection.TaskCount - 6, // 3: dupedetection.GetStatusResponse.task_metrics:type_name -> dupedetection.TaskMetrics - 0, // 4: dupedetection.DupeDetectionServer.ImageRarenessScore:input_type -> dupedetection.RarenessScoreRequest - 4, // 5: dupedetection.DupeDetectionServer.GetStatus:input_type -> dupedetection.GetStatusRequest - 1, // 6: dupedetection.DupeDetectionServer.ImageRarenessScore:output_type -> dupedetection.ImageRarenessScoreReply - 7, // 7: dupedetection.DupeDetectionServer.GetStatus:output_type -> dupedetection.GetStatusResponse - 6, // [6:8] is the sub-list for method output_type - 4, // [4:6] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_dd_server_proto_init() } -func file_dd_server_proto_init() { - if File_dd_server_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_dd_server_proto_rawDesc, - NumEnums: 0, - NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_dd_server_proto_goTypes, - DependencyIndexes: file_dd_server_proto_depIdxs, - MessageInfos: 
file_dd_server_proto_msgTypes, - }.Build() - File_dd_server_proto = out.File - file_dd_server_proto_rawDesc = nil - file_dd_server_proto_goTypes = nil - file_dd_server_proto_depIdxs = nil -} diff --git a/gen/dupedetection/dd-server_grpc.pb.go b/gen/dupedetection/dd-server_grpc.pb.go deleted file mode 100644 index 27ee79bf..00000000 --- a/gen/dupedetection/dd-server_grpc.pb.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.12.4 -// source: dd-server.proto - -package dupedetection - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - DupeDetectionServer_ImageRarenessScore_FullMethodName = "/dupedetection.DupeDetectionServer/ImageRarenessScore" - DupeDetectionServer_GetStatus_FullMethodName = "/dupedetection.DupeDetectionServer/GetStatus" -) - -// DupeDetectionServerClient is the client API for DupeDetectionServer service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type DupeDetectionServerClient interface { - ImageRarenessScore(ctx context.Context, in *RarenessScoreRequest, opts ...grpc.CallOption) (*ImageRarenessScoreReply, error) - GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) -} - -type dupeDetectionServerClient struct { - cc grpc.ClientConnInterface -} - -func NewDupeDetectionServerClient(cc grpc.ClientConnInterface) DupeDetectionServerClient { - return &dupeDetectionServerClient{cc} -} - -func (c *dupeDetectionServerClient) ImageRarenessScore(ctx context.Context, in *RarenessScoreRequest, opts ...grpc.CallOption) (*ImageRarenessScoreReply, error) { - out := new(ImageRarenessScoreReply) - err := c.cc.Invoke(ctx, DupeDetectionServer_ImageRarenessScore_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dupeDetectionServerClient) GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) { - out := new(GetStatusResponse) - err := c.cc.Invoke(ctx, DupeDetectionServer_GetStatus_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DupeDetectionServerServer is the server API for DupeDetectionServer service. -// All implementations must embed UnimplementedDupeDetectionServerServer -// for forward compatibility -type DupeDetectionServerServer interface { - ImageRarenessScore(context.Context, *RarenessScoreRequest) (*ImageRarenessScoreReply, error) - GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error) - mustEmbedUnimplementedDupeDetectionServerServer() -} - -// UnimplementedDupeDetectionServerServer must be embedded to have forward compatible implementations. 
-type UnimplementedDupeDetectionServerServer struct { -} - -func (UnimplementedDupeDetectionServerServer) ImageRarenessScore(context.Context, *RarenessScoreRequest) (*ImageRarenessScoreReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method ImageRarenessScore not implemented") -} -func (UnimplementedDupeDetectionServerServer) GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetStatus not implemented") -} -func (UnimplementedDupeDetectionServerServer) mustEmbedUnimplementedDupeDetectionServerServer() {} - -// UnsafeDupeDetectionServerServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to DupeDetectionServerServer will -// result in compilation errors. -type UnsafeDupeDetectionServerServer interface { - mustEmbedUnimplementedDupeDetectionServerServer() -} - -func RegisterDupeDetectionServerServer(s grpc.ServiceRegistrar, srv DupeDetectionServerServer) { - s.RegisterService(&DupeDetectionServer_ServiceDesc, srv) -} - -func _DupeDetectionServer_ImageRarenessScore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RarenessScoreRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DupeDetectionServerServer).ImageRarenessScore(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: DupeDetectionServer_ImageRarenessScore_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DupeDetectionServerServer).ImageRarenessScore(ctx, req.(*RarenessScoreRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _DupeDetectionServer_GetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { - in := new(GetStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DupeDetectionServerServer).GetStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: DupeDetectionServer_GetStatus_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DupeDetectionServerServer).GetStatus(ctx, req.(*GetStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// DupeDetectionServer_ServiceDesc is the grpc.ServiceDesc for DupeDetectionServer service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var DupeDetectionServer_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "dupedetection.DupeDetectionServer", - HandlerType: (*DupeDetectionServerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ImageRarenessScore", - Handler: _DupeDetectionServer_ImageRarenessScore_Handler, - }, - { - MethodName: "GetStatus", - Handler: _DupeDetectionServer_GetStatus_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "dd-server.proto", -} diff --git a/gen/raptorq/raptorq.pb.go b/gen/raptorq/raptorq.pb.go deleted file mode 100644 index 8c9ba9d0..00000000 --- a/gen/raptorq/raptorq.pb.go +++ /dev/null @@ -1,476 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.35.1 -// protoc v3.12.4 -// source: raptorq.proto - -package raptorq - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type EncodeMetaDataRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - FilesNumber uint32 `protobuf:"varint,2,opt,name=files_number,json=filesNumber,proto3" json:"files_number,omitempty"` - BlockHash string `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` - PastelId string `protobuf:"bytes,4,opt,name=pastel_id,json=pastelId,proto3" json:"pastel_id,omitempty"` -} - -func (x *EncodeMetaDataRequest) Reset() { - *x = EncodeMetaDataRequest{} - mi := &file_raptorq_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeMetaDataRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeMetaDataRequest) ProtoMessage() {} - -func (x *EncodeMetaDataRequest) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeMetaDataRequest.ProtoReflect.Descriptor instead. 
-func (*EncodeMetaDataRequest) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{0} -} - -func (x *EncodeMetaDataRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *EncodeMetaDataRequest) GetFilesNumber() uint32 { - if x != nil { - return x.FilesNumber - } - return 0 -} - -func (x *EncodeMetaDataRequest) GetBlockHash() string { - if x != nil { - return x.BlockHash - } - return "" -} - -func (x *EncodeMetaDataRequest) GetPastelId() string { - if x != nil { - return x.PastelId - } - return "" -} - -type EncodeMetaDataReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EncoderParameters []byte `protobuf:"bytes,1,opt,name=encoder_parameters,json=encoderParameters,proto3" json:"encoder_parameters,omitempty"` - SymbolsCount uint32 `protobuf:"varint,2,opt,name=symbols_count,json=symbolsCount,proto3" json:"symbols_count,omitempty"` - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *EncodeMetaDataReply) Reset() { - *x = EncodeMetaDataReply{} - mi := &file_raptorq_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeMetaDataReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeMetaDataReply) ProtoMessage() {} - -func (x *EncodeMetaDataReply) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeMetaDataReply.ProtoReflect.Descriptor instead. 
-func (*EncodeMetaDataReply) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{1} -} - -func (x *EncodeMetaDataReply) GetEncoderParameters() []byte { - if x != nil { - return x.EncoderParameters - } - return nil -} - -func (x *EncodeMetaDataReply) GetSymbolsCount() uint32 { - if x != nil { - return x.SymbolsCount - } - return 0 -} - -func (x *EncodeMetaDataReply) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type EncodeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *EncodeRequest) Reset() { - *x = EncodeRequest{} - mi := &file_raptorq_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeRequest) ProtoMessage() {} - -func (x *EncodeRequest) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeRequest.ProtoReflect.Descriptor instead. 
-func (*EncodeRequest) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{2} -} - -func (x *EncodeRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type EncodeReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EncoderParameters []byte `protobuf:"bytes,1,opt,name=encoder_parameters,json=encoderParameters,proto3" json:"encoder_parameters,omitempty"` - SymbolsCount uint32 `protobuf:"varint,2,opt,name=symbols_count,json=symbolsCount,proto3" json:"symbols_count,omitempty"` - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *EncodeReply) Reset() { - *x = EncodeReply{} - mi := &file_raptorq_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EncodeReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EncodeReply) ProtoMessage() {} - -func (x *EncodeReply) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EncodeReply.ProtoReflect.Descriptor instead. 
-func (*EncodeReply) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{3} -} - -func (x *EncodeReply) GetEncoderParameters() []byte { - if x != nil { - return x.EncoderParameters - } - return nil -} - -func (x *EncodeReply) GetSymbolsCount() uint32 { - if x != nil { - return x.SymbolsCount - } - return 0 -} - -func (x *EncodeReply) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type DecodeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EncoderParameters []byte `protobuf:"bytes,1,opt,name=encoder_parameters,json=encoderParameters,proto3" json:"encoder_parameters,omitempty"` - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *DecodeRequest) Reset() { - *x = DecodeRequest{} - mi := &file_raptorq_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DecodeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DecodeRequest) ProtoMessage() {} - -func (x *DecodeRequest) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DecodeRequest.ProtoReflect.Descriptor instead. 
-func (*DecodeRequest) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{4} -} - -func (x *DecodeRequest) GetEncoderParameters() []byte { - if x != nil { - return x.EncoderParameters - } - return nil -} - -func (x *DecodeRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -type DecodeReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` -} - -func (x *DecodeReply) Reset() { - *x = DecodeReply{} - mi := &file_raptorq_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DecodeReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DecodeReply) ProtoMessage() {} - -func (x *DecodeReply) ProtoReflect() protoreflect.Message { - mi := &file_raptorq_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DecodeReply.ProtoReflect.Descriptor instead. 
-func (*DecodeReply) Descriptor() ([]byte, []int) { - return file_raptorq_proto_rawDescGZIP(), []int{5} -} - -func (x *DecodeReply) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -var File_raptorq_proto protoreflect.FileDescriptor - -var file_raptorq_proto_rawDesc = []byte{ - 0x0a, 0x0d, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x07, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x22, 0x8a, 0x01, 0x0a, 0x15, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5f, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x66, 0x69, - 0x6c, 0x65, 0x73, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x73, 0x74, - 0x65, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, - 0x74, 0x65, 0x6c, 0x49, 0x64, 0x22, 0x7d, 0x0a, 0x13, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2d, 0x0a, 0x12, - 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, - 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, - 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x0c, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x12, 0x0a, 0x04, 0x70, 
0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x22, 0x23, 0x0a, 0x0d, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x75, 0x0a, 0x0b, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x79, 0x6d, 0x62, 0x6f, - 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, - 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x22, 0x52, 0x0a, 0x0d, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x22, 0x21, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x32, 0xc9, 0x01, 0x0a, 0x07, 0x52, 0x61, 0x70, 0x74, - 0x6f, 0x72, 0x51, 0x12, 0x4e, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x72, 0x61, 0x70, 
0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x06, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x2e, - 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x06, 0x44, - 0x65, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, - 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, - 0x72, 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x2e, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x72, - 0x61, 0x70, 0x74, 0x6f, 0x72, 0x71, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_raptorq_proto_rawDescOnce sync.Once - file_raptorq_proto_rawDescData = file_raptorq_proto_rawDesc -) - -func file_raptorq_proto_rawDescGZIP() []byte { - file_raptorq_proto_rawDescOnce.Do(func() { - file_raptorq_proto_rawDescData = protoimpl.X.CompressGZIP(file_raptorq_proto_rawDescData) - }) - return file_raptorq_proto_rawDescData -} - -var file_raptorq_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_raptorq_proto_goTypes = []any{ - (*EncodeMetaDataRequest)(nil), // 0: raptorq.EncodeMetaDataRequest - (*EncodeMetaDataReply)(nil), // 1: raptorq.EncodeMetaDataReply - 
(*EncodeRequest)(nil), // 2: raptorq.EncodeRequest - (*EncodeReply)(nil), // 3: raptorq.EncodeReply - (*DecodeRequest)(nil), // 4: raptorq.DecodeRequest - (*DecodeReply)(nil), // 5: raptorq.DecodeReply -} -var file_raptorq_proto_depIdxs = []int32{ - 0, // 0: raptorq.RaptorQ.EncodeMetaData:input_type -> raptorq.EncodeMetaDataRequest - 2, // 1: raptorq.RaptorQ.Encode:input_type -> raptorq.EncodeRequest - 4, // 2: raptorq.RaptorQ.Decode:input_type -> raptorq.DecodeRequest - 1, // 3: raptorq.RaptorQ.EncodeMetaData:output_type -> raptorq.EncodeMetaDataReply - 3, // 4: raptorq.RaptorQ.Encode:output_type -> raptorq.EncodeReply - 5, // 5: raptorq.RaptorQ.Decode:output_type -> raptorq.DecodeReply - 3, // [3:6] is the sub-list for method output_type - 0, // [0:3] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_raptorq_proto_init() } -func file_raptorq_proto_init() { - if File_raptorq_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_raptorq_proto_rawDesc, - NumEnums: 0, - NumMessages: 6, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_raptorq_proto_goTypes, - DependencyIndexes: file_raptorq_proto_depIdxs, - MessageInfos: file_raptorq_proto_msgTypes, - }.Build() - File_raptorq_proto = out.File - file_raptorq_proto_rawDesc = nil - file_raptorq_proto_goTypes = nil - file_raptorq_proto_depIdxs = nil -} diff --git a/gen/raptorq/raptorq_grpc.pb.go b/gen/raptorq/raptorq_grpc.pb.go deleted file mode 100644 index 01c17ae8..00000000 --- a/gen/raptorq/raptorq_grpc.pb.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or 
http://www.opensource.org/licenses/mit-license.php. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.12.4 -// source: raptorq.proto - -package raptorq - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - RaptorQ_EncodeMetaData_FullMethodName = "/raptorq.RaptorQ/EncodeMetaData" - RaptorQ_Encode_FullMethodName = "/raptorq.RaptorQ/Encode" - RaptorQ_Decode_FullMethodName = "/raptorq.RaptorQ/Decode" -) - -// RaptorQClient is the client API for RaptorQ service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type RaptorQClient interface { - EncodeMetaData(ctx context.Context, in *EncodeMetaDataRequest, opts ...grpc.CallOption) (*EncodeMetaDataReply, error) - Encode(ctx context.Context, in *EncodeRequest, opts ...grpc.CallOption) (*EncodeReply, error) - Decode(ctx context.Context, in *DecodeRequest, opts ...grpc.CallOption) (*DecodeReply, error) -} - -type raptorQClient struct { - cc grpc.ClientConnInterface -} - -func NewRaptorQClient(cc grpc.ClientConnInterface) RaptorQClient { - return &raptorQClient{cc} -} - -func (c *raptorQClient) EncodeMetaData(ctx context.Context, in *EncodeMetaDataRequest, opts ...grpc.CallOption) (*EncodeMetaDataReply, error) { - out := new(EncodeMetaDataReply) - err := c.cc.Invoke(ctx, RaptorQ_EncodeMetaData_FullMethodName, in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *raptorQClient) Encode(ctx context.Context, in *EncodeRequest, opts ...grpc.CallOption) (*EncodeReply, error) { - out := new(EncodeReply) - err := c.cc.Invoke(ctx, RaptorQ_Encode_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *raptorQClient) Decode(ctx context.Context, in *DecodeRequest, opts ...grpc.CallOption) (*DecodeReply, error) { - out := new(DecodeReply) - err := c.cc.Invoke(ctx, RaptorQ_Decode_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// RaptorQServer is the server API for RaptorQ service. -// All implementations must embed UnimplementedRaptorQServer -// for forward compatibility -type RaptorQServer interface { - EncodeMetaData(context.Context, *EncodeMetaDataRequest) (*EncodeMetaDataReply, error) - Encode(context.Context, *EncodeRequest) (*EncodeReply, error) - Decode(context.Context, *DecodeRequest) (*DecodeReply, error) - mustEmbedUnimplementedRaptorQServer() -} - -// UnimplementedRaptorQServer must be embedded to have forward compatible implementations. -type UnimplementedRaptorQServer struct { -} - -func (UnimplementedRaptorQServer) EncodeMetaData(context.Context, *EncodeMetaDataRequest) (*EncodeMetaDataReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method EncodeMetaData not implemented") -} -func (UnimplementedRaptorQServer) Encode(context.Context, *EncodeRequest) (*EncodeReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method Encode not implemented") -} -func (UnimplementedRaptorQServer) Decode(context.Context, *DecodeRequest) (*DecodeReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method Decode not implemented") -} -func (UnimplementedRaptorQServer) mustEmbedUnimplementedRaptorQServer() {} - -// UnsafeRaptorQServer may be embedded to opt out of forward compatibility for this service. 
-// Use of this interface is not recommended, as added methods to RaptorQServer will -// result in compilation errors. -type UnsafeRaptorQServer interface { - mustEmbedUnimplementedRaptorQServer() -} - -func RegisterRaptorQServer(s grpc.ServiceRegistrar, srv RaptorQServer) { - s.RegisterService(&RaptorQ_ServiceDesc, srv) -} - -func _RaptorQ_EncodeMetaData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EncodeMetaDataRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RaptorQServer).EncodeMetaData(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: RaptorQ_EncodeMetaData_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RaptorQServer).EncodeMetaData(ctx, req.(*EncodeMetaDataRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _RaptorQ_Encode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EncodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RaptorQServer).Encode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: RaptorQ_Encode_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RaptorQServer).Encode(ctx, req.(*EncodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _RaptorQ_Decode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DecodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RaptorQServer).Decode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
RaptorQ_Decode_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RaptorQServer).Decode(ctx, req.(*DecodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// RaptorQ_ServiceDesc is the grpc.ServiceDesc for RaptorQ service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var RaptorQ_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "raptorq.RaptorQ", - HandlerType: (*RaptorQServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "EncodeMetaData", - Handler: _RaptorQ_EncodeMetaData_Handler, - }, - { - MethodName: "Encode", - Handler: _RaptorQ_Encode_Handler, - }, - { - MethodName: "Decode", - Handler: _RaptorQ_Decode_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "raptorq.proto", -} diff --git a/gen/supernode/action/cascade/service.pb.go b/gen/supernode/action/cascade/service.pb.go index dd083d04..f270a051 100644 --- a/gen/supernode/action/cascade/service.pb.go +++ b/gen/supernode/action/cascade/service.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.21.12 // source: supernode/action/cascade/service.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -116,15 +117,14 @@ func (SupernodeEventType) EnumDescriptor() ([]byte, []int) { } type RegisterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to RequestType: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to RequestType: // // *RegisterRequest_Chunk // *RegisterRequest_Metadata - RequestType isRegisterRequest_RequestType `protobuf_oneof:"request_type"` + RequestType isRegisterRequest_RequestType `protobuf_oneof:"request_type"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RegisterRequest) Reset() { @@ -157,23 +157,27 @@ func (*RegisterRequest) Descriptor() ([]byte, []int) { return file_supernode_action_cascade_service_proto_rawDescGZIP(), []int{0} } -func (m *RegisterRequest) GetRequestType() isRegisterRequest_RequestType { - if m != nil { - return m.RequestType +func (x *RegisterRequest) GetRequestType() isRegisterRequest_RequestType { + if x != nil { + return x.RequestType } return nil } func (x *RegisterRequest) GetChunk() *DataChunk { - if x, ok := x.GetRequestType().(*RegisterRequest_Chunk); ok { - return x.Chunk + if x != nil { + if x, ok := x.RequestType.(*RegisterRequest_Chunk); ok { + return x.Chunk + } } return nil } func (x *RegisterRequest) GetMetadata() *Metadata { - if x, ok := x.GetRequestType().(*RegisterRequest_Metadata); ok { - return x.Metadata + if x != nil { + if x, ok := x.RequestType.(*RegisterRequest_Metadata); ok { + return x.Metadata + } } return nil } @@ -195,11 +199,10 @@ func (*RegisterRequest_Chunk) isRegisterRequest_RequestType() {} func (*RegisterRequest_Metadata) 
isRegisterRequest_RequestType() {} type DataChunk struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DataChunk) Reset() { @@ -240,12 +243,11 @@ func (x *DataChunk) GetData() []byte { } type Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + ActionId string `protobuf:"bytes,2,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` unknownFields protoimpl.UnknownFields - - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - ActionId string `protobuf:"bytes,2,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Metadata) Reset() { @@ -293,13 +295,12 @@ func (x *Metadata) GetActionId() string { } type RegisterResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + EventType SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + TxHash string `protobuf:"bytes,3,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` unknownFields protoimpl.UnknownFields - - EventType SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - TxHash string 
`protobuf:"bytes,3,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RegisterResponse) Reset() { @@ -354,12 +355,11 @@ func (x *RegisterResponse) GetTxHash() string { } type DownloadRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` + Signature string `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` unknownFields protoimpl.UnknownFields - - ActionId string `protobuf:"bytes,1,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` - Signature string `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DownloadRequest) Reset() { @@ -407,15 +407,14 @@ func (x *DownloadRequest) GetSignature() string { } type DownloadResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to ResponseType: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to ResponseType: // // *DownloadResponse_Event // *DownloadResponse_Chunk - ResponseType isDownloadResponse_ResponseType `protobuf_oneof:"response_type"` + ResponseType isDownloadResponse_ResponseType `protobuf_oneof:"response_type"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DownloadResponse) Reset() { @@ -448,23 +447,27 @@ func (*DownloadResponse) Descriptor() ([]byte, []int) { return file_supernode_action_cascade_service_proto_rawDescGZIP(), []int{5} } -func (m *DownloadResponse) GetResponseType() isDownloadResponse_ResponseType { - if m != nil { - return m.ResponseType +func (x *DownloadResponse) GetResponseType() isDownloadResponse_ResponseType { + if x != nil { + return x.ResponseType } return nil } func (x 
*DownloadResponse) GetEvent() *DownloadEvent { - if x, ok := x.GetResponseType().(*DownloadResponse_Event); ok { - return x.Event + if x != nil { + if x, ok := x.ResponseType.(*DownloadResponse_Event); ok { + return x.Event + } } return nil } func (x *DownloadResponse) GetChunk() *DataChunk { - if x, ok := x.GetResponseType().(*DownloadResponse_Chunk); ok { - return x.Chunk + if x != nil { + if x, ok := x.ResponseType.(*DownloadResponse_Chunk); ok { + return x.Chunk + } } return nil } @@ -486,12 +489,11 @@ func (*DownloadResponse_Event) isDownloadResponse_ResponseType() {} func (*DownloadResponse_Chunk) isDownloadResponse_ResponseType() {} type DownloadEvent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + EventType SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` unknownFields protoimpl.UnknownFields - - EventType SupernodeEventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=cascade.SupernodeEventType" json:"event_type,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DownloadEvent) Reset() { @@ -540,104 +542,66 @@ func (x *DownloadEvent) GetMessage() string { var File_supernode_action_cascade_service_proto protoreflect.FileDescriptor -var file_supernode_action_cascade_service_proto_rawDesc = []byte{ - 0x0a, 0x26, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, - 0x65, 0x22, 0x7e, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 
0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x48, 0x00, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, - 0x12, 0x2f, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x42, 0x0e, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x22, 0x1f, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x12, - 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x22, 0x40, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x17, - 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x81, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, - 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, - 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 
0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x22, 0x4c, 0x0a, 0x0f, 0x44, 0x6f, 0x77, 0x6e, - 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x7f, 0x0a, 0x10, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x61, 0x73, 0x63, - 0x61, 0x64, 0x65, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x48, 0x00, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x61, 0x73, 0x63, - 0x61, 0x64, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x48, 0x00, 0x52, - 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x42, 0x0f, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x65, 0x0a, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, - 0x6f, 0x61, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x63, - 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 
0x65, 0x2a, 0xb3, - 0x03, 0x0a, 0x12, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x54, - 0x52, 0x49, 0x45, 0x56, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x46, 0x45, 0x45, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x54, 0x4f, 0x50, 0x5f, 0x53, 0x55, 0x50, 0x45, 0x52, 0x4e, 0x4f, - 0x44, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x45, 0x44, 0x10, - 0x03, 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x44, 0x45, - 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x41, 0x54, 0x41, 0x5f, - 0x48, 0x41, 0x53, 0x48, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x05, 0x12, - 0x11, 0x0a, 0x0d, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, - 0x10, 0x06, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x49, 0x47, 0x4e, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, - 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x51, - 0x49, 0x44, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x11, - 0x0a, 0x0d, 0x52, 0x51, 0x49, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x09, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x5f, 0x53, 0x49, - 0x4d, 0x55, 0x4c, 0x41, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x52, 0x54, - 0x45, 0x46, 0x41, 0x43, 0x54, 0x53, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x44, 0x10, 0x0b, 0x12, - 0x14, 0x0a, 0x10, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, - 0x5a, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x52, 0x54, 0x45, 0x46, 0x41, 0x43, - 0x54, 0x53, 0x5f, 
0x44, 0x4f, 0x57, 0x4e, 0x4c, 0x4f, 0x41, 0x44, 0x45, 0x44, 0x10, 0x0d, 0x12, - 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x4e, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x5f, 0x53, 0x49, 0x4d, 0x55, - 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x0e, 0x12, - 0x1c, 0x0a, 0x18, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, - 0x45, 0x56, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0f, 0x12, 0x14, 0x0a, - 0x10, 0x44, 0x45, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, - 0x44, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x45, 0x52, 0x56, 0x45, 0x5f, 0x52, 0x45, 0x41, - 0x44, 0x59, 0x10, 0x11, 0x32, 0x98, 0x01, 0x0a, 0x0e, 0x43, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, - 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x41, 0x0a, 0x08, - 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x18, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, - 0x64, 0x65, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x44, 0x6f, 0x77, - 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, - 0x45, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, - 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x61, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, - 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_supernode_action_cascade_service_proto_rawDesc = "" + + "\n" + + "&supernode/action/cascade/service.proto\x12\acascade\"~\n" + + "\x0fRegisterRequest\x12*\n" + + "\x05chunk\x18\x01 \x01(\v2\x12.cascade.DataChunkH\x00R\x05chunk\x12/\n" + + "\bmetadata\x18\x02 \x01(\v2\x11.cascade.MetadataH\x00R\bmetadataB\x0e\n" + + "\frequest_type\"\x1f\n" + + "\tDataChunk\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data\"@\n" + + "\bMetadata\x12\x17\n" + + "\atask_id\x18\x01 \x01(\tR\x06taskId\x12\x1b\n" + + "\taction_id\x18\x02 \x01(\tR\bactionId\"\x81\x01\n" + + "\x10RegisterResponse\x12:\n" + + "\n" + + "event_type\x18\x01 \x01(\x0e2\x1b.cascade.SupernodeEventTypeR\teventType\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x12\x17\n" + + "\atx_hash\x18\x03 \x01(\tR\x06txHash\"L\n" + + "\x0fDownloadRequest\x12\x1b\n" + + "\taction_id\x18\x01 \x01(\tR\bactionId\x12\x1c\n" + + "\tsignature\x18\x02 \x01(\tR\tsignature\"\x7f\n" + + "\x10DownloadResponse\x12.\n" + + "\x05event\x18\x01 \x01(\v2\x16.cascade.DownloadEventH\x00R\x05event\x12*\n" + + "\x05chunk\x18\x02 \x01(\v2\x12.cascade.DataChunkH\x00R\x05chunkB\x0f\n" + + "\rresponse_type\"e\n" + + "\rDownloadEvent\x12:\n" + + "\n" + + "event_type\x18\x01 \x01(\x0e2\x1b.cascade.SupernodeEventTypeR\teventType\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage*\xb3\x03\n" + + "\x12SupernodeEventType\x12\v\n" + + "\aUNKNOWN\x10\x00\x12\x14\n" + + "\x10ACTION_RETRIEVED\x10\x01\x12\x17\n" + + "\x13ACTION_FEE_VERIFIED\x10\x02\x12\x1e\n" + + "\x1aTOP_SUPERNODE_CHECK_PASSED\x10\x03\x12\x14\n" + + "\x10METADATA_DECODED\x10\x04\x12\x16\n" + + "\x12DATA_HASH_VERIFIED\x10\x05\x12\x11\n" + + "\rINPUT_ENCODED\x10\x06\x12\x16\n" + + "\x12SIGNATURE_VERIFIED\x10\a\x12\x12\n" + + "\x0eRQID_GENERATED\x10\b\x12\x11\n" + + "\rRQID_VERIFIED\x10\t\x12\x16\n" + + "\x12FINALIZE_SIMULATED\x10\n" + + "\x12\x14\n" + 
+ "\x10ARTEFACTS_STORED\x10\v\x12\x14\n" + + "\x10ACTION_FINALIZED\x10\f\x12\x18\n" + + "\x14ARTEFACTS_DOWNLOADED\x10\r\x12\x1e\n" + + "\x1aFINALIZE_SIMULATION_FAILED\x10\x0e\x12\x1c\n" + + "\x18NETWORK_RETRIEVE_STARTED\x10\x0f\x12\x14\n" + + "\x10DECODE_COMPLETED\x10\x10\x12\x0f\n" + + "\vSERVE_READY\x10\x112\x98\x01\n" + + "\x0eCascadeService\x12C\n" + + "\bRegister\x12\x18.cascade.RegisterRequest\x1a\x19.cascade.RegisterResponse(\x010\x01\x12A\n" + + "\bDownload\x12\x18.cascade.DownloadRequest\x1a\x19.cascade.DownloadResponse0\x01BEZCgithub.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascadeb\x06proto3" var ( file_supernode_action_cascade_service_proto_rawDescOnce sync.Once - file_supernode_action_cascade_service_proto_rawDescData = file_supernode_action_cascade_service_proto_rawDesc + file_supernode_action_cascade_service_proto_rawDescData []byte ) func file_supernode_action_cascade_service_proto_rawDescGZIP() []byte { file_supernode_action_cascade_service_proto_rawDescOnce.Do(func() { - file_supernode_action_cascade_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_action_cascade_service_proto_rawDescData) + file_supernode_action_cascade_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_action_cascade_service_proto_rawDesc), len(file_supernode_action_cascade_service_proto_rawDesc))) }) return file_supernode_action_cascade_service_proto_rawDescData } @@ -689,7 +653,7 @@ func file_supernode_action_cascade_service_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_supernode_action_cascade_service_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_action_cascade_service_proto_rawDesc), len(file_supernode_action_cascade_service_proto_rawDesc)), NumEnums: 1, NumMessages: 7, NumExtensions: 0, @@ -701,7 +665,6 @@ func file_supernode_action_cascade_service_proto_init() { 
MessageInfos: file_supernode_action_cascade_service_proto_msgTypes, }.Build() File_supernode_action_cascade_service_proto = out.File - file_supernode_action_cascade_service_proto_rawDesc = nil file_supernode_action_cascade_service_proto_goTypes = nil file_supernode_action_cascade_service_proto_depIdxs = nil } diff --git a/gen/supernode/agents/.gitkeep b/gen/supernode/agents/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/gen/supernode/service.pb.go b/gen/supernode/service.pb.go new file mode 100644 index 00000000..ad1ff814 --- /dev/null +++ b/gen/supernode/service.pb.go @@ -0,0 +1,412 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.6 +// protoc v3.21.12 +// source: supernode/service.proto + +package supernode + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ListServicesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListServicesRequest) Reset() { + *x = ListServicesRequest{} + mi := &file_supernode_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListServicesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServicesRequest) ProtoMessage() {} + +func (x *ListServicesRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead. +func (*ListServicesRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{0} +} + +type ListServicesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` + Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListServicesResponse) Reset() { + *x = ListServicesResponse{} + mi := &file_supernode_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListServicesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServicesResponse) ProtoMessage() {} + +func (x *ListServicesResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() 
== nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead. +func (*ListServicesResponse) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListServicesResponse) GetServices() []*ServiceInfo { + if x != nil { + return x.Services + } + return nil +} + +func (x *ListServicesResponse) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +type ServiceInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ServiceInfo) Reset() { + *x = ServiceInfo{} + mi := &file_supernode_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ServiceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceInfo) ProtoMessage() {} + +func (x *ServiceInfo) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceInfo.ProtoReflect.Descriptor instead. 
+func (*ServiceInfo) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ServiceInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ServiceInfo) GetMethods() []string { + if x != nil { + return x.Methods + } + return nil +} + +// Raw pprof request/response messages +type RawPprofRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Debug int32 `protobuf:"varint,1,opt,name=debug,proto3" json:"debug,omitempty"` // Debug level (0 for binary, >0 for text) + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RawPprofRequest) Reset() { + *x = RawPprofRequest{} + mi := &file_supernode_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RawPprofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawPprofRequest) ProtoMessage() {} + +func (x *RawPprofRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawPprofRequest.ProtoReflect.Descriptor instead. 
+func (*RawPprofRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{3} +} + +func (x *RawPprofRequest) GetDebug() int32 { + if x != nil { + return x.Debug + } + return 0 +} + +type RawPprofCpuRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Seconds int32 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // CPU profile duration in seconds (default 30) + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RawPprofCpuRequest) Reset() { + *x = RawPprofCpuRequest{} + mi := &file_supernode_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RawPprofCpuRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawPprofCpuRequest) ProtoMessage() {} + +func (x *RawPprofCpuRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawPprofCpuRequest.ProtoReflect.Descriptor instead. 
+func (*RawPprofCpuRequest) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{4} +} + +func (x *RawPprofCpuRequest) GetSeconds() int32 { + if x != nil { + return x.Seconds + } + return 0 +} + +type RawPprofResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` // Raw pprof data exactly as returned by runtime/pprof + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RawPprofResponse) Reset() { + *x = RawPprofResponse{} + mi := &file_supernode_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RawPprofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawPprofResponse) ProtoMessage() {} + +func (x *RawPprofResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_service_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawPprofResponse.ProtoReflect.Descriptor instead. 
+func (*RawPprofResponse) Descriptor() ([]byte, []int) { + return file_supernode_service_proto_rawDescGZIP(), []int{5} +} + +func (x *RawPprofResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +var File_supernode_service_proto protoreflect.FileDescriptor + +const file_supernode_service_proto_rawDesc = "" + + "\n" + + "\x17supernode/service.proto\x12\tsupernode\x1a\x16supernode/status.proto\x1a\x1cgoogle/api/annotations.proto\"\x15\n" + + "\x13ListServicesRequest\"`\n" + + "\x14ListServicesResponse\x122\n" + + "\bservices\x18\x01 \x03(\v2\x16.supernode.ServiceInfoR\bservices\x12\x14\n" + + "\x05count\x18\x02 \x01(\x05R\x05count\";\n" + + "\vServiceInfo\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\amethods\x18\x02 \x03(\tR\amethods\"'\n" + + "\x0fRawPprofRequest\x12\x14\n" + + "\x05debug\x18\x01 \x01(\x05R\x05debug\".\n" + + "\x12RawPprofCpuRequest\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x05R\aseconds\"&\n" + + "\x10RawPprofResponse\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data2\xec\v\n" + + "\x10SupernodeService\x12X\n" + + "\tGetStatus\x12\x18.supernode.StatusRequest\x1a\x19.supernode.StatusResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/status\x12i\n" + + "\fListServices\x12\x1e.supernode.ListServicesRequest\x1a\x1f.supernode.ListServicesResponse\"\x18\x82\xd3\xe4\x93\x02\x12\x12\x10/api/v1/services\x12g\n" + + "\vGetRawPprof\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"\x1f\x82\xd3\xe4\x93\x02\x19\x12\x17/api/v1/debug/raw/pprof\x12p\n" + + "\x0fGetRawPprofHeap\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"$\x82\xd3\xe4\x93\x02\x1e\x12\x1c/api/v1/debug/raw/pprof/heap\x12z\n" + + "\x14GetRawPprofGoroutine\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\")\x82\xd3\xe4\x93\x02#\x12!/api/v1/debug/raw/pprof/goroutine\x12t\n" + + 
"\x11GetRawPprofAllocs\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"&\x82\xd3\xe4\x93\x02 \x12\x1e/api/v1/debug/raw/pprof/allocs\x12r\n" + + "\x10GetRawPprofBlock\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/block\x12r\n" + + "\x10GetRawPprofMutex\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/mutex\x12\x80\x01\n" + + "\x17GetRawPprofThreadcreate\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\",\x82\xd3\xe4\x93\x02&\x12$/api/v1/debug/raw/pprof/threadcreate\x12y\n" + + "\x12GetRawPprofProfile\x12\x1d.supernode.RawPprofCpuRequest\x1a\x1b.supernode.RawPprofResponse\"'\x82\xd3\xe4\x93\x02!\x12\x1f/api/v1/debug/raw/pprof/profile\x12v\n" + + "\x12GetRawPprofCmdline\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"'\x82\xd3\xe4\x93\x02!\x12\x1f/api/v1/debug/raw/pprof/cmdline\x12t\n" + + "\x11GetRawPprofSymbol\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"&\x82\xd3\xe4\x93\x02 \x12\x1e/api/v1/debug/raw/pprof/symbol\x12r\n" + + "\x10GetRawPprofTrace\x12\x1a.supernode.RawPprofRequest\x1a\x1b.supernode.RawPprofResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/api/v1/debug/raw/pprof/traceB6Z4github.com/LumeraProtocol/supernode/v2/gen/supernodeb\x06proto3" + +var ( + file_supernode_service_proto_rawDescOnce sync.Once + file_supernode_service_proto_rawDescData []byte +) + +func file_supernode_service_proto_rawDescGZIP() []byte { + file_supernode_service_proto_rawDescOnce.Do(func() { + file_supernode_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_service_proto_rawDesc), len(file_supernode_service_proto_rawDesc))) + }) + return file_supernode_service_proto_rawDescData +} + +var file_supernode_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var 
file_supernode_service_proto_goTypes = []any{ + (*ListServicesRequest)(nil), // 0: supernode.ListServicesRequest + (*ListServicesResponse)(nil), // 1: supernode.ListServicesResponse + (*ServiceInfo)(nil), // 2: supernode.ServiceInfo + (*RawPprofRequest)(nil), // 3: supernode.RawPprofRequest + (*RawPprofCpuRequest)(nil), // 4: supernode.RawPprofCpuRequest + (*RawPprofResponse)(nil), // 5: supernode.RawPprofResponse + (*StatusRequest)(nil), // 6: supernode.StatusRequest + (*StatusResponse)(nil), // 7: supernode.StatusResponse +} +var file_supernode_service_proto_depIdxs = []int32{ + 2, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo + 6, // 1: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest + 0, // 2: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest + 3, // 3: supernode.SupernodeService.GetRawPprof:input_type -> supernode.RawPprofRequest + 3, // 4: supernode.SupernodeService.GetRawPprofHeap:input_type -> supernode.RawPprofRequest + 3, // 5: supernode.SupernodeService.GetRawPprofGoroutine:input_type -> supernode.RawPprofRequest + 3, // 6: supernode.SupernodeService.GetRawPprofAllocs:input_type -> supernode.RawPprofRequest + 3, // 7: supernode.SupernodeService.GetRawPprofBlock:input_type -> supernode.RawPprofRequest + 3, // 8: supernode.SupernodeService.GetRawPprofMutex:input_type -> supernode.RawPprofRequest + 3, // 9: supernode.SupernodeService.GetRawPprofThreadcreate:input_type -> supernode.RawPprofRequest + 4, // 10: supernode.SupernodeService.GetRawPprofProfile:input_type -> supernode.RawPprofCpuRequest + 3, // 11: supernode.SupernodeService.GetRawPprofCmdline:input_type -> supernode.RawPprofRequest + 3, // 12: supernode.SupernodeService.GetRawPprofSymbol:input_type -> supernode.RawPprofRequest + 3, // 13: supernode.SupernodeService.GetRawPprofTrace:input_type -> supernode.RawPprofRequest + 7, // 14: supernode.SupernodeService.GetStatus:output_type -> 
supernode.StatusResponse + 1, // 15: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse + 5, // 16: supernode.SupernodeService.GetRawPprof:output_type -> supernode.RawPprofResponse + 5, // 17: supernode.SupernodeService.GetRawPprofHeap:output_type -> supernode.RawPprofResponse + 5, // 18: supernode.SupernodeService.GetRawPprofGoroutine:output_type -> supernode.RawPprofResponse + 5, // 19: supernode.SupernodeService.GetRawPprofAllocs:output_type -> supernode.RawPprofResponse + 5, // 20: supernode.SupernodeService.GetRawPprofBlock:output_type -> supernode.RawPprofResponse + 5, // 21: supernode.SupernodeService.GetRawPprofMutex:output_type -> supernode.RawPprofResponse + 5, // 22: supernode.SupernodeService.GetRawPprofThreadcreate:output_type -> supernode.RawPprofResponse + 5, // 23: supernode.SupernodeService.GetRawPprofProfile:output_type -> supernode.RawPprofResponse + 5, // 24: supernode.SupernodeService.GetRawPprofCmdline:output_type -> supernode.RawPprofResponse + 5, // 25: supernode.SupernodeService.GetRawPprofSymbol:output_type -> supernode.RawPprofResponse + 5, // 26: supernode.SupernodeService.GetRawPprofTrace:output_type -> supernode.RawPprofResponse + 14, // [14:27] is the sub-list for method output_type + 1, // [1:14] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_supernode_service_proto_init() } +func file_supernode_service_proto_init() { + if File_supernode_service_proto != nil { + return + } + file_supernode_status_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_service_proto_rawDesc), len(file_supernode_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + 
NumServices: 1, + }, + GoTypes: file_supernode_service_proto_goTypes, + DependencyIndexes: file_supernode_service_proto_depIdxs, + MessageInfos: file_supernode_service_proto_msgTypes, + }.Build() + File_supernode_service_proto = out.File + file_supernode_service_proto_goTypes = nil + file_supernode_service_proto_depIdxs = nil +} diff --git a/gen/supernode/service.pb.gw.go b/gen/supernode/service.pb.gw.go new file mode 100644 index 00000000..93983b0f --- /dev/null +++ b/gen/supernode/service.pb.gw.go @@ -0,0 +1,1199 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: supernode/service.proto + +/* +Package supernode is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package supernode + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +var ( + filter_SupernodeService_GetStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq StatusRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq StatusRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetStatus(ctx, &protoReq) + return msg, metadata, err + +} + +func request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListServicesRequest + var metadata runtime.ServerMetadata + + msg, err := client.ListServices(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListServicesRequest + var metadata runtime.ServerMetadata + + msg, err := server.ListServices(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprof_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, client 
SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprof(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprof(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofHeap_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_SupernodeService_GetRawPprofHeap_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofHeap_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofHeap(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofGoroutine(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func 
local_request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofGoroutine(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofAllocs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofAllocs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + 
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofAllocs(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofBlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofBlock(ctx, &protoReq) + return msg, metadata, err + 
+} + +var ( + filter_SupernodeService_GetRawPprofMutex_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofMutex_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofMutex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofMutex_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofMutex(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofThreadcreate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofThreadcreate(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofProfile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofCpuRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_SupernodeService_GetRawPprofProfile_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofProfile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofCpuRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofProfile_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetRawPprofProfile(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_SupernodeService_GetRawPprofCmdline_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RawPprofRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetRawPprofCmdline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func 
local_request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	// In-process variant: decode the query string into the request message and
	// call the server implementation directly (no gRPC client round trip).
	var in RawPprofRequest
	var md runtime.ServerMetadata
	if err := req.ParseForm(); err != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	if err := runtime.PopulateQueryParameters(&in, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	resp, err := server.GetRawPprofCmdline(ctx, &in)
	return resp, md, err
}

// filter_SupernodeService_GetRawPprofSymbol_0 marks which fields must not be
// populated from URL query parameters (empty: all fields are allowed).
var filter_SupernodeService_GetRawPprofSymbol_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}

// request_SupernodeService_GetRawPprofSymbol_0 decodes the query string into
// the request message and forwards the call over the gRPC client, capturing
// header/trailer metadata from the remote call.
func request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var in RawPprofRequest
	var md runtime.ServerMetadata
	if err := req.ParseForm(); err != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	if err := runtime.PopulateQueryParameters(&in, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	resp, err := client.GetRawPprofSymbol(ctx, &in, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD))
	return resp, md, err
}

// local_request_SupernodeService_GetRawPprofSymbol_0 is the in-process variant
// of the handler above: it calls the server implementation directly.
func local_request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var in RawPprofRequest
	var md runtime.ServerMetadata
	if err := req.ParseForm(); err != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	if err := runtime.PopulateQueryParameters(&in, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	resp, err := server.GetRawPprofSymbol(ctx, &in)
	return resp, md, err
}

// filter_SupernodeService_GetRawPprofTrace_0 marks which fields must not be
// populated from URL query parameters (empty: all fields are allowed).
var filter_SupernodeService_GetRawPprofTrace_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}

// request_SupernodeService_GetRawPprofTrace_0 decodes the query string into
// the request message and forwards the call over the gRPC client, capturing
// header/trailer metadata from the remote call.
func request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var in RawPprofRequest
	var md runtime.ServerMetadata
	if err := req.ParseForm(); err != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	if err := runtime.PopulateQueryParameters(&in, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	resp, err := client.GetRawPprofTrace(ctx, &in, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD))
	return resp, md, err
}

// local_request_SupernodeService_GetRawPprofTrace_0 is the in-process variant
// of the handler above: it calls the server implementation directly.
func local_request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var in RawPprofRequest
	var md runtime.ServerMetadata
	if err := req.ParseForm(); err != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	if err := runtime.PopulateQueryParameters(&in, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	resp, err := server.GetRawPprofTrace(ctx, &in)
	return resp, md, err
}

//
RegisterSupernodeServiceHandlerServer registers the http handlers for service SupernodeService to "mux". +// UnaryRPC :call SupernodeServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSupernodeServiceHandlerFromEndpoint instead. +func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SupernodeServiceServer) error { + + mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetStatus_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_ListServices_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprof_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofHeap_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofAllocs_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofBlock_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofMutex_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofProfile_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofCmdline_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofSymbol_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_SupernodeService_GetRawPprofTrace_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterSupernodeServiceHandlerFromEndpoint is same as RegisterSupernodeServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterSupernodeServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.NewClient(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterSupernodeServiceHandler(ctx, mux, conn) +} + +// RegisterSupernodeServiceHandler registers the http handlers for service SupernodeService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterSupernodeServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterSupernodeServiceHandlerClient(ctx, mux, NewSupernodeServiceClient(conn)) +} + +// RegisterSupernodeServiceHandlerClient registers the http handlers for service SupernodeService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SupernodeServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SupernodeServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "SupernodeServiceClient" to call the correct interceptors. 
+func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SupernodeServiceClient) error { + + mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetStatus_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_ListServices_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprof_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofHeap_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofAllocs_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofBlock_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofMutex_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofProfile_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofCmdline_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofSymbol_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_SupernodeService_GetRawPprofTrace_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "")) + + pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "")) + + pattern_SupernodeService_GetRawPprof_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "raw", "pprof"}, "")) + + pattern_SupernodeService_GetRawPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "heap"}, "")) + + pattern_SupernodeService_GetRawPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "goroutine"}, "")) + + pattern_SupernodeService_GetRawPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "allocs"}, "")) + + pattern_SupernodeService_GetRawPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "block"}, "")) + + pattern_SupernodeService_GetRawPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "mutex"}, "")) + + pattern_SupernodeService_GetRawPprofThreadcreate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "threadcreate"}, "")) + + pattern_SupernodeService_GetRawPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "profile"}, "")) + + pattern_SupernodeService_GetRawPprofCmdline_0 = 
runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "cmdline"}, "")) + + pattern_SupernodeService_GetRawPprofSymbol_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "symbol"}, "")) + + pattern_SupernodeService_GetRawPprofTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "trace"}, "")) +) + +var ( + forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprof_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofHeap_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofGoroutine_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofAllocs_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofBlock_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofMutex_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofThreadcreate_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofProfile_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofCmdline_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofSymbol_0 = runtime.ForwardResponseMessage + + forward_SupernodeService_GetRawPprofTrace_0 = runtime.ForwardResponseMessage +) diff --git a/gen/supernode/supernode.swagger.json b/gen/supernode/service.swagger.json similarity index 57% rename from gen/supernode/supernode.swagger.json rename to gen/supernode/service.swagger.json index 00a47bb8..523499b8 100644 --- a/gen/supernode/supernode.swagger.json +++ b/gen/supernode/service.swagger.json @@ -1,7 +1,7 @@ { "swagger": "2.0", "info": { - "title": 
"supernode/supernode.proto", + "title": "supernode/service.proto", "version": "version not set" }, "tags": [ @@ -16,6 +16,359 @@ "application/json" ], "paths": { + "/api/v1/debug/raw/pprof": { + "get": { + "summary": "Raw pprof endpoints - return standard pprof output directly", + "operationId": "SupernodeService_GetRawPprof", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/allocs": { + "get": { + "operationId": "SupernodeService_GetRawPprofAllocs", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/block": { + "get": { + "operationId": "SupernodeService_GetRawPprofBlock", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": 
"integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/cmdline": { + "get": { + "operationId": "SupernodeService_GetRawPprofCmdline", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/goroutine": { + "get": { + "operationId": "SupernodeService_GetRawPprofGoroutine", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/heap": { + "get": { + "operationId": "SupernodeService_GetRawPprofHeap", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/mutex": { + "get": { + 
"operationId": "SupernodeService_GetRawPprofMutex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/profile": { + "get": { + "operationId": "SupernodeService_GetRawPprofProfile", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "seconds", + "description": "CPU profile duration in seconds (default 30)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/symbol": { + "get": { + "operationId": "SupernodeService_GetRawPprofSymbol", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/threadcreate": { + "get": { + "operationId": "SupernodeService_GetRawPprofThreadcreate", + "responses": { + "200": { + "description": "A successful response.", + 
"schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/trace": { + "get": { + "operationId": "SupernodeService_GetRawPprofTrace", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, "/api/v1/services": { "get": { "operationId": "SupernodeService_ListServices", @@ -249,92 +602,6 @@ }, "title": "Per-handler counters from network layer" }, - "P2PMetricsRecentBatchRetrieveEntry": { - "type": "object", - "properties": { - "timeUnix": { - "type": "string", - "format": "int64" - }, - "senderId": { - "type": "string" - }, - "senderIp": { - "type": "string" - }, - "requested": { - "type": "integer", - "format": "int32" - }, - "found": { - "type": "integer", - "format": "int32" - }, - "durationMs": { - "type": "string", - "format": "int64" - }, - "error": { - "type": "string" - } - }, - "title": "Last handled BatchGetValues requests (most recent first)" - }, - "P2PMetricsRecentBatchRetrieveList": { - "type": "object", - "properties": { - "entries": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveEntry" - } - } - } - }, - "P2PMetricsRecentBatchStoreEntry": { 
- "type": "object", - "properties": { - "timeUnix": { - "type": "string", - "format": "int64" - }, - "senderId": { - "type": "string" - }, - "senderIp": { - "type": "string" - }, - "keys": { - "type": "integer", - "format": "int32" - }, - "durationMs": { - "type": "string", - "format": "int64" - }, - "ok": { - "type": "boolean" - }, - "error": { - "type": "string" - } - }, - "title": "Last handled BatchStoreData requests (most recent first)" - }, - "P2PMetricsRecentBatchStoreList": { - "type": "object", - "properties": { - "entries": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/P2PMetricsRecentBatchStoreEntry" - } - } - }, - "title": "Per-IP buckets: last 10 per sender IP" - }, "ResourcesCPU": { "type": "object", "properties": { @@ -450,32 +717,6 @@ }, "disk": { "$ref": "#/definitions/P2PMetricsDiskStatus" - }, - "recentBatchStore": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/P2PMetricsRecentBatchStoreEntry" - } - }, - "recentBatchRetrieve": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveEntry" - } - }, - "recentBatchStoreByIp": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/P2PMetricsRecentBatchStoreList" - } - }, - "recentBatchRetrieveByIp": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/P2PMetricsRecentBatchRetrieveList" - } } }, "title": "P2P metrics and diagnostics (additive field)" @@ -566,6 +807,16 @@ } } }, + "supernodeRawPprofResponse": { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "byte", + "title": "Raw pprof data exactly as returned by runtime/pprof" + } + } + }, "supernodeServiceInfo": { "type": "object", "properties": { diff --git a/gen/supernode/service_grpc.pb.go b/gen/supernode/service_grpc.pb.go new file mode 100644 index 00000000..42857bf2 --- /dev/null +++ b/gen/supernode/service_grpc.pb.go @@ -0,0 +1,583 @@ +// Code 
generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.21.12 +// source: supernode/service.proto + +package supernode + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + SupernodeService_GetStatus_FullMethodName = "/supernode.SupernodeService/GetStatus" + SupernodeService_ListServices_FullMethodName = "/supernode.SupernodeService/ListServices" + SupernodeService_GetRawPprof_FullMethodName = "/supernode.SupernodeService/GetRawPprof" + SupernodeService_GetRawPprofHeap_FullMethodName = "/supernode.SupernodeService/GetRawPprofHeap" + SupernodeService_GetRawPprofGoroutine_FullMethodName = "/supernode.SupernodeService/GetRawPprofGoroutine" + SupernodeService_GetRawPprofAllocs_FullMethodName = "/supernode.SupernodeService/GetRawPprofAllocs" + SupernodeService_GetRawPprofBlock_FullMethodName = "/supernode.SupernodeService/GetRawPprofBlock" + SupernodeService_GetRawPprofMutex_FullMethodName = "/supernode.SupernodeService/GetRawPprofMutex" + SupernodeService_GetRawPprofThreadcreate_FullMethodName = "/supernode.SupernodeService/GetRawPprofThreadcreate" + SupernodeService_GetRawPprofProfile_FullMethodName = "/supernode.SupernodeService/GetRawPprofProfile" + SupernodeService_GetRawPprofCmdline_FullMethodName = "/supernode.SupernodeService/GetRawPprofCmdline" + SupernodeService_GetRawPprofSymbol_FullMethodName = "/supernode.SupernodeService/GetRawPprofSymbol" + SupernodeService_GetRawPprofTrace_FullMethodName = "/supernode.SupernodeService/GetRawPprofTrace" +) + +// SupernodeServiceClient is the client API for SupernodeService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// SupernodeService provides status information for all services +type SupernodeServiceClient interface { + GetStatus(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + // Raw pprof endpoints - return standard pprof output directly + GetRawPprof(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofHeap(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofGoroutine(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofAllocs(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofBlock(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofMutex(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofThreadcreate(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofProfile(ctx context.Context, in *RawPprofCpuRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofCmdline(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofSymbol(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) + GetRawPprofTrace(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) +} + +type supernodeServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSupernodeServiceClient(cc grpc.ClientConnInterface) SupernodeServiceClient { + return 
&supernodeServiceClient{cc} +} + +func (c *supernodeServiceClient) GetStatus(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(StatusResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetStatus_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListServicesResponse) + err := c.cc.Invoke(ctx, SupernodeService_ListServices_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprof(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprof_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofHeap(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofHeap_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofGoroutine(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofGoroutine_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofAllocs(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofAllocs_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofBlock(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofBlock_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofMutex(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofMutex_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofThreadcreate(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofThreadcreate_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofProfile(ctx context.Context, in *RawPprofCpuRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofCmdline(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofCmdline_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofSymbol(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofSymbol_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *supernodeServiceClient) GetRawPprofTrace(ctx context.Context, in *RawPprofRequest, opts ...grpc.CallOption) (*RawPprofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawPprofResponse) + err := c.cc.Invoke(ctx, SupernodeService_GetRawPprofTrace_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SupernodeServiceServer is the server API for SupernodeService service. +// All implementations must embed UnimplementedSupernodeServiceServer +// for forward compatibility. 
+// +// SupernodeService provides status information for all services +type SupernodeServiceServer interface { + GetStatus(context.Context, *StatusRequest) (*StatusResponse, error) + ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + // Raw pprof endpoints - return standard pprof output directly + GetRawPprof(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofHeap(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofGoroutine(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofAllocs(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofBlock(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofMutex(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofThreadcreate(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofProfile(context.Context, *RawPprofCpuRequest) (*RawPprofResponse, error) + GetRawPprofCmdline(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofSymbol(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + GetRawPprofTrace(context.Context, *RawPprofRequest) (*RawPprofResponse, error) + mustEmbedUnimplementedSupernodeServiceServer() +} + +// UnimplementedSupernodeServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedSupernodeServiceServer struct{} + +func (UnimplementedSupernodeServiceServer) GetStatus(context.Context, *StatusRequest) (*StatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetStatus not implemented") +} +func (UnimplementedSupernodeServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprof(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprof not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofHeap(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofHeap not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofGoroutine(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofGoroutine not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofAllocs(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofAllocs not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofBlock(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofBlock not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofMutex(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofMutex not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofThreadcreate(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method 
GetRawPprofThreadcreate not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofProfile(context.Context, *RawPprofCpuRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofProfile not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofCmdline(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofCmdline not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofSymbol(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofSymbol not implemented") +} +func (UnimplementedSupernodeServiceServer) GetRawPprofTrace(context.Context, *RawPprofRequest) (*RawPprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRawPprofTrace not implemented") +} +func (UnimplementedSupernodeServiceServer) mustEmbedUnimplementedSupernodeServiceServer() {} +func (UnimplementedSupernodeServiceServer) testEmbeddedByValue() {} + +// UnsafeSupernodeServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SupernodeServiceServer will +// result in compilation errors. +type UnsafeSupernodeServiceServer interface { + mustEmbedUnimplementedSupernodeServiceServer() +} + +func RegisterSupernodeServiceServer(s grpc.ServiceRegistrar, srv SupernodeServiceServer) { + // If the following call panics, it indicates UnimplementedSupernodeServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&SupernodeService_ServiceDesc, srv) +} + +func _SupernodeService_GetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetStatus(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).ListServices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_ListServices_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).ListServices(ctx, req.(*ListServicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprof_FullMethodName, + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprof(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofHeap_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofHeap(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofHeap_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofHeap(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofGoroutine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofGoroutine(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofGoroutine_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofGoroutine(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofAllocs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofAllocs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
SupernodeService_GetRawPprofAllocs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofAllocs(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofBlock_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofBlock(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofMutex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofMutex(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofMutex_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofMutex(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofThreadcreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(SupernodeServiceServer).GetRawPprofThreadcreate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofThreadcreate_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofThreadcreate(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofCpuRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofProfile(ctx, req.(*RawPprofCpuRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofCmdline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofCmdline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofCmdline_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofCmdline(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofSymbol_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 
+ in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofSymbol(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofSymbol_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofSymbol(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SupernodeService_GetRawPprofTrace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawPprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SupernodeServiceServer).GetRawPprofTrace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SupernodeService_GetRawPprofTrace_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SupernodeServiceServer).GetRawPprofTrace(ctx, req.(*RawPprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// SupernodeService_ServiceDesc is the grpc.ServiceDesc for SupernodeService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SupernodeService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "supernode.SupernodeService", + HandlerType: (*SupernodeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetStatus", + Handler: _SupernodeService_GetStatus_Handler, + }, + { + MethodName: "ListServices", + Handler: _SupernodeService_ListServices_Handler, + }, + { + MethodName: "GetRawPprof", + Handler: _SupernodeService_GetRawPprof_Handler, + }, + { + MethodName: "GetRawPprofHeap", + Handler: _SupernodeService_GetRawPprofHeap_Handler, + }, + { + MethodName: "GetRawPprofGoroutine", + Handler: _SupernodeService_GetRawPprofGoroutine_Handler, + }, + { + MethodName: "GetRawPprofAllocs", + Handler: _SupernodeService_GetRawPprofAllocs_Handler, + }, + { + MethodName: "GetRawPprofBlock", + Handler: _SupernodeService_GetRawPprofBlock_Handler, + }, + { + MethodName: "GetRawPprofMutex", + Handler: _SupernodeService_GetRawPprofMutex_Handler, + }, + { + MethodName: "GetRawPprofThreadcreate", + Handler: _SupernodeService_GetRawPprofThreadcreate_Handler, + }, + { + MethodName: "GetRawPprofProfile", + Handler: _SupernodeService_GetRawPprofProfile_Handler, + }, + { + MethodName: "GetRawPprofCmdline", + Handler: _SupernodeService_GetRawPprofCmdline_Handler, + }, + { + MethodName: "GetRawPprofSymbol", + Handler: _SupernodeService_GetRawPprofSymbol_Handler, + }, + { + MethodName: "GetRawPprofTrace", + Handler: _SupernodeService_GetRawPprofTrace_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "supernode/service.proto", +} diff --git a/gen/supernode/status.pb.go b/gen/supernode/status.pb.go new file mode 100644 index 00000000..74e0d6d7 --- /dev/null +++ b/gen/supernode/status.pb.go @@ -0,0 +1,1321 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.6 +// protoc v3.21.12 +// source: supernode/status.proto + +package supernode + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// StatusRequest controls optional metrics in the status response +type StatusRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Optional: include detailed P2P metrics in the response + // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true + IncludeP2PMetrics bool `protobuf:"varint,1,opt,name=include_p2p_metrics,json=includeP2pMetrics,proto3" json:"include_p2p_metrics,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusRequest) Reset() { + *x = StatusRequest{} + mi := &file_supernode_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusRequest) ProtoMessage() {} + +func (x *StatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead. 
+func (*StatusRequest) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{0} +} + +func (x *StatusRequest) GetIncludeP2PMetrics() bool { + if x != nil { + return x.IncludeP2PMetrics + } + return false +} + +// The StatusResponse represents system status with clear organization +type StatusResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version + UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds + Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + RunningTasks []*StatusResponse_ServiceTasks `protobuf:"bytes,4,rep,name=running_tasks,json=runningTasks,proto3" json:"running_tasks,omitempty"` // Services with currently running tasks + RegisteredServices []string `protobuf:"bytes,5,rep,name=registered_services,json=registeredServices,proto3" json:"registered_services,omitempty"` // All registered/available services + Network *StatusResponse_Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` // P2P network information + Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) + IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") + P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse) Reset() { + *x = StatusResponse{} + mi := &file_supernode_status_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x 
*StatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse) ProtoMessage() {} + +func (x *StatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. +func (*StatusResponse) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1} +} + +func (x *StatusResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *StatusResponse) GetUptimeSeconds() uint64 { + if x != nil { + return x.UptimeSeconds + } + return 0 +} + +func (x *StatusResponse) GetResources() *StatusResponse_Resources { + if x != nil { + return x.Resources + } + return nil +} + +func (x *StatusResponse) GetRunningTasks() []*StatusResponse_ServiceTasks { + if x != nil { + return x.RunningTasks + } + return nil +} + +func (x *StatusResponse) GetRegisteredServices() []string { + if x != nil { + return x.RegisteredServices + } + return nil +} + +func (x *StatusResponse) GetNetwork() *StatusResponse_Network { + if x != nil { + return x.Network + } + return nil +} + +func (x *StatusResponse) GetRank() int32 { + if x != nil { + return x.Rank + } + return 0 +} + +func (x *StatusResponse) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *StatusResponse) GetP2PMetrics() *StatusResponse_P2PMetrics { + if x != nil { + return x.P2PMetrics + } + return nil +} + +// System resource information +type StatusResponse_Resources struct { + state protoimpl.MessageState `protogen:"open.v1"` + Cpu *StatusResponse_Resources_CPU `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *StatusResponse_Resources_Memory `protobuf:"bytes,2,opt,name=memory,proto3" 
json:"memory,omitempty"` + StorageVolumes []*StatusResponse_Resources_Storage `protobuf:"bytes,3,rep,name=storage_volumes,json=storageVolumes,proto3" json:"storage_volumes,omitempty"` + HardwareSummary string `protobuf:"bytes,4,opt,name=hardware_summary,json=hardwareSummary,proto3" json:"hardware_summary,omitempty"` // Formatted hardware summary (e.g., "8 cores / 32GB RAM") + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_Resources) Reset() { + *x = StatusResponse_Resources{} + mi := &file_supernode_status_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_Resources) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_Resources) ProtoMessage() {} + +func (x *StatusResponse_Resources) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_Resources.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_Resources) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *StatusResponse_Resources) GetCpu() *StatusResponse_Resources_CPU { + if x != nil { + return x.Cpu + } + return nil +} + +func (x *StatusResponse_Resources) GetMemory() *StatusResponse_Resources_Memory { + if x != nil { + return x.Memory + } + return nil +} + +func (x *StatusResponse_Resources) GetStorageVolumes() []*StatusResponse_Resources_Storage { + if x != nil { + return x.StorageVolumes + } + return nil +} + +func (x *StatusResponse_Resources) GetHardwareSummary() string { + if x != nil { + return x.HardwareSummary + } + return "" +} + +// ServiceTasks contains task information for a specific service +type StatusResponse_ServiceTasks struct { + state protoimpl.MessageState `protogen:"open.v1"` + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"` + TaskCount int32 `protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_ServiceTasks) Reset() { + *x = StatusResponse_ServiceTasks{} + mi := &file_supernode_status_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_ServiceTasks) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_ServiceTasks) ProtoMessage() {} + +func (x *StatusResponse_ServiceTasks) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
StatusResponse_ServiceTasks.ProtoReflect.Descriptor instead. +func (*StatusResponse_ServiceTasks) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *StatusResponse_ServiceTasks) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *StatusResponse_ServiceTasks) GetTaskIds() []string { + if x != nil { + return x.TaskIds + } + return nil +} + +func (x *StatusResponse_ServiceTasks) GetTaskCount() int32 { + if x != nil { + return x.TaskCount + } + return 0 +} + +// Network information +type StatusResponse_Network struct { + state protoimpl.MessageState `protogen:"open.v1"` + PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" json:"peers_count,omitempty"` // Number of connected peers in P2P network + PeerAddresses []string `protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy) + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_Network) Reset() { + *x = StatusResponse_Network{} + mi := &file_supernode_status_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_Network) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_Network) ProtoMessage() {} + +func (x *StatusResponse_Network) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_Network.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_Network) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *StatusResponse_Network) GetPeersCount() int32 { + if x != nil { + return x.PeersCount + } + return 0 +} + +func (x *StatusResponse_Network) GetPeerAddresses() []string { + if x != nil { + return x.PeerAddresses + } + return nil +} + +// P2P metrics and diagnostics (additive field) +type StatusResponse_P2PMetrics struct { + state protoimpl.MessageState `protogen:"open.v1"` + DhtMetrics *StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"` + NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"` + Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` + Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_P2PMetrics) Reset() { + *x = StatusResponse_P2PMetrics{} + mi := &file_supernode_status_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics) ProtoMessage() {} + +func (x 
*StatusResponse_P2PMetrics) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *StatusResponse_P2PMetrics) GetDhtMetrics() *StatusResponse_P2PMetrics_DhtMetrics { + if x != nil { + return x.DhtMetrics + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetNetworkHandleMetrics() map[string]*StatusResponse_P2PMetrics_HandleCounters { + if x != nil { + return x.NetworkHandleMetrics + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetConnPoolMetrics() map[string]int64 { + if x != nil { + return x.ConnPoolMetrics + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetBanList() []*StatusResponse_P2PMetrics_BanEntry { + if x != nil { + return x.BanList + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetDatabase() *StatusResponse_P2PMetrics_DatabaseStats { + if x != nil { + return x.Database + } + return nil +} + +func (x *StatusResponse_P2PMetrics) GetDisk() *StatusResponse_P2PMetrics_DiskStatus { + if x != nil { + return x.Disk + } + return nil +} + +type StatusResponse_Resources_CPU struct { + state protoimpl.MessageState `protogen:"open.v1"` + UsagePercent float64 `protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // CPU usage percentage (0-100) + Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_Resources_CPU) Reset() { + *x = StatusResponse_Resources_CPU{} + mi := 
&file_supernode_status_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_Resources_CPU) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_Resources_CPU) ProtoMessage() {} + +func (x *StatusResponse_Resources_CPU) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_Resources_CPU.ProtoReflect.Descriptor instead. +func (*StatusResponse_Resources_CPU) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 0, 0} +} + +func (x *StatusResponse_Resources_CPU) GetUsagePercent() float64 { + if x != nil { + return x.UsagePercent + } + return 0 +} + +func (x *StatusResponse_Resources_CPU) GetCores() int32 { + if x != nil { + return x.Cores + } + return 0 +} + +type StatusResponse_Resources_Memory struct { + state protoimpl.MessageState `protogen:"open.v1"` + TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB + UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB + AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" json:"available_gb,omitempty"` // Available memory in GB + UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100) + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_Resources_Memory) Reset() { + *x = StatusResponse_Resources_Memory{} + mi := &file_supernode_status_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_Resources_Memory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_Resources_Memory) ProtoMessage() {} + +func (x *StatusResponse_Resources_Memory) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_Resources_Memory.ProtoReflect.Descriptor instead. +func (*StatusResponse_Resources_Memory) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 0, 1} +} + +func (x *StatusResponse_Resources_Memory) GetTotalGb() float64 { + if x != nil { + return x.TotalGb + } + return 0 +} + +func (x *StatusResponse_Resources_Memory) GetUsedGb() float64 { + if x != nil { + return x.UsedGb + } + return 0 +} + +func (x *StatusResponse_Resources_Memory) GetAvailableGb() float64 { + if x != nil { + return x.AvailableGb + } + return 0 +} + +func (x *StatusResponse_Resources_Memory) GetUsagePercent() float64 { + if x != nil { + return x.UsagePercent + } + return 0 +} + +type StatusResponse_Resources_Storage struct { + state protoimpl.MessageState `protogen:"open.v1"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored + TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"` + UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` + AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` + UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100) + unknownFields 
protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_Resources_Storage) Reset() { + *x = StatusResponse_Resources_Storage{} + mi := &file_supernode_status_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_Resources_Storage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_Resources_Storage) ProtoMessage() {} + +func (x *StatusResponse_Resources_Storage) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_Resources_Storage.ProtoReflect.Descriptor instead. +func (*StatusResponse_Resources_Storage) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 0, 2} +} + +func (x *StatusResponse_Resources_Storage) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *StatusResponse_Resources_Storage) GetTotalBytes() uint64 { + if x != nil { + return x.TotalBytes + } + return 0 +} + +func (x *StatusResponse_Resources_Storage) GetUsedBytes() uint64 { + if x != nil { + return x.UsedBytes + } + return 0 +} + +func (x *StatusResponse_Resources_Storage) GetAvailableBytes() uint64 { + if x != nil { + return x.AvailableBytes + } + return 0 +} + +func (x *StatusResponse_Resources_Storage) GetUsagePercent() float64 { + if x != nil { + return x.UsagePercent + } + return 0 +} + +// Rolling DHT metrics snapshot +type StatusResponse_P2PMetrics_DhtMetrics struct { + state protoimpl.MessageState `protogen:"open.v1"` + StoreSuccessRecent []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint `protobuf:"bytes,1,rep,name=store_success_recent,json=storeSuccessRecent,proto3" json:"store_success_recent,omitempty"` + 
BatchRetrieveRecent []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint `protobuf:"bytes,2,rep,name=batch_retrieve_recent,json=batchRetrieveRecent,proto3" json:"batch_retrieve_recent,omitempty"` + HotPathBannedSkips int64 `protobuf:"varint,3,opt,name=hot_path_banned_skips,json=hotPathBannedSkips,proto3" json:"hot_path_banned_skips,omitempty"` // counter + HotPathBanIncrements int64 `protobuf:"varint,4,opt,name=hot_path_ban_increments,json=hotPathBanIncrements,proto3" json:"hot_path_ban_increments,omitempty"` // counter + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) Reset() { + *x = StatusResponse_P2PMetrics_DhtMetrics{} + mi := &file_supernode_status_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DhtMetrics) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_P2PMetrics_DhtMetrics) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0} +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetStoreSuccessRecent() []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint { + if x != nil { + return x.StoreSuccessRecent + } + return nil +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetBatchRetrieveRecent() []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint { + if x != nil { + return x.BatchRetrieveRecent + } + return nil +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBannedSkips() int64 { + if x != nil { + return x.HotPathBannedSkips + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBanIncrements() int64 { + if x != nil { + return x.HotPathBanIncrements + } + return 0 +} + +// Per-handler counters from network layer +type StatusResponse_P2PMetrics_HandleCounters struct { + state protoimpl.MessageState `protogen:"open.v1"` + Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Success int64 `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"` + Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() { + *x = StatusResponse_P2PMetrics_HandleCounters{} + mi := &file_supernode_status_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_HandleCounters) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[10] + if 
x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_HandleCounters.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_HandleCounters) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 1} +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetSuccess() int64 { + if x != nil { + return x.Success + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetFailure() int64 { + if x != nil { + return x.Failure + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_HandleCounters) GetTimeout() int64 { + if x != nil { + return x.Timeout + } + return 0 +} + +// Ban list entry +type StatusResponse_P2PMetrics_BanEntry struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID + Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP + Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count + CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds) + AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_P2PMetrics_BanEntry) Reset() { + *x = StatusResponse_P2PMetrics_BanEntry{} + mi := &file_supernode_status_proto_msgTypes[11] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_BanEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_BanEntry) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_BanEntry.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_BanEntry) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 2} +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetIp() string { + if x != nil { + return x.Ip + } + return "" +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetPort() uint32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetCreatedAtUnix() int64 { + if x != nil { + return x.CreatedAtUnix + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_BanEntry) GetAgeSeconds() int64 { + if x != nil { + return x.AgeSeconds + } + return 0 +} + +// DB stats +type StatusResponse_P2PMetrics_DatabaseStats struct { + state protoimpl.MessageState `protogen:"open.v1"` + P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"` + P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + 
+func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() { + *x = StatusResponse_P2PMetrics_DatabaseStats{} + mi := &file_supernode_status_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DatabaseStats) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DatabaseStats.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_DatabaseStats) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 3} +} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbSizeMb() float64 { + if x != nil { + return x.P2PDbSizeMb + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbRecordsCount() int64 { + if x != nil { + return x.P2PDbRecordsCount + } + return 0 +} + +// Disk status +type StatusResponse_P2PMetrics_DiskStatus struct { + state protoimpl.MessageState `protogen:"open.v1"` + AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"` + UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"` + FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() { + *x = StatusResponse_P2PMetrics_DiskStatus{} + mi := &file_supernode_status_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DiskStatus) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DiskStatus.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_DiskStatus) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 4} +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) GetAllMb() float64 { + if x != nil { + return x.AllMb + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) GetUsedMb() float64 { + if x != nil { + return x.UsedMb + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DiskStatus) GetFreeMb() float64 { + if x != nil { + return x.FreeMb + } + return 0 +} + +type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { + state protoimpl.MessageState `protogen:"open.v1"` + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted + Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs + SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100) + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() { + *x = StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{} + mi := 
&file_supernode_status_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint.ProtoReflect.Descriptor instead. +func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0, 0} +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetTimeUnix() int64 { + if x != nil { + return x.TimeUnix + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetRequests() int32 { + if x != nil { + return x.Requests + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessful() int32 { + if x != nil { + return x.Successful + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessRate() float64 { + if x != nil { + return x.SuccessRate + } + return 0 +} + +type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct { + state protoimpl.MessageState `protogen:"open.v1"` + TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) + Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested + Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // 
required count + FoundLocal int32 `protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally + FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network + DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() { + *x = StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{} + mi := &file_supernode_status_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoMessage() {} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() protoreflect.Message { + mi := &file_supernode_status_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint.ProtoReflect.Descriptor instead. 
+func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Descriptor() ([]byte, []int) { + return file_supernode_status_proto_rawDescGZIP(), []int{1, 3, 0, 1} +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetTimeUnix() int64 { + if x != nil { + return x.TimeUnix + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetKeys() int32 { + if x != nil { + return x.Keys + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetRequired() int32 { + if x != nil { + return x.Required + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundLocal() int32 { + if x != nil { + return x.FoundLocal + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundNetwork() int32 { + if x != nil { + return x.FoundNetwork + } + return 0 +} + +func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetDurationMs() int64 { + if x != nil { + return x.DurationMs + } + return 0 +} + +var File_supernode_status_proto protoreflect.FileDescriptor + +const file_supernode_status_proto_rawDesc = "" + + "\n" + + "\x16supernode/status.proto\x12\tsupernode\"?\n" + + "\rStatusRequest\x12.\n" + + "\x13include_p2p_metrics\x18\x01 \x01(\bR\x11includeP2pMetrics\"\x84\x19\n" + + "\x0eStatusResponse\x12\x18\n" + + "\aversion\x18\x01 \x01(\tR\aversion\x12%\n" + + "\x0euptime_seconds\x18\x02 \x01(\x04R\ruptimeSeconds\x12A\n" + + "\tresources\x18\x03 \x01(\v2#.supernode.StatusResponse.ResourcesR\tresources\x12K\n" + + "\rrunning_tasks\x18\x04 \x03(\v2&.supernode.StatusResponse.ServiceTasksR\frunningTasks\x12/\n" + + "\x13registered_services\x18\x05 \x03(\tR\x12registeredServices\x12;\n" + + "\anetwork\x18\x06 \x01(\v2!.supernode.StatusResponse.NetworkR\anetwork\x12\x12\n" + + "\x04rank\x18\a \x01(\x05R\x04rank\x12\x1d\n" + + "\n" + + "ip_address\x18\b \x01(\tR\tipAddress\x12E\n" + + "\vp2p_metrics\x18\t 
\x01(\v2$.supernode.StatusResponse.P2PMetricsR\n" + + "p2pMetrics\x1a\x82\x05\n" + + "\tResources\x129\n" + + "\x03cpu\x18\x01 \x01(\v2'.supernode.StatusResponse.Resources.CPUR\x03cpu\x12B\n" + + "\x06memory\x18\x02 \x01(\v2*.supernode.StatusResponse.Resources.MemoryR\x06memory\x12T\n" + + "\x0fstorage_volumes\x18\x03 \x03(\v2+.supernode.StatusResponse.Resources.StorageR\x0estorageVolumes\x12)\n" + + "\x10hardware_summary\x18\x04 \x01(\tR\x0fhardwareSummary\x1a@\n" + + "\x03CPU\x12#\n" + + "\rusage_percent\x18\x01 \x01(\x01R\fusagePercent\x12\x14\n" + + "\x05cores\x18\x02 \x01(\x05R\x05cores\x1a\x84\x01\n" + + "\x06Memory\x12\x19\n" + + "\btotal_gb\x18\x01 \x01(\x01R\atotalGb\x12\x17\n" + + "\aused_gb\x18\x02 \x01(\x01R\x06usedGb\x12!\n" + + "\favailable_gb\x18\x03 \x01(\x01R\vavailableGb\x12#\n" + + "\rusage_percent\x18\x04 \x01(\x01R\fusagePercent\x1a\xab\x01\n" + + "\aStorage\x12\x12\n" + + "\x04path\x18\x01 \x01(\tR\x04path\x12\x1f\n" + + "\vtotal_bytes\x18\x02 \x01(\x04R\n" + + "totalBytes\x12\x1d\n" + + "\n" + + "used_bytes\x18\x03 \x01(\x04R\tusedBytes\x12'\n" + + "\x0favailable_bytes\x18\x04 \x01(\x04R\x0eavailableBytes\x12#\n" + + "\rusage_percent\x18\x05 \x01(\x01R\fusagePercent\x1ak\n" + + "\fServiceTasks\x12!\n" + + "\fservice_name\x18\x01 \x01(\tR\vserviceName\x12\x19\n" + + "\btask_ids\x18\x02 \x03(\tR\ataskIds\x12\x1d\n" + + "\n" + + "task_count\x18\x03 \x01(\x05R\ttaskCount\x1aQ\n" + + "\aNetwork\x12\x1f\n" + + "\vpeers_count\x18\x01 \x01(\x05R\n" + + "peersCount\x12%\n" + + "\x0epeer_addresses\x18\x02 \x03(\tR\rpeerAddresses\x1a\xf3\x0e\n" + + "\n" + + "P2PMetrics\x12P\n" + + "\vdht_metrics\x18\x01 \x01(\v2/.supernode.StatusResponse.P2PMetrics.DhtMetricsR\n" + + "dhtMetrics\x12t\n" + + "\x16network_handle_metrics\x18\x02 \x03(\v2>.supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntryR\x14networkHandleMetrics\x12e\n" + + "\x11conn_pool_metrics\x18\x03 
\x03(\v29.supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntryR\x0fconnPoolMetrics\x12H\n" + + "\bban_list\x18\x04 \x03(\v2-.supernode.StatusResponse.P2PMetrics.BanEntryR\abanList\x12N\n" + + "\bdatabase\x18\x05 \x01(\v22.supernode.StatusResponse.P2PMetrics.DatabaseStatsR\bdatabase\x12C\n" + + "\x04disk\x18\x06 \x01(\v2/.supernode.StatusResponse.P2PMetrics.DiskStatusR\x04disk\x1a\xc0\x05\n" + + "\n" + + "DhtMetrics\x12s\n" + + "\x14store_success_recent\x18\x01 \x03(\v2A.supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPointR\x12storeSuccessRecent\x12v\n" + + "\x15batch_retrieve_recent\x18\x02 \x03(\v2B.supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePointR\x13batchRetrieveRecent\x121\n" + + "\x15hot_path_banned_skips\x18\x03 \x01(\x03R\x12hotPathBannedSkips\x125\n" + + "\x17hot_path_ban_increments\x18\x04 \x01(\x03R\x14hotPathBanIncrements\x1a\x8f\x01\n" + + "\x11StoreSuccessPoint\x12\x1b\n" + + "\ttime_unix\x18\x01 \x01(\x03R\btimeUnix\x12\x1a\n" + + "\brequests\x18\x02 \x01(\x05R\brequests\x12\x1e\n" + + "\n" + + "successful\x18\x03 \x01(\x05R\n" + + "successful\x12!\n" + + "\fsuccess_rate\x18\x04 \x01(\x01R\vsuccessRate\x1a\xc8\x01\n" + + "\x12BatchRetrievePoint\x12\x1b\n" + + "\ttime_unix\x18\x01 \x01(\x03R\btimeUnix\x12\x12\n" + + "\x04keys\x18\x02 \x01(\x05R\x04keys\x12\x1a\n" + + "\brequired\x18\x03 \x01(\x05R\brequired\x12\x1f\n" + + "\vfound_local\x18\x04 \x01(\x05R\n" + + "foundLocal\x12#\n" + + "\rfound_network\x18\x05 \x01(\x05R\ffoundNetwork\x12\x1f\n" + + "\vduration_ms\x18\x06 \x01(\x03R\n" + + "durationMs\x1at\n" + + "\x0eHandleCounters\x12\x14\n" + + "\x05total\x18\x01 \x01(\x03R\x05total\x12\x18\n" + + "\asuccess\x18\x02 \x01(\x03R\asuccess\x12\x18\n" + + "\afailure\x18\x03 \x01(\x03R\afailure\x12\x18\n" + + "\atimeout\x18\x04 \x01(\x03R\atimeout\x1a\x9d\x01\n" + + "\bBanEntry\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x0e\n" + + "\x02ip\x18\x02 \x01(\tR\x02ip\x12\x12\n" + + "\x04port\x18\x03 
\x01(\rR\x04port\x12\x14\n" + + "\x05count\x18\x04 \x01(\x05R\x05count\x12&\n" + + "\x0fcreated_at_unix\x18\x05 \x01(\x03R\rcreatedAtUnix\x12\x1f\n" + + "\vage_seconds\x18\x06 \x01(\x03R\n" + + "ageSeconds\x1ae\n" + + "\rDatabaseStats\x12#\n" + + "\x0ep2p_db_size_mb\x18\x01 \x01(\x01R\vp2pDbSizeMb\x12/\n" + + "\x14p2p_db_records_count\x18\x02 \x01(\x03R\x11p2pDbRecordsCount\x1aU\n" + + "\n" + + "DiskStatus\x12\x15\n" + + "\x06all_mb\x18\x01 \x01(\x01R\x05allMb\x12\x17\n" + + "\aused_mb\x18\x02 \x01(\x01R\x06usedMb\x12\x17\n" + + "\afree_mb\x18\x03 \x01(\x01R\x06freeMb\x1a|\n" + + "\x19NetworkHandleMetricsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12I\n" + + "\x05value\x18\x02 \x01(\v23.supernode.StatusResponse.P2PMetrics.HandleCountersR\x05value:\x028\x01\x1aB\n" + + "\x14ConnPoolMetricsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x01B6Z4github.com/LumeraProtocol/supernode/v2/gen/supernodeb\x06proto3" + +var ( + file_supernode_status_proto_rawDescOnce sync.Once + file_supernode_status_proto_rawDescData []byte +) + +func file_supernode_status_proto_rawDescGZIP() []byte { + file_supernode_status_proto_rawDescOnce.Do(func() { + file_supernode_status_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_status_proto_rawDesc), len(file_supernode_status_proto_rawDesc))) + }) + return file_supernode_status_proto_rawDescData +} + +var file_supernode_status_proto_msgTypes = make([]protoimpl.MessageInfo, 18) +var file_supernode_status_proto_goTypes = []any{ + (*StatusRequest)(nil), // 0: supernode.StatusRequest + (*StatusResponse)(nil), // 1: supernode.StatusResponse + (*StatusResponse_Resources)(nil), // 2: supernode.StatusResponse.Resources + (*StatusResponse_ServiceTasks)(nil), // 3: supernode.StatusResponse.ServiceTasks + (*StatusResponse_Network)(nil), // 4: supernode.StatusResponse.Network + (*StatusResponse_P2PMetrics)(nil), // 5: 
supernode.StatusResponse.P2PMetrics + (*StatusResponse_Resources_CPU)(nil), // 6: supernode.StatusResponse.Resources.CPU + (*StatusResponse_Resources_Memory)(nil), // 7: supernode.StatusResponse.Resources.Memory + (*StatusResponse_Resources_Storage)(nil), // 8: supernode.StatusResponse.Resources.Storage + (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 9: supernode.StatusResponse.P2PMetrics.DhtMetrics + (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 10: supernode.StatusResponse.P2PMetrics.HandleCounters + (*StatusResponse_P2PMetrics_BanEntry)(nil), // 11: supernode.StatusResponse.P2PMetrics.BanEntry + (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 12: supernode.StatusResponse.P2PMetrics.DatabaseStats + (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 13: supernode.StatusResponse.P2PMetrics.DiskStatus + nil, // 14: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + nil, // 15: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 16: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 17: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint +} +var file_supernode_status_proto_depIdxs = []int32{ + 2, // 0: supernode.StatusResponse.resources:type_name -> supernode.StatusResponse.Resources + 3, // 1: supernode.StatusResponse.running_tasks:type_name -> supernode.StatusResponse.ServiceTasks + 4, // 2: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network + 5, // 3: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics + 6, // 4: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU + 7, // 5: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory + 8, // 6: supernode.StatusResponse.Resources.storage_volumes:type_name -> 
supernode.StatusResponse.Resources.Storage + 9, // 7: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics + 14, // 8: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry + 15, // 9: supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry + 11, // 10: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry + 12, // 11: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats + 13, // 12: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus + 16, // 13: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint + 17, // 14: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint + 10, // 15: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_supernode_status_proto_init() } +func file_supernode_status_proto_init() { + if File_supernode_status_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_status_proto_rawDesc), len(file_supernode_status_proto_rawDesc)), + NumEnums: 0, + NumMessages: 18, + NumExtensions: 
0, + NumServices: 0, + }, + GoTypes: file_supernode_status_proto_goTypes, + DependencyIndexes: file_supernode_status_proto_depIdxs, + MessageInfos: file_supernode_status_proto_msgTypes, + }.Build() + File_supernode_status_proto = out.File + file_supernode_status_proto_goTypes = nil + file_supernode_status_proto_depIdxs = nil +} diff --git a/gen/supernode/status.swagger.json b/gen/supernode/status.swagger.json new file mode 100644 index 00000000..5b014db1 --- /dev/null +++ b/gen/supernode/status.swagger.json @@ -0,0 +1,44 @@ +{ + "swagger": "2.0", + "info": { + "title": "supernode/status.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string" + } + }, + "additionalProperties": {} + }, + "rpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + } + } + } + } + } +} diff --git a/gen/supernode/supernode.pb.go b/gen/supernode/supernode.pb.go deleted file mode 100644 index 431bc8b5..00000000 --- a/gen/supernode/supernode.pb.go +++ /dev/null @@ -1,2034 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.35.1 -// protoc v3.21.12 -// source: supernode/supernode.proto - -package supernode - -import ( - reflect "reflect" - sync "sync" - - _ "google.golang.org/genproto/googleapis/api/annotations" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type StatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional: include detailed P2P metrics in the response - // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true - IncludeP2PMetrics bool `protobuf:"varint,1,opt,name=include_p2p_metrics,json=includeP2pMetrics,proto3" json:"include_p2p_metrics,omitempty"` -} - -func (x *StatusRequest) Reset() { - *x = StatusRequest{} - mi := &file_supernode_supernode_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusRequest) ProtoMessage() {} - -func (x *StatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead. 
-func (*StatusRequest) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{0} -} - -func (x *StatusRequest) GetIncludeP2PMetrics() bool { - if x != nil { - return x.IncludeP2PMetrics - } - return false -} - -type ListServicesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ListServicesRequest) Reset() { - *x = ListServicesRequest{} - mi := &file_supernode_supernode_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListServicesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServicesRequest) ProtoMessage() {} - -func (x *ListServicesRequest) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead. 
-func (*ListServicesRequest) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{1} -} - -type ListServicesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Services []*ServiceInfo `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` - Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` -} - -func (x *ListServicesResponse) Reset() { - *x = ListServicesResponse{} - mi := &file_supernode_supernode_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListServicesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServicesResponse) ProtoMessage() {} - -func (x *ListServicesResponse) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead. 
-func (*ListServicesResponse) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{2} -} - -func (x *ListServicesResponse) GetServices() []*ServiceInfo { - if x != nil { - return x.Services - } - return nil -} - -func (x *ListServicesResponse) GetCount() int32 { - if x != nil { - return x.Count - } - return 0 -} - -type ServiceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Methods []string `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` -} - -func (x *ServiceInfo) Reset() { - *x = ServiceInfo{} - mi := &file_supernode_supernode_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ServiceInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceInfo) ProtoMessage() {} - -func (x *ServiceInfo) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceInfo.ProtoReflect.Descriptor instead. 
-func (*ServiceInfo) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{3} -} - -func (x *ServiceInfo) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ServiceInfo) GetMethods() []string { - if x != nil { - return x.Methods - } - return nil -} - -// The StatusResponse represents system status with clear organization -type StatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Supernode version - UptimeSeconds uint64 `protobuf:"varint,2,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` // Uptime in seconds - Resources *StatusResponse_Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` - RunningTasks []*StatusResponse_ServiceTasks `protobuf:"bytes,4,rep,name=running_tasks,json=runningTasks,proto3" json:"running_tasks,omitempty"` // Services with currently running tasks - RegisteredServices []string `protobuf:"bytes,5,rep,name=registered_services,json=registeredServices,proto3" json:"registered_services,omitempty"` // All registered/available services - Network *StatusResponse_Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` // P2P network information - Rank int32 `protobuf:"varint,7,opt,name=rank,proto3" json:"rank,omitempty"` // Rank in the top supernodes list (0 if not in top list) - IpAddress string `protobuf:"bytes,8,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Supernode IP address with port (e.g., "192.168.1.1:4445") - P2PMetrics *StatusResponse_P2PMetrics `protobuf:"bytes,9,opt,name=p2p_metrics,json=p2pMetrics,proto3" json:"p2p_metrics,omitempty"` -} - -func (x *StatusResponse) Reset() { - *x = StatusResponse{} - mi := &file_supernode_supernode_proto_msgTypes[4] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse) ProtoMessage() {} - -func (x *StatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. -func (*StatusResponse) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4} -} - -func (x *StatusResponse) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *StatusResponse) GetUptimeSeconds() uint64 { - if x != nil { - return x.UptimeSeconds - } - return 0 -} - -func (x *StatusResponse) GetResources() *StatusResponse_Resources { - if x != nil { - return x.Resources - } - return nil -} - -func (x *StatusResponse) GetRunningTasks() []*StatusResponse_ServiceTasks { - if x != nil { - return x.RunningTasks - } - return nil -} - -func (x *StatusResponse) GetRegisteredServices() []string { - if x != nil { - return x.RegisteredServices - } - return nil -} - -func (x *StatusResponse) GetNetwork() *StatusResponse_Network { - if x != nil { - return x.Network - } - return nil -} - -func (x *StatusResponse) GetRank() int32 { - if x != nil { - return x.Rank - } - return 0 -} - -func (x *StatusResponse) GetIpAddress() string { - if x != nil { - return x.IpAddress - } - return "" -} - -func (x *StatusResponse) GetP2PMetrics() *StatusResponse_P2PMetrics { - if x != nil { - return x.P2PMetrics - } - return nil -} - -// System resource information -type StatusResponse_Resources struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Cpu *StatusResponse_Resources_CPU 
`protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` - Memory *StatusResponse_Resources_Memory `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` - StorageVolumes []*StatusResponse_Resources_Storage `protobuf:"bytes,3,rep,name=storage_volumes,json=storageVolumes,proto3" json:"storage_volumes,omitempty"` - HardwareSummary string `protobuf:"bytes,4,opt,name=hardware_summary,json=hardwareSummary,proto3" json:"hardware_summary,omitempty"` // Formatted hardware summary (e.g., "8 cores / 32GB RAM") -} - -func (x *StatusResponse_Resources) Reset() { - *x = StatusResponse_Resources{} - mi := &file_supernode_supernode_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_Resources) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_Resources) ProtoMessage() {} - -func (x *StatusResponse_Resources) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_Resources.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_Resources) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *StatusResponse_Resources) GetCpu() *StatusResponse_Resources_CPU { - if x != nil { - return x.Cpu - } - return nil -} - -func (x *StatusResponse_Resources) GetMemory() *StatusResponse_Resources_Memory { - if x != nil { - return x.Memory - } - return nil -} - -func (x *StatusResponse_Resources) GetStorageVolumes() []*StatusResponse_Resources_Storage { - if x != nil { - return x.StorageVolumes - } - return nil -} - -func (x *StatusResponse_Resources) GetHardwareSummary() string { - if x != nil { - return x.HardwareSummary - } - return "" -} - -// ServiceTasks contains task information for a specific service -type StatusResponse_ServiceTasks struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - TaskIds []string `protobuf:"bytes,2,rep,name=task_ids,json=taskIds,proto3" json:"task_ids,omitempty"` - TaskCount int32 `protobuf:"varint,3,opt,name=task_count,json=taskCount,proto3" json:"task_count,omitempty"` -} - -func (x *StatusResponse_ServiceTasks) Reset() { - *x = StatusResponse_ServiceTasks{} - mi := &file_supernode_supernode_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_ServiceTasks) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_ServiceTasks) ProtoMessage() {} - -func (x *StatusResponse_ServiceTasks) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use 
StatusResponse_ServiceTasks.ProtoReflect.Descriptor instead. -func (*StatusResponse_ServiceTasks) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 1} -} - -func (x *StatusResponse_ServiceTasks) GetServiceName() string { - if x != nil { - return x.ServiceName - } - return "" -} - -func (x *StatusResponse_ServiceTasks) GetTaskIds() []string { - if x != nil { - return x.TaskIds - } - return nil -} - -func (x *StatusResponse_ServiceTasks) GetTaskCount() int32 { - if x != nil { - return x.TaskCount - } - return 0 -} - -// Network information -type StatusResponse_Network struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeersCount int32 `protobuf:"varint,1,opt,name=peers_count,json=peersCount,proto3" json:"peers_count,omitempty"` // Number of connected peers in P2P network - PeerAddresses []string `protobuf:"bytes,2,rep,name=peer_addresses,json=peerAddresses,proto3" json:"peer_addresses,omitempty"` // List of connected peer addresses (optional, may be empty for privacy) -} - -func (x *StatusResponse_Network) Reset() { - *x = StatusResponse_Network{} - mi := &file_supernode_supernode_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_Network) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_Network) ProtoMessage() {} - -func (x *StatusResponse_Network) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_Network.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_Network) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 2} -} - -func (x *StatusResponse_Network) GetPeersCount() int32 { - if x != nil { - return x.PeersCount - } - return 0 -} - -func (x *StatusResponse_Network) GetPeerAddresses() []string { - if x != nil { - return x.PeerAddresses - } - return nil -} - -// P2P metrics and diagnostics (additive field) -type StatusResponse_P2PMetrics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - DhtMetrics *StatusResponse_P2PMetrics_DhtMetrics `protobuf:"bytes,1,opt,name=dht_metrics,json=dhtMetrics,proto3" json:"dht_metrics,omitempty"` - NetworkHandleMetrics map[string]*StatusResponse_P2PMetrics_HandleCounters `protobuf:"bytes,2,rep,name=network_handle_metrics,json=networkHandleMetrics,proto3" json:"network_handle_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - ConnPoolMetrics map[string]int64 `protobuf:"bytes,3,rep,name=conn_pool_metrics,json=connPoolMetrics,proto3" json:"conn_pool_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - BanList []*StatusResponse_P2PMetrics_BanEntry `protobuf:"bytes,4,rep,name=ban_list,json=banList,proto3" json:"ban_list,omitempty"` - Database *StatusResponse_P2PMetrics_DatabaseStats `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` - Disk *StatusResponse_P2PMetrics_DiskStatus `protobuf:"bytes,6,opt,name=disk,proto3" json:"disk,omitempty"` - RecentBatchStore []*StatusResponse_P2PMetrics_RecentBatchStoreEntry `protobuf:"bytes,7,rep,name=recent_batch_store,json=recentBatchStore,proto3" json:"recent_batch_store,omitempty"` - RecentBatchRetrieve []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry `protobuf:"bytes,8,rep,name=recent_batch_retrieve,json=recentBatchRetrieve,proto3" json:"recent_batch_retrieve,omitempty"` - 
RecentBatchStoreByIp map[string]*StatusResponse_P2PMetrics_RecentBatchStoreList `protobuf:"bytes,9,rep,name=recent_batch_store_by_ip,json=recentBatchStoreByIp,proto3" json:"recent_batch_store_by_ip,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - RecentBatchRetrieveByIp map[string]*StatusResponse_P2PMetrics_RecentBatchRetrieveList `protobuf:"bytes,10,rep,name=recent_batch_retrieve_by_ip,json=recentBatchRetrieveByIp,proto3" json:"recent_batch_retrieve_by_ip,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *StatusResponse_P2PMetrics) Reset() { - *x = StatusResponse_P2PMetrics{} - mi := &file_supernode_supernode_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_P2PMetrics) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3} -} - -func (x *StatusResponse_P2PMetrics) GetDhtMetrics() *StatusResponse_P2PMetrics_DhtMetrics { - if x != nil { - return x.DhtMetrics - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetNetworkHandleMetrics() map[string]*StatusResponse_P2PMetrics_HandleCounters { - if x != nil { - return x.NetworkHandleMetrics - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetConnPoolMetrics() map[string]int64 { - if x != nil { - return x.ConnPoolMetrics - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetBanList() []*StatusResponse_P2PMetrics_BanEntry { - if x != nil { - return x.BanList - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetDatabase() *StatusResponse_P2PMetrics_DatabaseStats { - if x != nil { - return x.Database - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetDisk() *StatusResponse_P2PMetrics_DiskStatus { - if x != nil { - return x.Disk - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetRecentBatchStore() []*StatusResponse_P2PMetrics_RecentBatchStoreEntry { - if x != nil { - return x.RecentBatchStore - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetRecentBatchRetrieve() []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry { - if x != nil { - return x.RecentBatchRetrieve - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetRecentBatchStoreByIp() map[string]*StatusResponse_P2PMetrics_RecentBatchStoreList { - if x != nil { - return x.RecentBatchStoreByIp - } - return nil -} - -func (x *StatusResponse_P2PMetrics) GetRecentBatchRetrieveByIp() map[string]*StatusResponse_P2PMetrics_RecentBatchRetrieveList { - if x != nil { - return x.RecentBatchRetrieveByIp - } - return nil -} - -type StatusResponse_Resources_CPU struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - UsagePercent float64 
`protobuf:"fixed64,1,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // CPU usage percentage (0-100) - Cores int32 `protobuf:"varint,2,opt,name=cores,proto3" json:"cores,omitempty"` // Number of CPU cores -} - -func (x *StatusResponse_Resources_CPU) Reset() { - *x = StatusResponse_Resources_CPU{} - mi := &file_supernode_supernode_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_Resources_CPU) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_Resources_CPU) ProtoMessage() {} - -func (x *StatusResponse_Resources_CPU) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_Resources_CPU.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_Resources_CPU) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0, 0} -} - -func (x *StatusResponse_Resources_CPU) GetUsagePercent() float64 { - if x != nil { - return x.UsagePercent - } - return 0 -} - -func (x *StatusResponse_Resources_CPU) GetCores() int32 { - if x != nil { - return x.Cores - } - return 0 -} - -type StatusResponse_Resources_Memory struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TotalGb float64 `protobuf:"fixed64,1,opt,name=total_gb,json=totalGb,proto3" json:"total_gb,omitempty"` // Total memory in GB - UsedGb float64 `protobuf:"fixed64,2,opt,name=used_gb,json=usedGb,proto3" json:"used_gb,omitempty"` // Used memory in GB - AvailableGb float64 `protobuf:"fixed64,3,opt,name=available_gb,json=availableGb,proto3" json:"available_gb,omitempty"` // Available memory in GB - UsagePercent float64 `protobuf:"fixed64,4,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Memory usage percentage (0-100) -} - -func (x *StatusResponse_Resources_Memory) Reset() { - *x = StatusResponse_Resources_Memory{} - mi := &file_supernode_supernode_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_Resources_Memory) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_Resources_Memory) ProtoMessage() {} - -func (x *StatusResponse_Resources_Memory) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_Resources_Memory.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_Resources_Memory) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0, 1} -} - -func (x *StatusResponse_Resources_Memory) GetTotalGb() float64 { - if x != nil { - return x.TotalGb - } - return 0 -} - -func (x *StatusResponse_Resources_Memory) GetUsedGb() float64 { - if x != nil { - return x.UsedGb - } - return 0 -} - -func (x *StatusResponse_Resources_Memory) GetAvailableGb() float64 { - if x != nil { - return x.AvailableGb - } - return 0 -} - -func (x *StatusResponse_Resources_Memory) GetUsagePercent() float64 { - if x != nil { - return x.UsagePercent - } - return 0 -} - -type StatusResponse_Resources_Storage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Storage path being monitored - TotalBytes uint64 `protobuf:"varint,2,opt,name=total_bytes,json=totalBytes,proto3" json:"total_bytes,omitempty"` - UsedBytes uint64 `protobuf:"varint,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` - AvailableBytes uint64 `protobuf:"varint,4,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` - UsagePercent float64 `protobuf:"fixed64,5,opt,name=usage_percent,json=usagePercent,proto3" json:"usage_percent,omitempty"` // Storage usage percentage (0-100) -} - -func (x *StatusResponse_Resources_Storage) Reset() { - *x = StatusResponse_Resources_Storage{} - mi := &file_supernode_supernode_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_Resources_Storage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_Resources_Storage) ProtoMessage() {} - -func (x *StatusResponse_Resources_Storage) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[11] - if x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_Resources_Storage.ProtoReflect.Descriptor instead. -func (*StatusResponse_Resources_Storage) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 0, 2} -} - -func (x *StatusResponse_Resources_Storage) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *StatusResponse_Resources_Storage) GetTotalBytes() uint64 { - if x != nil { - return x.TotalBytes - } - return 0 -} - -func (x *StatusResponse_Resources_Storage) GetUsedBytes() uint64 { - if x != nil { - return x.UsedBytes - } - return 0 -} - -func (x *StatusResponse_Resources_Storage) GetAvailableBytes() uint64 { - if x != nil { - return x.AvailableBytes - } - return 0 -} - -func (x *StatusResponse_Resources_Storage) GetUsagePercent() float64 { - if x != nil { - return x.UsagePercent - } - return 0 -} - -// Rolling DHT metrics snapshot -type StatusResponse_P2PMetrics_DhtMetrics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - StoreSuccessRecent []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint `protobuf:"bytes,1,rep,name=store_success_recent,json=storeSuccessRecent,proto3" json:"store_success_recent,omitempty"` - BatchRetrieveRecent []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint `protobuf:"bytes,2,rep,name=batch_retrieve_recent,json=batchRetrieveRecent,proto3" json:"batch_retrieve_recent,omitempty"` - HotPathBannedSkips int64 `protobuf:"varint,3,opt,name=hot_path_banned_skips,json=hotPathBannedSkips,proto3" json:"hot_path_banned_skips,omitempty"` // counter - HotPathBanIncrements int64 `protobuf:"varint,4,opt,name=hot_path_ban_increments,json=hotPathBanIncrements,proto3" json:"hot_path_ban_increments,omitempty"` // counter -} - -func (x 
*StatusResponse_P2PMetrics_DhtMetrics) Reset() { - *x = StatusResponse_P2PMetrics_DhtMetrics{} - mi := &file_supernode_supernode_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_DhtMetrics) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[12] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_DhtMetrics) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0} -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) GetStoreSuccessRecent() []*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint { - if x != nil { - return x.StoreSuccessRecent - } - return nil -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) GetBatchRetrieveRecent() []*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint { - if x != nil { - return x.BatchRetrieveRecent - } - return nil -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBannedSkips() int64 { - if x != nil { - return x.HotPathBannedSkips - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics) GetHotPathBanIncrements() int64 { - if x != nil { - return x.HotPathBanIncrements - } - return 0 -} - -// Per-handler counters from network layer -type StatusResponse_P2PMetrics_HandleCounters struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Success int64 
`protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` - Failure int64 `protobuf:"varint,3,opt,name=failure,proto3" json:"failure,omitempty"` - Timeout int64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) Reset() { - *x = StatusResponse_P2PMetrics_HandleCounters{} - mi := &file_supernode_supernode_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_HandleCounters) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_HandleCounters) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_HandleCounters.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_P2PMetrics_HandleCounters) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 1} -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) GetTotal() int64 { - if x != nil { - return x.Total - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) GetSuccess() int64 { - if x != nil { - return x.Success - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) GetFailure() int64 { - if x != nil { - return x.Failure - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_HandleCounters) GetTimeout() int64 { - if x != nil { - return x.Timeout - } - return 0 -} - -// Ban list entry -type StatusResponse_P2PMetrics_BanEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // printable ID - Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` // last seen IP - Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` // last seen port - Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // failure count - CreatedAtUnix int64 `protobuf:"varint,5,opt,name=created_at_unix,json=createdAtUnix,proto3" json:"created_at_unix,omitempty"` // first ban time (unix seconds) - AgeSeconds int64 `protobuf:"varint,6,opt,name=age_seconds,json=ageSeconds,proto3" json:"age_seconds,omitempty"` // age in seconds -} - -func (x *StatusResponse_P2PMetrics_BanEntry) Reset() { - *x = StatusResponse_P2PMetrics_BanEntry{} - mi := &file_supernode_supernode_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_BanEntry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_BanEntry) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_BanEntry) ProtoReflect() 
protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_BanEntry.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_BanEntry) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 2} -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetPort() uint32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetCount() int32 { - if x != nil { - return x.Count - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetCreatedAtUnix() int64 { - if x != nil { - return x.CreatedAtUnix - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_BanEntry) GetAgeSeconds() int64 { - if x != nil { - return x.AgeSeconds - } - return 0 -} - -// DB stats -type StatusResponse_P2PMetrics_DatabaseStats struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - P2PDbSizeMb float64 `protobuf:"fixed64,1,opt,name=p2p_db_size_mb,json=p2pDbSizeMb,proto3" json:"p2p_db_size_mb,omitempty"` - P2PDbRecordsCount int64 `protobuf:"varint,2,opt,name=p2p_db_records_count,json=p2pDbRecordsCount,proto3" json:"p2p_db_records_count,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_DatabaseStats) Reset() { - *x = StatusResponse_P2PMetrics_DatabaseStats{} - mi := &file_supernode_supernode_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_DatabaseStats) String() string { - 
return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_DatabaseStats) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_DatabaseStats) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_DatabaseStats.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_DatabaseStats) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 3} -} - -func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbSizeMb() float64 { - if x != nil { - return x.P2PDbSizeMb - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DatabaseStats) GetP2PDbRecordsCount() int64 { - if x != nil { - return x.P2PDbRecordsCount - } - return 0 -} - -// Disk status -type StatusResponse_P2PMetrics_DiskStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AllMb float64 `protobuf:"fixed64,1,opt,name=all_mb,json=allMb,proto3" json:"all_mb,omitempty"` - UsedMb float64 `protobuf:"fixed64,2,opt,name=used_mb,json=usedMb,proto3" json:"used_mb,omitempty"` - FreeMb float64 `protobuf:"fixed64,3,opt,name=free_mb,json=freeMb,proto3" json:"free_mb,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_DiskStatus) Reset() { - *x = StatusResponse_P2PMetrics_DiskStatus{} - mi := &file_supernode_supernode_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_DiskStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_DiskStatus) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_DiskStatus) ProtoReflect() protoreflect.Message { - mi := 
&file_supernode_supernode_proto_msgTypes[16] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_DiskStatus.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_DiskStatus) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 4} -} - -func (x *StatusResponse_P2PMetrics_DiskStatus) GetAllMb() float64 { - if x != nil { - return x.AllMb - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DiskStatus) GetUsedMb() float64 { - if x != nil { - return x.UsedMb - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DiskStatus) GetFreeMb() float64 { - if x != nil { - return x.FreeMb - } - return 0 -} - -// Last handled BatchStoreData requests (most recent first) -type StatusResponse_P2PMetrics_RecentBatchStoreEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` - SenderId string `protobuf:"bytes,2,opt,name=sender_id,json=senderId,proto3" json:"sender_id,omitempty"` - SenderIp string `protobuf:"bytes,3,opt,name=sender_ip,json=senderIp,proto3" json:"sender_ip,omitempty"` - Keys int32 `protobuf:"varint,4,opt,name=keys,proto3" json:"keys,omitempty"` - DurationMs int64 `protobuf:"varint,5,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` - Ok bool `protobuf:"varint,6,opt,name=ok,proto3" json:"ok,omitempty"` - Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) Reset() { - *x = StatusResponse_P2PMetrics_RecentBatchStoreEntry{} - mi := &file_supernode_supernode_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) 
-} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_RecentBatchStoreEntry) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[19] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchStoreEntry.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_RecentBatchStoreEntry) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 7} -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetTimeUnix() int64 { - if x != nil { - return x.TimeUnix - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetSenderId() string { - if x != nil { - return x.SenderId - } - return "" -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetSenderIp() string { - if x != nil { - return x.SenderIp - } - return "" -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetKeys() int32 { - if x != nil { - return x.Keys - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetDurationMs() int64 { - if x != nil { - return x.DurationMs - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetOk() bool { - if x != nil { - return x.Ok - } - return false -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreEntry) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -// Last handled BatchGetValues requests (most recent first) -type StatusResponse_P2PMetrics_RecentBatchRetrieveEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TimeUnix int64 
`protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` - SenderId string `protobuf:"bytes,2,opt,name=sender_id,json=senderId,proto3" json:"sender_id,omitempty"` - SenderIp string `protobuf:"bytes,3,opt,name=sender_ip,json=senderIp,proto3" json:"sender_ip,omitempty"` - Requested int32 `protobuf:"varint,4,opt,name=requested,proto3" json:"requested,omitempty"` - Found int32 `protobuf:"varint,5,opt,name=found,proto3" json:"found,omitempty"` - DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` - Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) Reset() { - *x = StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{} - mi := &file_supernode_supernode_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[20] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchRetrieveEntry.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 8} -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetTimeUnix() int64 { - if x != nil { - return x.TimeUnix - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetSenderId() string { - if x != nil { - return x.SenderId - } - return "" -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetSenderIp() string { - if x != nil { - return x.SenderIp - } - return "" -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetRequested() int32 { - if x != nil { - return x.Requested - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetFound() int32 { - if x != nil { - return x.Found - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetDurationMs() int64 { - if x != nil { - return x.DurationMs - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveEntry) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -// Per-IP buckets: last 10 per sender IP -type StatusResponse_P2PMetrics_RecentBatchStoreList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Entries []*StatusResponse_P2PMetrics_RecentBatchStoreEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) Reset() { - *x = StatusResponse_P2PMetrics_RecentBatchStoreList{} - mi := &file_supernode_supernode_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_RecentBatchStoreList) ProtoMessage() {} - -func (x 
*StatusResponse_P2PMetrics_RecentBatchStoreList) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[21] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchStoreList.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_RecentBatchStoreList) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 9} -} - -func (x *StatusResponse_P2PMetrics_RecentBatchStoreList) GetEntries() []*StatusResponse_P2PMetrics_RecentBatchStoreEntry { - if x != nil { - return x.Entries - } - return nil -} - -type StatusResponse_P2PMetrics_RecentBatchRetrieveList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Entries []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) Reset() { - *x = StatusResponse_P2PMetrics_RecentBatchRetrieveList{} - mi := &file_supernode_supernode_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_RecentBatchRetrieveList) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[22] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_RecentBatchRetrieveList.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_P2PMetrics_RecentBatchRetrieveList) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 10} -} - -func (x *StatusResponse_P2PMetrics_RecentBatchRetrieveList) GetEntries() []*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry { - if x != nil { - return x.Entries - } - return nil -} - -type StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) - Requests int32 `protobuf:"varint,2,opt,name=requests,proto3" json:"requests,omitempty"` // total node RPCs attempted - Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` // successful node RPCs - SuccessRate float64 `protobuf:"fixed64,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"` // percentage (0-100) -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Reset() { - *x = StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{} - mi := &file_supernode_supernode_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[25] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint.ProtoReflect.Descriptor instead. 
-func (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0, 0} -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetTimeUnix() int64 { - if x != nil { - return x.TimeUnix - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetRequests() int32 { - if x != nil { - return x.Requests - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessful() int32 { - if x != nil { - return x.Successful - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint) GetSuccessRate() float64 { - if x != nil { - return x.SuccessRate - } - return 0 -} - -type StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TimeUnix int64 `protobuf:"varint,1,opt,name=time_unix,json=timeUnix,proto3" json:"time_unix,omitempty"` // event time (unix seconds) - Keys int32 `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` // keys requested - Required int32 `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` // required count - FoundLocal int32 `protobuf:"varint,4,opt,name=found_local,json=foundLocal,proto3" json:"found_local,omitempty"` // found locally - FoundNetwork int32 `protobuf:"varint,5,opt,name=found_network,json=foundNetwork,proto3" json:"found_network,omitempty"` // found on network - DurationMs int64 `protobuf:"varint,6,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` // duration in milliseconds -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Reset() { - *x = StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{} - mi := &file_supernode_supernode_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x 
*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoMessage() {} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) ProtoReflect() protoreflect.Message { - mi := &file_supernode_supernode_proto_msgTypes[26] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint.ProtoReflect.Descriptor instead. -func (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) Descriptor() ([]byte, []int) { - return file_supernode_supernode_proto_rawDescGZIP(), []int{4, 3, 0, 1} -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetTimeUnix() int64 { - if x != nil { - return x.TimeUnix - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetKeys() int32 { - if x != nil { - return x.Keys - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetRequired() int32 { - if x != nil { - return x.Required - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundLocal() int32 { - if x != nil { - return x.FoundLocal - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetFoundNetwork() int32 { - if x != nil { - return x.FoundNetwork - } - return 0 -} - -func (x *StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint) GetDurationMs() int64 { - if x != nil { - return x.DurationMs - } - return 0 -} - -var File_supernode_supernode_proto protoreflect.FileDescriptor - -var file_supernode_supernode_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x12, 0x09, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x5f, 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x32, 0x70, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x60, 0x0a, 0x14, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x3b, - 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x22, 0xf7, 0x23, 0x0a, 0x0e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x75, 0x70, 0x74, 0x69, - 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0d, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, - 0x41, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, - 0x73, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x75, 0x70, 0x65, - 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, - 0x73, 0x52, 0x0c, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, - 0x2f, 0x0a, 0x13, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x12, 0x3b, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, - 0x04, 0x72, 0x61, 0x6e, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x61, 0x6e, - 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x70, 
0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x12, 0x45, 0x0a, 0x0b, 0x70, 0x32, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0a, 0x70, 0x32, 0x70, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x1a, 0x82, 0x05, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, - 0x12, 0x42, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, - 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x54, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x73, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 
0x10, 0x68, 0x61, - 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x68, 0x61, 0x72, 0x64, 0x77, 0x61, 0x72, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x1a, 0x40, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x23, 0x0a, 0x0d, - 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, - 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x67, 0x62, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x47, 0x62, 0x12, 0x17, 0x0a, - 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x67, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, - 0x75, 0x73, 0x65, 0x64, 0x47, 0x62, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x67, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x61, 0x76, - 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x47, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, - 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x0c, 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0xab, - 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, - 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, - 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 
0x52, 0x09, 0x75, 0x73, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x27, - 0x0a, 0x0f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, - 0x6c, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, - 0x75, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x1a, 0x6b, 0x0a, 0x0c, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x19, 0x0a, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, - 0x74, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x51, 0x0a, 0x07, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x70, - 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0xe6, 0x19, 0x0a, - 0x0a, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x50, 0x0a, 0x0b, 0x64, - 0x68, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 
0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x52, 0x0a, 0x64, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x74, 0x0a, - 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, - 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x12, 0x65, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, - 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, - 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x50, - 0x6f, 0x6f, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x48, 0x0a, 0x08, 0x62, 0x61, - 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 
0x69, - 0x63, 0x73, 0x2e, 0x42, 0x61, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x61, 0x6e, - 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x61, 0x74, - 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, - 0x62, 0x61, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, - 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x04, 0x64, 0x69, 0x73, 0x6b, 0x12, 0x68, 0x0a, 0x12, 0x72, 0x65, 0x63, - 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, - 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, - 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, - 0x6f, 0x72, 0x65, 0x12, 0x71, 0x0a, 0x15, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, - 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, - 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x13, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, - 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x12, 0x76, 0x0a, 0x18, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x79, 0x5f, - 0x69, 0x70, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, - 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, - 0x79, 0x49, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x79, 0x49, 0x70, 0x12, 0x7f, - 0x0a, 0x1b, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, - 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, - 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x42, 0x79, 0x49, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x17, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, - 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x42, 0x79, 0x49, 0x70, 0x1a, - 0xc0, 0x05, 0x0a, 0x0a, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 
0x69, 0x63, 0x73, 0x12, 0x73, - 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, - 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x53, 0x74, - 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, - 0x12, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x63, - 0x65, 0x6e, 0x74, 0x12, 0x76, 0x0a, 0x15, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x72, 0x65, 0x74, - 0x72, 0x69, 0x65, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, - 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x44, 0x68, 0x74, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, - 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x13, 0x62, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, - 0x72, 0x69, 0x65, 0x76, 0x65, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x68, - 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, - 0x6b, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x68, 0x6f, 0x74, 0x50, - 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x6b, 0x69, 0x70, 0x73, 0x12, 0x35, - 0x0a, 0x17, 0x68, 0x6f, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x62, 0x61, 0x6e, 0x5f, 0x69, - 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x14, 
0x68, 0x6f, 0x74, 0x50, 0x61, 0x74, 0x68, 0x42, 0x61, 0x6e, 0x49, 0x6e, 0x63, 0x72, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x8f, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, - 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, - 0x75, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x66, 0x75, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, - 0x72, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x73, 0x75, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x1a, 0xc8, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, - 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6b, - 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, - 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x66, - 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0a, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x23, 0x0a, 0x0d, - 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0c, 0x66, 0x6f, 
0x75, 0x6e, 0x64, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x73, 0x1a, 0x74, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x9d, 0x01, 0x0a, 0x08, 0x42, 0x61, 0x6e, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x26, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x75, 0x6e, - 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x41, 0x74, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x65, 0x0a, 0x0d, 0x44, 
0x61, 0x74, 0x61, - 0x62, 0x61, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x32, 0x70, - 0x5f, 0x64, 0x62, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x0b, 0x70, 0x32, 0x70, 0x44, 0x62, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x2f, - 0x0a, 0x14, 0x70, 0x32, 0x70, 0x5f, 0x64, 0x62, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x70, 0x32, - 0x70, 0x44, 0x62, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, - 0x55, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x0a, - 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x61, - 0x6c, 0x6c, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x6d, 0x62, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x73, 0x65, 0x64, 0x4d, 0x62, 0x12, 0x17, 0x0a, - 0x07, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, - 0x66, 0x72, 0x65, 0x65, 0x4d, 0x62, 0x1a, 0x7c, 0x0a, 0x19, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, - 0x4d, 0x65, 0x74, 
0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xc9, 0x01, 0x0a, 0x15, 0x52, 0x65, 0x63, - 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, - 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, - 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x1f, 0x0a, - 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x0e, - 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x12, 0x14, - 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x1a, 0xdc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x12, 0x1b, - 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 
0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, - 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x49, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0b, - 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x1a, 0x6c, 0x0a, 0x14, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x54, 0x0a, 0x07, 0x65, - 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, - 0x6f, 0x72, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, - 0x73, 0x1a, 0x72, 0x0a, 0x17, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, - 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x07, - 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, - 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 
0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, - 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, - 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x82, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x79, 0x49, 0x70, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, - 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x88, 0x01, 0x0a, 0x1c, 0x52, - 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, - 0x76, 0x65, 0x42, 0x79, 0x49, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, - 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x32, 0x50, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, - 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 
0x32, 0xd7, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, - 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x69, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, - 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, - 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, - 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_supernode_supernode_proto_rawDescOnce sync.Once - file_supernode_supernode_proto_rawDescData = file_supernode_supernode_proto_rawDesc -) - -func file_supernode_supernode_proto_rawDescGZIP() []byte { - file_supernode_supernode_proto_rawDescOnce.Do(func() 
{ - file_supernode_supernode_proto_rawDescData = protoimpl.X.CompressGZIP(file_supernode_supernode_proto_rawDescData) - }) - return file_supernode_supernode_proto_rawDescData -} - -var file_supernode_supernode_proto_msgTypes = make([]protoimpl.MessageInfo, 27) -var file_supernode_supernode_proto_goTypes = []any{ - (*StatusRequest)(nil), // 0: supernode.StatusRequest - (*ListServicesRequest)(nil), // 1: supernode.ListServicesRequest - (*ListServicesResponse)(nil), // 2: supernode.ListServicesResponse - (*ServiceInfo)(nil), // 3: supernode.ServiceInfo - (*StatusResponse)(nil), // 4: supernode.StatusResponse - (*StatusResponse_Resources)(nil), // 5: supernode.StatusResponse.Resources - (*StatusResponse_ServiceTasks)(nil), // 6: supernode.StatusResponse.ServiceTasks - (*StatusResponse_Network)(nil), // 7: supernode.StatusResponse.Network - (*StatusResponse_P2PMetrics)(nil), // 8: supernode.StatusResponse.P2PMetrics - (*StatusResponse_Resources_CPU)(nil), // 9: supernode.StatusResponse.Resources.CPU - (*StatusResponse_Resources_Memory)(nil), // 10: supernode.StatusResponse.Resources.Memory - (*StatusResponse_Resources_Storage)(nil), // 11: supernode.StatusResponse.Resources.Storage - (*StatusResponse_P2PMetrics_DhtMetrics)(nil), // 12: supernode.StatusResponse.P2PMetrics.DhtMetrics - (*StatusResponse_P2PMetrics_HandleCounters)(nil), // 13: supernode.StatusResponse.P2PMetrics.HandleCounters - (*StatusResponse_P2PMetrics_BanEntry)(nil), // 14: supernode.StatusResponse.P2PMetrics.BanEntry - (*StatusResponse_P2PMetrics_DatabaseStats)(nil), // 15: supernode.StatusResponse.P2PMetrics.DatabaseStats - (*StatusResponse_P2PMetrics_DiskStatus)(nil), // 16: supernode.StatusResponse.P2PMetrics.DiskStatus - nil, // 17: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry - nil, // 18: supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry - (*StatusResponse_P2PMetrics_RecentBatchStoreEntry)(nil), // 19: supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry - 
(*StatusResponse_P2PMetrics_RecentBatchRetrieveEntry)(nil), // 20: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry - (*StatusResponse_P2PMetrics_RecentBatchStoreList)(nil), // 21: supernode.StatusResponse.P2PMetrics.RecentBatchStoreList - (*StatusResponse_P2PMetrics_RecentBatchRetrieveList)(nil), // 22: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList - nil, // 23: supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry - nil, // 24: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry - (*StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint)(nil), // 25: supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - (*StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint)(nil), // 26: supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint -} -var file_supernode_supernode_proto_depIdxs = []int32{ - 3, // 0: supernode.ListServicesResponse.services:type_name -> supernode.ServiceInfo - 5, // 1: supernode.StatusResponse.resources:type_name -> supernode.StatusResponse.Resources - 6, // 2: supernode.StatusResponse.running_tasks:type_name -> supernode.StatusResponse.ServiceTasks - 7, // 3: supernode.StatusResponse.network:type_name -> supernode.StatusResponse.Network - 8, // 4: supernode.StatusResponse.p2p_metrics:type_name -> supernode.StatusResponse.P2PMetrics - 9, // 5: supernode.StatusResponse.Resources.cpu:type_name -> supernode.StatusResponse.Resources.CPU - 10, // 6: supernode.StatusResponse.Resources.memory:type_name -> supernode.StatusResponse.Resources.Memory - 11, // 7: supernode.StatusResponse.Resources.storage_volumes:type_name -> supernode.StatusResponse.Resources.Storage - 12, // 8: supernode.StatusResponse.P2PMetrics.dht_metrics:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics - 17, // 9: supernode.StatusResponse.P2PMetrics.network_handle_metrics:type_name -> supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry - 18, // 10: 
supernode.StatusResponse.P2PMetrics.conn_pool_metrics:type_name -> supernode.StatusResponse.P2PMetrics.ConnPoolMetricsEntry - 14, // 11: supernode.StatusResponse.P2PMetrics.ban_list:type_name -> supernode.StatusResponse.P2PMetrics.BanEntry - 15, // 12: supernode.StatusResponse.P2PMetrics.database:type_name -> supernode.StatusResponse.P2PMetrics.DatabaseStats - 16, // 13: supernode.StatusResponse.P2PMetrics.disk:type_name -> supernode.StatusResponse.P2PMetrics.DiskStatus - 19, // 14: supernode.StatusResponse.P2PMetrics.recent_batch_store:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry - 20, // 15: supernode.StatusResponse.P2PMetrics.recent_batch_retrieve:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry - 23, // 16: supernode.StatusResponse.P2PMetrics.recent_batch_store_by_ip:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry - 24, // 17: supernode.StatusResponse.P2PMetrics.recent_batch_retrieve_by_ip:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry - 25, // 18: supernode.StatusResponse.P2PMetrics.DhtMetrics.store_success_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.StoreSuccessPoint - 26, // 19: supernode.StatusResponse.P2PMetrics.DhtMetrics.batch_retrieve_recent:type_name -> supernode.StatusResponse.P2PMetrics.DhtMetrics.BatchRetrievePoint - 13, // 20: supernode.StatusResponse.P2PMetrics.NetworkHandleMetricsEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.HandleCounters - 19, // 21: supernode.StatusResponse.P2PMetrics.RecentBatchStoreList.entries:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreEntry - 20, // 22: supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList.entries:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveEntry - 21, // 23: supernode.StatusResponse.P2PMetrics.RecentBatchStoreByIpEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchStoreList - 22, // 24: 
supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveByIpEntry.value:type_name -> supernode.StatusResponse.P2PMetrics.RecentBatchRetrieveList - 0, // 25: supernode.SupernodeService.GetStatus:input_type -> supernode.StatusRequest - 1, // 26: supernode.SupernodeService.ListServices:input_type -> supernode.ListServicesRequest - 4, // 27: supernode.SupernodeService.GetStatus:output_type -> supernode.StatusResponse - 2, // 28: supernode.SupernodeService.ListServices:output_type -> supernode.ListServicesResponse - 27, // [27:29] is the sub-list for method output_type - 25, // [25:27] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name -} - -func init() { file_supernode_supernode_proto_init() } -func file_supernode_supernode_proto_init() { - if File_supernode_supernode_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_supernode_supernode_proto_rawDesc, - NumEnums: 0, - NumMessages: 27, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_supernode_supernode_proto_goTypes, - DependencyIndexes: file_supernode_supernode_proto_depIdxs, - MessageInfos: file_supernode_supernode_proto_msgTypes, - }.Build() - File_supernode_supernode_proto = out.File - file_supernode_supernode_proto_rawDesc = nil - file_supernode_supernode_proto_goTypes = nil - file_supernode_supernode_proto_depIdxs = nil -} diff --git a/gen/supernode/supernode.pb.gw.go b/gen/supernode/supernode.pb.gw.go deleted file mode 100644 index 0976b8b7..00000000 --- a/gen/supernode/supernode.pb.gw.go +++ /dev/null @@ -1,236 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: supernode/supernode.proto - -/* -Package supernode is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package supernode - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_SupernodeService_GetStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.GetStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, 
req.Form, filter_SupernodeService_GetStatus_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.GetStatus(ctx, &protoReq) - return msg, metadata, err - -} - -func request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListServicesRequest - var metadata runtime.ServerMetadata - - msg, err := client.ListServices(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListServicesRequest - var metadata runtime.ServerMetadata - - msg, err := server.ListServices(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterSupernodeServiceHandlerServer registers the http handlers for service SupernodeService to "mux". -// UnaryRPC :call SupernodeServiceServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSupernodeServiceHandlerFromEndpoint instead. 
-func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SupernodeServiceServer) error { - - mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_SupernodeService_GetStatus_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_SupernodeService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_SupernodeService_ListServices_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_SupernodeService_ListServices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterSupernodeServiceHandlerFromEndpoint is same as RegisterSupernodeServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterSupernodeServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterSupernodeServiceHandler(ctx, mux, conn) -} - -// RegisterSupernodeServiceHandler registers the http handlers for service SupernodeService to "mux". 
-// The handlers forward requests to the grpc endpoint over "conn". -func RegisterSupernodeServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterSupernodeServiceHandlerClient(ctx, mux, NewSupernodeServiceClient(conn)) -} - -// RegisterSupernodeServiceHandlerClient registers the http handlers for service SupernodeService -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SupernodeServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SupernodeServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "SupernodeServiceClient" to call the correct interceptors. -func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SupernodeServiceClient) error { - - mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_SupernodeService_GetStatus_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_SupernodeService_GetStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_SupernodeService_ListServices_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_SupernodeService_ListServices_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage -) diff --git a/gen/supernode/supernode_grpc.pb.go b/gen/supernode/supernode_grpc.pb.go deleted file mode 100644 index 97eb3a0a..00000000 --- a/gen/supernode/supernode_grpc.pb.go +++ /dev/null @@ -1,163 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v3.21.12 -// source: supernode/supernode.proto - -package supernode - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - SupernodeService_GetStatus_FullMethodName = "/supernode.SupernodeService/GetStatus" - SupernodeService_ListServices_FullMethodName = "/supernode.SupernodeService/ListServices" -) - -// SupernodeServiceClient is the client API for SupernodeService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -// -// SupernodeService provides status information for all services -type SupernodeServiceClient interface { - GetStatus(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) - ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) -} - -type supernodeServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewSupernodeServiceClient(cc grpc.ClientConnInterface) SupernodeServiceClient { - return &supernodeServiceClient{cc} -} - -func (c *supernodeServiceClient) GetStatus(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(StatusResponse) - err := c.cc.Invoke(ctx, SupernodeService_GetStatus_FullMethodName, in, out, cOpts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *supernodeServiceClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(ListServicesResponse) - err := c.cc.Invoke(ctx, SupernodeService_ListServices_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SupernodeServiceServer is the server API for SupernodeService service. -// All implementations must embed UnimplementedSupernodeServiceServer -// for forward compatibility. -// -// SupernodeService provides status information for all services -type SupernodeServiceServer interface { - GetStatus(context.Context, *StatusRequest) (*StatusResponse, error) - ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) - mustEmbedUnimplementedSupernodeServiceServer() -} - -// UnimplementedSupernodeServiceServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedSupernodeServiceServer struct{} - -func (UnimplementedSupernodeServiceServer) GetStatus(context.Context, *StatusRequest) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetStatus not implemented") -} -func (UnimplementedSupernodeServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented") -} -func (UnimplementedSupernodeServiceServer) mustEmbedUnimplementedSupernodeServiceServer() {} -func (UnimplementedSupernodeServiceServer) testEmbeddedByValue() {} - -// UnsafeSupernodeServiceServer may be embedded to opt out of forward compatibility for this service. 
-// Use of this interface is not recommended, as added methods to SupernodeServiceServer will -// result in compilation errors. -type UnsafeSupernodeServiceServer interface { - mustEmbedUnimplementedSupernodeServiceServer() -} - -func RegisterSupernodeServiceServer(s grpc.ServiceRegistrar, srv SupernodeServiceServer) { - // If the following call pancis, it indicates UnimplementedSupernodeServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&SupernodeService_ServiceDesc, srv) -} - -func _SupernodeService_GetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SupernodeServiceServer).GetStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SupernodeService_GetStatus_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SupernodeServiceServer).GetStatus(ctx, req.(*StatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SupernodeService_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListServicesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SupernodeServiceServer).ListServices(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SupernodeService_ListServices_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(SupernodeServiceServer).ListServices(ctx, req.(*ListServicesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// SupernodeService_ServiceDesc is the grpc.ServiceDesc for SupernodeService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var SupernodeService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "supernode.SupernodeService", - HandlerType: (*SupernodeServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetStatus", - Handler: _SupernodeService_GetStatus_Handler, - }, - { - MethodName: "ListServices", - Handler: _SupernodeService_ListServices_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "supernode/supernode.proto", -} diff --git a/go.mod b/go.mod index 46091df2..9c163675 100644 --- a/go.mod +++ b/go.mod @@ -1,21 +1,18 @@ module github.com/LumeraProtocol/supernode/v2 -go 1.24.1 +go 1.25.1 -replace ( - github.com/bytedance/sonic => github.com/bytedance/sonic v1.14.0 - github.com/bytedance/sonic/loader => github.com/bytedance/sonic/loader v0.3.0 -) +replace github.com/cosmos/cosmos-sdk => github.com/cosmos/cosmos-sdk v0.50.14 require ( cosmossdk.io/math v1.5.3 github.com/AlecAivazis/survey/v2 v2.3.7 - github.com/LumeraProtocol/lumera v1.7.0 + github.com/LumeraProtocol/lumera v1.8.0 github.com/LumeraProtocol/rq-go v0.2.1 github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/cenkalti/backoff/v4 v4.3.0 github.com/cosmos/btcutil v1.0.5 - github.com/cosmos/cosmos-sdk v0.50.13 + github.com/cosmos/cosmos-sdk v0.53.0 github.com/cosmos/go-bip39 v1.0.0 github.com/cosmos/gogoproto v1.7.0 github.com/dgraph-io/ristretto/v2 v2.2.0 @@ -24,7 +21,7 @@ require ( github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 github.com/google/uuid v1.6.0 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 github.com/jmoiron/sqlx v1.4.0 github.com/json-iterator/go v1.1.12 
github.com/klauspost/compress v1.18.0 @@ -33,139 +30,145 @@ require ( github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 github.com/shirou/gopsutil/v3 v3.24.5 - github.com/spf13/cobra v1.8.1 - github.com/spf13/viper v1.19.0 - github.com/stretchr/testify v1.10.0 - go.uber.org/mock v0.5.2 + github.com/spf13/cobra v1.10.1 + github.com/stretchr/testify v1.11.1 + go.uber.org/mock v0.6.0 go.uber.org/ratelimit v0.3.1 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.36.0 - golang.org/x/sync v0.12.0 - golang.org/x/sys v0.31.0 - google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 - google.golang.org/grpc v1.71.0 - google.golang.org/protobuf v1.36.6 + golang.org/x/crypto v0.42.0 + golang.org/x/sync v0.17.0 + golang.org/x/sys v0.36.0 + google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 + google.golang.org/grpc v1.76.0 + google.golang.org/protobuf v1.36.10 gopkg.in/yaml.v3 v3.0.1 - lukechampine.com/blake3 v1.4.0 + lukechampine.com/blake3 v1.4.1 ) require ( - cosmossdk.io/api v0.9.0 // indirect - cosmossdk.io/collections v0.4.0 // indirect + cosmossdk.io/api v0.9.2 // indirect + cosmossdk.io/collections v1.3.0 // indirect cosmossdk.io/core v0.11.3 // indirect - cosmossdk.io/depinject v1.1.0 // indirect + cosmossdk.io/depinject v1.2.0 // indirect cosmossdk.io/errors v1.0.2 // indirect - cosmossdk.io/log v1.5.0 // indirect - cosmossdk.io/store v1.1.1 // indirect - cosmossdk.io/x/tx v0.13.7 // indirect + cosmossdk.io/log v1.6.0 // indirect + cosmossdk.io/schema v1.1.0 // indirect + cosmossdk.io/store v1.1.2 // indirect + cosmossdk.io/x/tx v0.14.0 // indirect + cosmossdk.io/x/upgrade v0.2.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect - github.com/DataDog/datadog-go v3.2.0+incompatible // indirect - github.com/DataDog/zstd v1.5.5 // indirect + 
github.com/DataDog/datadog-go v4.8.3+incompatible // indirect + github.com/DataDog/zstd v1.5.7 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect - github.com/bytedance/sonic v1.12.3 // indirect + github.com/bgentry/speakeasy v0.2.0 // indirect + github.com/bytedance/gopkg v0.1.3 // indirect + github.com/bytedance/sonic v1.14.1 // indirect github.com/bytedance/sonic/loader v0.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cloudwego/base64x v0.1.5 // indirect - github.com/cockroachdb/errors v1.11.3 // indirect - github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect - github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v1.1.2 // indirect - github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect + github.com/cockroachdb/errors v1.12.0 // indirect + github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a // indirect + github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect + github.com/cockroachdb/pebble v1.1.5 // indirect + github.com/cockroachdb/redact v1.1.6 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/cometbft/cometbft v0.38.15 // indirect + github.com/cometbft/cometbft v0.38.18 // indirect github.com/cometbft/cometbft-db v0.14.1 // indirect - github.com/cosmos/cosmos-db v1.1.1 // indirect + github.com/cosmos/cosmos-db v1.1.2 // indirect github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect github.com/cosmos/gogogateway v1.2.0 // indirect - github.com/cosmos/iavl v1.2.2 // indirect + github.com/cosmos/iavl v1.2.4 // indirect + github.com/cosmos/ibc-go/v10 v10.3.0 // indirect github.com/cosmos/ics23/go v0.11.0 // indirect 
github.com/cosmos/ledger-cosmos-go v0.14.0 // indirect - github.com/danieljoos/wincred v1.2.1 // indirect + github.com/danieljoos/wincred v1.2.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect - github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/desertbit/timer v1.0.1 // indirect github.com/dgraph-io/badger/v4 v4.2.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/dvsekhvalnov/jose2go v1.7.0 // indirect github.com/emicklei/dot v1.6.2 // indirect - github.com/fatih/color v1.16.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/getsentry/sentry-go v0.32.0 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.4 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/flatbuffers v1.12.1 // indirect + github.com/google/flatbuffers v24.3.25+incompatible // indirect 
github.com/google/go-cmp v0.7.0 // indirect + github.com/google/orderedcode v0.0.1 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-metrics v0.5.3 // indirect - github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-plugin v1.6.3 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - github.com/hdevalence/ed25519consensus v0.1.0 // indirect - github.com/huandu/skiplist v1.2.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/yamux v0.1.2 // indirect + github.com/hdevalence/ed25519consensus v0.2.0 // indirect + github.com/huandu/skiplist v1.2.1 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/improbable-eng/grpc-web v0.15.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/linxGnu/grocksdb v1.9.8 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mattn/go-colorable v0.1.13 // 
indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/minio/highwayhash v1.0.3 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect github.com/oklog/run v1.1.0 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.20.5 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.63.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/rs/cors v1.11.1 // indirect - github.com/rs/zerolog v1.33.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/rs/zerolog v1.34.0 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sasha-s/go-deadlock v0.3.5 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.11.0 // indirect 
- github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spf13/viper v1.21.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect @@ -177,21 +180,21 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect - go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 // indirect + go.etcd.io/bbolt v1.4.0-alpha.1 // indirect go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/arch v0.3.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/arch v0.15.0 // indirect + golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d // indirect - golang.org/x/net v0.38.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect - google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gotest.tools/v3 v3.5.1 // indirect + golang.org/x/net v0.44.0 // indirect + golang.org/x/term v0.35.0 // indirect + golang.org/x/text v0.29.0 // indirect + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect + gotest.tools/v3 v3.5.2 // indirect lukechampine.com/uint128 v1.3.0 // indirect - nhooyr.io/websocket v1.8.10 // indirect + nhooyr.io/websocket v1.8.17 // 
indirect pgregory.net/rapid v1.2.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index d8170371..51e25901 100644 --- a/go.sum +++ b/go.sum @@ -1,48 +1,52 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.6.0 h1:5x+d6b5zdezZ7gmLWD1m/xNjnaQ2YDhmIz/HH3doy1g= -cloud.google.com/go/auth v0.6.0/go.mod h1:b4acV+jLQDyjwm4OXHYjNvRi4jvGBzHWJRtJcy+2P4g= -cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= -cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= -cloud.google.com/go/compute v1.27.1 h1:0WbBLIPNANheCRZ4h8QhgzjN53KMutbiVBOLtPiVzBU= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= -cloud.google.com/go/iam v1.1.9 h1:oSkYLVtVme29uGYrOcKcvJRht7cHJpYD09GM9JaR0TE= -cloud.google.com/go/iam v1.1.9/go.mod h1:Nt1eDWNYH9nGQg3d/mY7U1hvfGmsaG9o/kLGoLoLXjQ= -cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= -cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= -cosmossdk.io/api v0.9.0 h1:QYs9APeSlDNGbsBOBFjp3jXgGd4hnEPnnku3+W3tT4Y= -cosmossdk.io/api v0.9.0/go.mod h1:pLkU/NSqYHWxyN7XftVt8iD7oldKJzqMZgzeiOmT2nk= -cosmossdk.io/client/v2 v2.0.0-beta.5 h1:0LVv3nEByn//hFDIrYLs2WvsEU3HodOelh4SDHnA/1I= -cosmossdk.io/client/v2 v2.0.0-beta.5/go.mod h1:4p0P6o0ro+FizakJUYS9SeM94RNbv0thLmkHRw5o5as= 
-cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= -cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= +cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= +cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= +cloud.google.com/go/auth v0.16.0 h1:Pd8P1s9WkcrBE2n/PhAwKsdrR35V3Sg2II9B+ndM3CU= +cloud.google.com/go/auth v0.16.0/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute v1.37.0 h1:XxtZlXYkZXub3LNaLu90TTemcFqIU1yZ4E4q9VlR39A= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= +cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= +cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= +cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= +cosmossdk.io/client/v2 v2.0.0-beta.8.0.20250402172810-41e3e9d004a1 h1:nlMUeKu6CGrO7Gxt5S31qT3g27CHmBJHsZPjqHApVTI= +cosmossdk.io/client/v2 v2.0.0-beta.8.0.20250402172810-41e3e9d004a1/go.mod h1:xgv0ejeOk5yeDraPW5tv+PfBkCDt4yYa/+u45MyP+bM= +cosmossdk.io/collections v1.3.0 h1:RUY23xXBy/bu5oSHZ5y+mkJRyA4ZboKDO4Yvx4+g2uc= +cosmossdk.io/collections 
v1.3.0/go.mod h1:cqVpBMDGEYhuNmNSXIOmqpnQ7Eav43hpJIetzLuEkns= cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= cosmossdk.io/core v0.11.3/go.mod h1:9rL4RE1uDt5AJ4Tg55sYyHWXA16VmpHgbe0PbJc6N2Y= -cosmossdk.io/depinject v1.1.0 h1:wLan7LG35VM7Yo6ov0jId3RHWCGRhe8E8bsuARorl5E= -cosmossdk.io/depinject v1.1.0/go.mod h1:kkI5H9jCGHeKeYWXTqYdruogYrEeWvBQCw1Pj4/eCFI= +cosmossdk.io/depinject v1.2.0 h1:6NW/FSK1IkWTrX7XxUpBmX1QMBozpEI9SsWkKTBc5zw= +cosmossdk.io/depinject v1.2.0/go.mod h1:pvitjtUxZZZTQESKNS9KhGjWVslJZxtO9VooRJYyPjk= cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= -cosmossdk.io/log v1.5.0 h1:dVdzPJW9kMrnAYyMf1duqacoidB9uZIl+7c6z0mnq0g= -cosmossdk.io/log v1.5.0/go.mod h1:Tr46PUJjiUthlwQ+hxYtUtPn4D/oCZXAkYevBeh5+FI= +cosmossdk.io/log v1.6.0 h1:SJIOmJ059wi1piyRgNRXKXhlDXGqnB5eQwhcZKv2tOk= +cosmossdk.io/log v1.6.0/go.mod h1:5cXXBvfBkR2/BcXmosdCSLXllvgSjphrrDVdfVRmBGM= cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= -cosmossdk.io/store v1.1.1 h1:NA3PioJtWDVU7cHHeyvdva5J/ggyLDkyH0hGHl2804Y= -cosmossdk.io/store v1.1.1/go.mod h1:8DwVTz83/2PSI366FERGbWSH7hL6sB7HbYp8bqksNwM= +cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= +cosmossdk.io/schema v1.1.0/go.mod h1:Gb7pqO+tpR+jLW5qDcNOSv0KtppYs7881kfzakguhhI= +cosmossdk.io/store v1.1.2 h1:3HOZG8+CuThREKv6cn3WSohAc6yccxO3hLzwK6rBC7o= +cosmossdk.io/store v1.1.2/go.mod h1:60rAGzTHevGm592kFhiUVkNC9w7gooSEn5iUBPzHQ6A= cosmossdk.io/x/circuit v0.1.1 h1:KPJCnLChWrxD4jLwUiuQaf5mFD/1m7Omyo7oooefBVQ= cosmossdk.io/x/circuit v0.1.1/go.mod h1:B6f/urRuQH8gjt4eLIXfZJucrbreuYrKh5CSjaOxr+Q= cosmossdk.io/x/evidence v0.1.1 h1:Ks+BLTa3uftFpElLTDp9L76t2b58htjVbSZ86aoK/E4= cosmossdk.io/x/evidence v0.1.1/go.mod h1:OoDsWlbtuyqS70LY51aX8FBTvguQqvFrt78qL7UzeNc= cosmossdk.io/x/feegrant v0.1.1 
h1:EKFWOeo/pup0yF0svDisWWKAA9Zags6Zd0P3nRvVvw8= cosmossdk.io/x/feegrant v0.1.1/go.mod h1:2GjVVxX6G2fta8LWj7pC/ytHjryA6MHAJroBWHFNiEQ= -cosmossdk.io/x/nft v0.1.1 h1:pslAVS8P5NkW080+LWOamInjDcq+v2GSCo+BjN9sxZ8= -cosmossdk.io/x/nft v0.1.1/go.mod h1:Kac6F6y2gsKvoxU+fy8uvxRTi4BIhLOor2zgCNQwVgY= -cosmossdk.io/x/tx v0.13.7 h1:8WSk6B/OHJLYjiZeMKhq7DK7lHDMyK0UfDbBMxVmeOI= -cosmossdk.io/x/tx v0.13.7/go.mod h1:V6DImnwJMTq5qFjeGWpXNiT/fjgE4HtmclRmTqRVM3w= -cosmossdk.io/x/upgrade v0.1.4 h1:/BWJim24QHoXde8Bc64/2BSEB6W4eTydq0X/2f8+g38= -cosmossdk.io/x/upgrade v0.1.4/go.mod h1:9v0Aj+fs97O+Ztw+tG3/tp5JSlrmT7IcFhAebQHmOPo= +cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA= +cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM= +cosmossdk.io/x/upgrade v0.2.0 h1:ZHy0xny3wBCSLomyhE06+UmQHWO8cYlVYjfFAJxjz5g= +cosmossdk.io/x/upgrade v0.2.0/go.mod h1:DXDtkvi//TrFyHWSOaeCZGBoiGAE6Rs8/0ABt2pcDD0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= @@ -52,27 +56,42 @@ github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XB github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/CosmWasm/wasmd v0.53.0 h1:kdaoAi20bIb4VCsxw9pRaT2g5PpIp82Wqrr9DRVN9ao= -github.com/CosmWasm/wasmd v0.53.0/go.mod h1:FJl/aWjdpGof3usAMFQpDe07Rkx77PUzp0cygFMOvtw= -github.com/CosmWasm/wasmvm/v2 v2.1.2 h1:GkJ5bAsRlLHfIQVg/FY1VHwLyBwlCjAhDea0B8L+e20= -github.com/CosmWasm/wasmvm/v2 v2.1.2/go.mod h1:bMhLQL4Yp9CzJi9A83aR7VO9wockOsSlZbT4ztOl6bg= -github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/CosmWasm/wasmd v0.55.0-ibc2.0 h1:9bH+QDnSGxmZhjSykLYGtW4sltzGFFVm10Awk683q2Y= +github.com/CosmWasm/wasmd v0.55.0-ibc2.0/go.mod h1:c9l+eycjUB2zNVLIGjAXd7QrFEbxVTEa1Fh1Mx74VwQ= +github.com/CosmWasm/wasmvm/v3 v3.0.0-ibc2.0 h1:QoagSm5iYuRSPYDxgRxsa6hVfDppUp4+bOwY7bDuMO0= +github.com/CosmWasm/wasmvm/v3 v3.0.0-ibc2.0/go.mod h1:oknpb1bFERvvKcY7vHRp1F/Y/z66xVrsl7n9uWkOAlM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= -github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= +github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= +github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric 
v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.7.0 h1:F5zgRBnCtgGfdMB6jz01PFWIzbS8VjQfCu1H9OYt3BU= -github.com/LumeraProtocol/lumera v1.7.0/go.mod h1:c1M+sjewuCvxw+pznwlspUzenDJI8Y+suKB3RFKS2Wo= +github.com/LumeraProtocol/lumera v1.8.0 h1:uE7mrK2/F6naY6Y09+D1DP9n1/EVkE3IS8w0dzCMESY= +github.com/LumeraProtocol/lumera v1.8.0/go.mod h1:38uAZxxleZyXaWKbqOQKwjw7CSX92lTxdF+B7c4SRPw= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram 
v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/adlio/schema v1.3.6 h1:k1/zc2jNfeiZBA5aFTRy37jlBIuCkXCm0XmvpzCKI9I= +github.com/adlio/schema v1.3.6/go.mod h1:qkxwLgPBd1FgLRHYVCmQT/rrBr3JH38J9LjmVzWNudg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -85,12 +104,13 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.44.224 h1:09CiaaF35nRmxrzWZ2uRq5v6Ghg/d2RiPjZnSgtt+RQ= -github.com/aws/aws-sdk-go v1.44.224/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.49.0 h1:g9BkW1fo9GqKfwg2+zCD+TW/D36Ux+vtfJ8guF4AYmY= +github.com/aws/aws-sdk-go v1.49.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.1.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= @@ -102,10 +122,10 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= -github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bgentry/speakeasy v0.2.0 h1:tgObeVOf8WAvtuAX6DhJ4xks4CFNwPDZiqzGqIHE51E= +github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4= +github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= @@ -121,13 +141,16 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod 
h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/protocompile v0.14.0 h1:z3DW4IvXE5G/uTOnSQn+qwQQxvhckkTWLS/0No/o7KU= -github.com/bufbuild/protocompile v0.14.0/go.mod h1:N6J1NYzkspJo3ZwyL4Xjvli86XOj1xq4qAasUFxGups= -github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= -github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= +github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= +github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= +github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM= +github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w= +github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc= github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -145,8 +168,8 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= -github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= @@ -154,40 +177,44 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.11.3 
h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= -github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= -github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= -github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= -github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= -github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= -github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/errors v1.12.0 h1:d7oCs6vuIMUQRVbi6jWWWEJZahLCfJpnJSVobd1/sUo= +github.com/cockroachdb/errors v1.12.0/go.mod h1:SvzfYNNBshAVbZ8wzNc/UPK3w1vf0dKDUP41ucAIf7g= +github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a h1:f52TdbU4D5nozMAhO9TvTJ2ZMCXtN4VIAmfrrZ0JXQ4= +github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 h1:ASDL+UJcILMqgNeV5jiqR4j+sTuvQNHdf2chuKj1M5k= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506/go.mod h1:Mw7HqKr2kdtu6aYGn3tPmAftiP3QPX63LdK/zcariIo= +github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/redact v1.1.6 h1:zXJBwDZ84xJNlHl1rMyCojqyIxv+7YUpQiJLQ7n4314= +github.com/cockroachdb/redact v1.1.6/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= 
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/cometbft/cometbft v0.38.15 h1:5veFd8k1uXM27PBg9sMO3hAfRJ3vbh4OmmLf6cVrqXg= -github.com/cometbft/cometbft v0.38.15/go.mod h1:+wh6ap6xctVG+JOHwbl8pPKZ0GeqdPYqISu7F4b43cQ= +github.com/cometbft/cometbft v0.38.18 h1:1ZHYMdu0S75YxFM13LlPXnOwiIpUW5z9TKMQtTIALpw= +github.com/cometbft/cometbft v0.38.18/go.mod h1:PlOQgf3jQorep+g6oVnJgtP65TJvBJoLiXjGaMdNxBE= github.com/cometbft/cometbft-db v0.14.1 h1:SxoamPghqICBAIcGpleHbmoPqy+crij/++eZz3DlerQ= github.com/cometbft/cometbft-db v0.14.1/go.mod h1:KHP1YghilyGV/xjD5DP3+2hyigWx0WTp9X+0Gnx0RxQ= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= -github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNCM= -github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw= +github.com/cosmos/cosmos-db v1.1.2 h1:KZm4xLlPp6rLkyIOmPOhh+XDK9oH1++pNH/csLdX0Dk= +github.com/cosmos/cosmos-db v1.1.2/go.mod 
h1:dMg2gav979Ig2N076POEw4CEKbCsieaOfDWSfFZxs8M= github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/cosmos-sdk v0.50.13 h1:xQ32hhzVy7agEe7behMdZN0ezWhPss3KoLZsF9KoBnw= -github.com/cosmos/cosmos-sdk v0.50.13/go.mod h1:hrWEFMU1eoXqLJeE6VVESpJDQH67FS1nnMrQIjO2daw= +github.com/cosmos/cosmos-sdk v0.50.14 h1:G8CtGHFWbExa+ZpVOVAb4kFmko/R30igsYOwyzRMtgY= +github.com/cosmos/cosmos-sdk v0.50.14/go.mod h1:hrWEFMU1eoXqLJeE6VVESpJDQH67FS1nnMrQIjO2daw= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= @@ -195,35 +222,36 @@ github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= github.com/cosmos/gogoproto v1.7.0 h1:79USr0oyXAbxg3rspGh/m4SWNyoz/GLaAh0QlCe2fro= github.com/cosmos/gogoproto v1.7.0/go.mod h1:yWChEv5IUEYURQasfyBW5ffkMHR/90hiHgbNgrtp4j0= -github.com/cosmos/iavl v1.2.2 h1:qHhKW3I70w+04g5KdsdVSHRbFLgt3yY3qTMd4Xa4rC8= -github.com/cosmos/iavl v1.2.2/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= -github.com/cosmos/ibc-go/modules/capability v1.0.1 h1:ibwhrpJ3SftEEZRxCRkH0fQZ9svjthrX2+oXdZvzgGI= -github.com/cosmos/ibc-go/modules/capability v1.0.1/go.mod h1:rquyOV262nGJplkumH+/LeYs04P3eV8oB7ZM4Ygqk4E= -github.com/cosmos/ibc-go/v8 v8.5.1 h1:3JleEMKBjRKa3FeTKt4fjg22za/qygLBo7mDkoYTNBs= -github.com/cosmos/ibc-go/v8 v8.5.1/go.mod h1:P5hkAvq0Qbg0h18uLxDVA9q1kOJ0l36htMsskiNwXbo= +github.com/cosmos/iavl v1.2.4 h1:IHUrG8dkyueKEY72y92jajrizbkZKPZbMmG14QzsEkw= +github.com/cosmos/iavl v1.2.4/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= 
+github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v10 v10.1.0 h1:epKcbFAeWRRw1i1jZnYzLIEm9sgUPaL1RftuRjjUKGw= +github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v10 v10.1.0/go.mod h1:S4ZQwf5/LhpOi8JXSAese/6QQDk87nTdicJPlZ5q9UQ= +github.com/cosmos/ibc-go/v10 v10.3.0 h1:w5DkHih8qn15deAeFoTk778WJU+xC1krJ5kDnicfUBc= +github.com/cosmos/ibc-go/v10 v10.3.0/go.mod h1:CthaR7n4d23PJJ7wZHegmNgbVcLXCQql7EwHrAXnMtw= github.com/cosmos/ics23/go v0.11.0 h1:jk5skjT0TqX5e5QJbEnwXIS2yI2vnmLOgpQPeM5RtnU= github.com/cosmos/ics23/go v0.11.0/go.mod h1:A8OjxPE67hHST4Icw94hOxxFEJMBG031xIGF/JHNIY0= github.com/cosmos/ledger-cosmos-go v0.14.0 h1:WfCHricT3rPbkPSVKRH+L4fQGKYHuGOK9Edpel8TYpE= github.com/cosmos/ledger-cosmos-go v0.14.0/go.mod h1:E07xCWSBl3mTGofZ2QnL4cIUzMbbGVyik84QYKbX3RA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= -github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= +github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= +github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= -github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/desertbit/timer v1.0.1 h1:yRpYNn5Vaaj6QXecdLMPMJsW81JLiI1eokUft5nBmeo= +github.com/desertbit/timer v1.0.1/go.mod h1:htRrYeY5V/t4iu1xCJ5XsQvp4xve8QulXXctAzxqcwE= github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= @@ -238,12 +266,16 @@ github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1 github.com/disintegration/imaging v1.6.2/go.mod 
h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= -github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvsekhvalnov/jose2go v1.7.0 h1:bnQc8+GMnidJZA8zc6lLEAb4xNrIqHwO+9TzqvtQZPo= +github.com/dvsekhvalnov/jose2go v1.7.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -257,11 +289,18 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod 
h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/ethereum/go-ethereum v1.15.11 h1:JK73WKeu0WC0O1eyX+mdQAVHUV+UR1a9VB/domDngBU= +github.com/ethereum/go-ethereum v1.15.11/go.mod h1:mf8YiHIb0GR4x4TipcvBUPxJLw1mFdmxzoDi11sDRoI= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= @@ -273,16 +312,18 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod 
h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= -github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/getsentry/sentry-go v0.32.0 h1:YKs+//QmwE3DcYtfKRH8/KyOOF/I6Qnx7qYGNHCGmCY= +github.com/getsentry/sentry-go v0.32.0/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI= +github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -296,8 +337,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod 
h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= @@ -311,6 +352,8 @@ github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpv github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= @@ -328,8 +371,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= -github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -358,20 +401,21 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= 
-github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= -github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= +github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -385,16 +429,16 @@ github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= 
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= -github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= +github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -415,6 +459,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod 
h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -424,19 +470,19 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.7.5 h1:dT58k9hQ/vbxNMwoI5+xFYAJuv6152UNvdHokfI5wE4= -github.com/hashicorp/go-getter v1.7.5/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-getter v1.7.8 h1:mshVHx1Fto0/MydBekWan5zUipGq7jO0novchgMmSiY= +github.com/hashicorp/go-getter v1.7.8/go.mod h1:2c6CboOEb9jG6YvmC9xdD+tyAFsrUaJPedwXDGr0TM4= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE= -github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= +github.com/hashicorp/go-metrics 
v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= +github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= @@ -444,11 +490,12 @@ github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoD github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= 
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -456,24 +503,26 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= -github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= +github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= +github.com/hdevalence/ed25519consensus v0.2.0 
h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= -github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= -github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/huandu/skiplist v1.2.1 h1:dTi93MgjwErA/8idWTzIw4Y1kZsMWx35fmI2c8Rij7w= +github.com/huandu/skiplist v1.2.1/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/orderedmap v0.3.0 h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJn+Ichc= +github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJDkXXS7VoV7XGE= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -501,6 +550,7 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -517,9 +567,8 @@ github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kolesa-team/go-webp v1.0.4 h1:wQvU4PLG/X7RS0vAeyhiivhLRoxfLVRlDq4I3frdxIQ= github.com/kolesa-team/go-webp v1.0.4/go.mod h1:oMvdivD6K+Q5qIIkVC2w4k2ZUnI1H+MyP7inwgWq9aA= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -532,6 +581,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 
h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -550,8 +601,9 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -582,8 +634,6 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -629,11 +679,15 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= -github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= +github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= 
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -642,6 +696,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -649,8 +705,8 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw= github.com/petermattis/goid 
v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= @@ -664,6 +720,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -676,8 +734,9 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -692,14 +751,16 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= +github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -714,16 +775,14 @@ github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= -github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.3.5 
h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= @@ -741,24 +800,30 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= 
+github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= -github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -778,10 +843,8 @@ github.com/stretchr/testify 
v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= @@ -811,47 +874,51 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= github.com/zondax/ledger-go v0.14.3/go.mod 
h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 h1:qxen9oVGzDdIRP6ejyAJc760RwW4SnVDiTYTzwnXuxo= -go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5/go.mod h1:eW0HG9/oHQhvRCvb1/pIXW4cOvtDqeQK+XSi3TnwaXY= +go.etcd.io/bbolt v1.4.0-alpha.1 h1:3yrqQzbRRPFPdOMWS/QQIVxVnzSkAZQYeWlZFv1kbj4= +go.etcd.io/bbolt v1.4.0-alpha.1/go.mod h1:S/Z/Nm3iuOnyO1W4XuFfPci51Gj6F1Hv0z8hisyYYOw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= 
-go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= 
+go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= -go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ 
-865,8 +932,10 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= -golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw= +golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -878,13 +947,13 @@ golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0= +golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -933,13 +1002,13 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -947,10 +1016,11 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -988,9 +1058,11 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1001,7 +1073,6 @@ golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1009,12 +1080,13 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1022,12 +1094,12 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1055,9 +1127,11 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api 
v0.186.0 h1:n2OPp+PPXX0Axh4GuSsL5QL8xQCTb2oDwyzPnQvqUug= -google.golang.org/api v0.186.0/go.mod h1:hvRbBmgoje49RV3xqVXrmP6w93n6ehGgIVPYrGtBFFc= +google.golang.org/api v0.229.0 h1:p98ymMtqeJ5i3lIBMj5MpR9kzIIgzpHHh8vQ+vgAzx8= +google.golang.org/api v0.229.0/go.mod h1:wyDfmq5g1wYJWn29O22FDWN48P7Xcz0xz+LBpptYvB0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1072,12 +1146,12 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 h1:6whtk83KtD3FkGrVb2hFXuQ+ZMbCNdakARIn/aHMmG8= -google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094/go.mod h1:Zs4wYw8z1zr6RNF4cwYb31mvN/EGaKAdQjNCF3DW6K4= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod 
h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU= +google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1096,8 +1170,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= -google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= +google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1112,8 +1186,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf 
v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1125,8 +1199,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1145,19 +1217,19 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 
v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= -lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +nhooyr.io/websocket v1.8.17 h1:KEVeLJkUywCKVsnLIDlD/5gtayKp8VoCkksHCGGfT9Y= +nhooyr.io/websocket v1.8.17/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/p2p/client.go b/p2p/client.go index 5d4a44be..b9834eec 100644 --- 
a/p2p/client.go +++ b/p2p/client.go @@ -17,7 +17,7 @@ type Client interface { // BatchRetrieve retrieve data from the kademlia network by keys // reqCount is the minimum number of keys that are actually required by the caller // to successfully perform the reuquired operation - BatchRetrieve(ctx context.Context, keys []string, reqCount int, txID string, localOnly ...bool) (map[string][]byte, error) + BatchRetrieve(ctx context.Context, keys []string, reqCount int, txID string, symbolWriter func(symbolID string, data []byte) error, localOnly ...bool) (map[string][]byte, error) // Store store data to the network, which will trigger the iterative store message // - the base58 encoded identifier will be returned Store(ctx context.Context, data []byte, typ int) (string, error) diff --git a/p2p/kademlia/bootstrap.go b/p2p/kademlia/bootstrap.go index 5b29f44d..25dc3b54 100644 --- a/p2p/kademlia/bootstrap.go +++ b/p2p/kademlia/bootstrap.go @@ -102,7 +102,7 @@ func (s *DHT) setBootstrapNodesFromConfigVar(ctx context.Context, bootstrapNodes }) } s.options.BootstrapNodes = nodes - logtrace.Info(ctx, "Bootstrap nodes set from config var", logtrace.Fields{ + logtrace.Debug(ctx, "Bootstrap nodes set from config var", logtrace.Fields{ logtrace.FieldModule: "p2p", "bootstrap_nodes": nodes, }) diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 69c45023..7ef8ceb4 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -17,17 +17,18 @@ import ( "github.com/btcsuite/btcutil/base58" "github.com/cenkalti/backoff/v4" "github.com/cosmos/cosmos-sdk/crypto/keyring" + "golang.org/x/sync/semaphore" "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" - "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" - 
"github.com/LumeraProtocol/supernode/v2/pkg/storage" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/memory" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/pkg/storage" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/memory" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" ) const ( @@ -42,9 +43,10 @@ const ( delKeysCountThreshold = 10 lowSpaceThreshold = 50 // GB batchRetrieveSize = 1000 - storeSameSymbolsBatchConcurrency = 3 - fetchSymbolsBatchConcurrency = 6 - minimumDataStoreSuccessRate = 75.0 + + storeSameSymbolsBatchConcurrency = 3 + fetchSymbolsBatchConcurrency = 6 + minimumDataStoreSuccessRate = 75.0 maxIterations = 4 macConcurrentNetworkStoreCalls = 16 @@ -103,7 +105,7 @@ func (s *DHT) bootstrapIgnoreList(ctx context.Context) error { } if added > 0 { - logtrace.Info(ctx, "Ignore list bootstrapped from replication info", logtrace.Fields{ + logtrace.Debug(ctx, "Ignore list bootstrapped from replication info", logtrace.Fields{ logtrace.FieldModule: "p2p", "ignored_count": added, }) @@ -358,7 +360,7 @@ func (s *DHT) Store(ctx context.Context, data []byte, typ int) (string, error) { // measured success rate for node RPCs is below the configured minimum, an error // is returned. Metrics are not returned through the API. 
func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID string) error { - logtrace.Info(ctx, "Store DB batch begin", logtrace.Fields{ + logtrace.Debug(ctx, "DHT StoreBatch begin", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, "records": len(values), @@ -366,7 +368,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s if err := s.store.StoreBatch(ctx, values, typ, true); err != nil { return fmt.Errorf("store batch: %v", err) } - logtrace.Info(ctx, "Store DB batch done, store network batch begin", logtrace.Fields{ + logtrace.Debug(ctx, "DHT StoreBatch: local stored; network begin", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, }) @@ -376,7 +378,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s return fmt.Errorf("iterate batch store: %v", err) } - logtrace.Info(ctx, "Store network batch workers done", logtrace.Fields{ + logtrace.Debug(ctx, "DHT StoreBatch: network done", logtrace.Fields{ logtrace.FieldModule: "dht", logtrace.FieldTaskID: taskID, }) @@ -387,6 +389,7 @@ func (s *DHT) StoreBatch(ctx context.Context, values [][]byte, typ int, taskID s // Retrieve data from the networking using key. Key is the base58 encoded // identifier of the data. 
func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]byte, error) { + start := time.Now() decoded := base58.Decode(key) if len(decoded) != B/8 { return nil, fmt.Errorf("invalid key: %v", key) @@ -402,6 +405,7 @@ func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]by // retrieve the key/value from queries storage value, err := s.store.Retrieve(ctx, decoded) if err == nil && len(value) > 0 { + logtrace.Debug(ctx, "DHT Retrieve local hit", logtrace.Fields{"key": hex.EncodeToString(decoded), "ms": time.Since(start).Milliseconds()}) return value, nil } else if err != nil { logtrace.Error(ctx, "Error retrieving key from local storage", logtrace.Fields{ @@ -417,20 +421,23 @@ func (s *DHT) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]by } // if not found locally, iterative find value from kademlia network + logtrace.Debug(ctx, "DHT Retrieve network lookup", logtrace.Fields{"key": dbKey}) peerValue, err := s.iterate(ctx, IterateFindValue, decoded, nil, 0) if err != nil { return nil, errors.Errorf("retrieve from peer: %w", err) } if len(peerValue) > 0 { - logtrace.Info(ctx, "Not found locally, retrieved from other nodes", logtrace.Fields{ + logtrace.Debug(ctx, "DHT Retrieve network hit", logtrace.Fields{ logtrace.FieldModule: "dht", "key": dbKey, "data_len": len(peerValue), + "ms": time.Since(start).Milliseconds(), }) } else { - logtrace.Info(ctx, "Not found locally, not found in other nodes", logtrace.Fields{ + logtrace.Debug(ctx, "DHT Retrieve miss", logtrace.Fields{ logtrace.FieldModule: "dht", "key": dbKey, + "ms": time.Since(start).Milliseconds(), }) } @@ -465,17 +472,7 @@ func (s *DHT) Stats(ctx context.Context) (map[string]interface{}, error) { dhtStats["peers_count"] = len(s.ht.nodes()) dhtStats["peers"] = s.ht.nodes() dhtStats["network"] = s.network.HandleMetricsSnapshot() - // Include recent request snapshots for observability - if s.network != nil { - if overall, byIP := 
s.network.RecentBatchStoreSnapshot(); len(overall) > 0 || len(byIP) > 0 { - dhtStats["recent_batch_store_overall"] = overall - dhtStats["recent_batch_store_by_ip"] = byIP - } - if overall, byIP := s.network.RecentBatchRetrieveSnapshot(); len(overall) > 0 || len(byIP) > 0 { - dhtStats["recent_batch_retrieve_overall"] = overall - dhtStats["recent_batch_retrieve_by_ip"] = byIP - } - } + // Removed: recent per-request snapshots (logs provide visibility) dhtStats["database"] = dbStats return dhtStats, nil @@ -503,9 +500,10 @@ func (s *DHT) newMessage(messageType int, receiver *Node, data interface{}) *Mes } sender := &Node{ - IP: hostIP, - ID: s.ht.self.ID, - Port: s.ht.self.Port, + IP: hostIP, + ID: s.ht.self.ID, + Port: s.ht.self.Port, + Version: localVersion(), } return &Message{ Sender: sender, @@ -525,15 +523,18 @@ func (s *DHT) GetValueFromNode(ctx context.Context, target []byte, n *Node) ([]b cctx, ccancel := context.WithTimeout(ctx, time.Second*5) defer ccancel() + // Minimal per-RPC visibility + logtrace.Debug(ctx, "RPC FindValue send", logtrace.Fields{"node": n.String(), "key": hex.EncodeToString(target)}) response, err := s.network.Call(cctx, request, false) if err != nil { - logtrace.Info(ctx, "Network call request failed", logtrace.Fields{ + logtrace.Debug(ctx, "Network call request failed", logtrace.Fields{ logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "request": request.String(), }) return nil, fmt.Errorf("network call request %s failed: %w", request.String(), err) } + logtrace.Debug(ctx, "RPC FindValue completed", logtrace.Fields{"node": n.String()}) v, ok := response.Data.(*FindValueResponse) if ok && v.Status.Result == ResultOk && len(v.Value) > 0 { @@ -569,7 +570,7 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by // update the running goroutines number++ - logtrace.Info(ctx, "Start work for node", logtrace.Fields{ + logtrace.Debug(ctx, "Start work for node", logtrace.Fields{ logtrace.FieldModule: 
"p2p", "iterate_type": iterativeType, "node": node.String(), @@ -593,18 +594,35 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by // new a request message request := s.newMessage(messageType, receiver, data) + // Minimal per-RPC visibility + op := "" + switch messageType { + case FindNode: + op = "FindNode" + case FindValue: + op = "FindValue" + default: + op = "RPC" + } + fields := logtrace.Fields{"node": receiver.String()} + if messageType == FindValue { + fields["key"] = hex.EncodeToString(target) + } + logtrace.Debug(ctx, "RPC "+op+" send", fields) // send the request and receive the response response, err := s.network.Call(ctx, request, false) if err != nil { - logtrace.Info(ctx, "Network call request failed", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate worker RPC failed", logtrace.Fields{ logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "request": request.String(), + "node": receiver.String(), }) // node is unreachable, remove the node //removedNodes = append(removedNodes, receiver) return } + logtrace.Debug(ctx, "RPC "+op+" completed", logtrace.Fields{"node": receiver.String()}) // send the response to message channel responses <- response @@ -621,7 +639,7 @@ func (s *DHT) doMultiWorkers(ctx context.Context, iterativeType int, target []by return responses } -func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result *sync.Map, req int32) (count int32, err error) { +func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result *sync.Map, req int32, writer func(symbolID string, data []byte) error) (count int32, err error) { batchSize := 5000 // Process in batches @@ -633,7 +651,7 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result batchHexKeys := hexKeys[start:end] - logtrace.Info(ctx, "Processing batch of local keys", logtrace.Fields{ + logtrace.Debug(ctx, "Processing batch of local keys", logtrace.Fields{ logtrace.FieldModule: "dht", 
"batch_size": len(batchHexKeys), "total_keys": len(hexKeys), @@ -653,8 +671,19 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result // Populate the result map with the local values and count the found keys for i, val := range localValues { if len(val) > 0 { - count++ - result.Store(batchHexKeys[i], val) + // When writer is provided, call it and store empty marker + // Otherwise store full data in memory + if writer != nil { + if err := writer(batchHexKeys[i], val); err != nil { + logtrace.Error(ctx, "writer error for local key", logtrace.Fields{"key": batchHexKeys[i], logtrace.FieldError: err.Error()}) + continue // Skip counting failed writes + } + result.Store(batchHexKeys[i], []byte{}) // Empty marker + count++ // Only count successful writes + } else { + result.Store(batchHexKeys[i], val) // Full data + count++ // Count found data + } if count >= req { return count, nil } @@ -665,20 +694,22 @@ func (s *DHT) fetchAndAddLocalKeys(ctx context.Context, hexKeys []string, result return count, err } -func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, txID string, localOnly ...bool) (result map[string][]byte, err error) { - start := time.Now() +func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, txID string, writer func(symbolID string, data []byte) error, localOnly ...bool) (result map[string][]byte, err error) { + logtrace.Debug(ctx, "DHT BatchRetrieve begin", logtrace.Fields{"txid": txID, "keys": len(keys), "required": required}) result = make(map[string][]byte) var resMap sync.Map var foundLocalCount int32 hexKeys := make([]string, len(keys)) - globalClosestContacts := make(map[string]*NodeList) hashes := make([][]byte, len(keys)) - knownNodes := make(map[string]*Node) - var knownMu sync.Mutex - var closestMu sync.RWMutex defer func() { + // Skip building result map when writer is provided + // Writer stores data to disk; resMap only has empty markers for deduplication + if 
writer != nil { + return + } + resMap.Range(func(key, value interface{}) bool { hexKey := key.(string) valBytes := value.([]byte) @@ -700,15 +731,6 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, } }() - for _, key := range keys { - result[key] = nil - } - - supernodeAddr, _ := s.getSupernodeAddress(ctx) - hostIP := parseSupernodeAddress(supernodeAddr) - self := &Node{ID: s.ht.self.ID, IP: hostIP, Port: s.ht.self.Port} - self.SetHashedID() - for i, key := range keys { decoded := base58.Decode(key) if len(decoded) != B/8 { @@ -716,16 +738,61 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, } hashes[i] = decoded hexKeys[i] = hex.EncodeToString(decoded) + result[key] = nil } + foundLocalCount, err = s.fetchAndAddLocalKeys(ctx, hexKeys, &resMap, required, writer) + if err != nil { + return nil, fmt.Errorf("fetch and add local keys: %v", err) + } + // Report locally found count for metrics + p2pmetrics.ReportFoundLocal(p2pmetrics.TaskIDFromContext(ctx), int(foundLocalCount)) + + if foundLocalCount >= required { + logtrace.Debug(ctx, "DHT BatchRetrieve satisfied from local storage", logtrace.Fields{ + "txid": txID, "found_local": foundLocalCount, "required": required, + }) + return result, nil + } + + if len(localOnly) > 0 && localOnly[0] { + logtrace.Debug(ctx, "DHT BatchRetrieve local-only mode, insufficient keys", logtrace.Fields{ + "txid": txID, "found_local": foundLocalCount, "required": required, + }) + return result, fmt.Errorf("local-only: found %d, required %d", foundLocalCount, required) + } + + supernodeAddr, addrErr := s.getSupernodeAddress(ctx) + if addrErr != nil { + logtrace.Warn(ctx, "Failed to get supernode address", logtrace.Fields{ + logtrace.FieldModule: "dht", + logtrace.FieldError: addrErr.Error(), + }) + } + hostIP := parseSupernodeAddress(supernodeAddr) + self := &Node{ID: s.ht.self.ID, IP: hostIP, Port: s.ht.self.Port} + self.SetHashedID() + + knownNodes := 
make(map[string]*Node) + var knownMu sync.Mutex + for _, n := range s.ht.nodes() { nn := &Node{ID: n.ID, IP: n.IP, Port: n.Port} nn.SetHashedID() knownNodes[string(nn.ID)] = nn } + ignoreList := s.ignorelist.ToNodeList() + + globalClosestContacts := make(map[string]*NodeList) + var closestMu sync.RWMutex + for i := range keys { - top6 := s.ht.closestContactsWithIncludingNode(Alpha, hashes[i], s.ignorelist.ToNodeList(), nil) + if _, found := resMap.Load(hexKeys[i]); found { + continue + } + + top6 := s.ht.closestContactsWithIncludingNode(Alpha, hashes[i], ignoreList, nil) closestMu.Lock() globalClosestContacts[keys[i]] = top6 closestMu.Unlock() @@ -734,61 +801,73 @@ func (s *DHT) BatchRetrieve(ctx context.Context, keys []string, required int32, delete(knownNodes, string(self.ID)) - foundLocalCount, err = s.fetchAndAddLocalKeys(ctx, hexKeys, &resMap, required) - if err != nil { - return nil, fmt.Errorf("fetch and add local keys: %v", err) - } - // Report how many were found locally, for event metrics - p2pmetrics.ReportFoundLocal(p2pmetrics.TaskIDFromContext(ctx), int(foundLocalCount)) - if foundLocalCount >= required { - return result, nil - } - batchSize := batchRetrieveSize var networkFound int32 totalBatches := int(math.Ceil(float64(required) / float64(batchSize))) parallelBatches := int(math.Min(float64(totalBatches), float64(fetchSymbolsBatchConcurrency))) - semaphore := make(chan struct{}, parallelBatches) + sem := semaphore.NewWeighted(int64(parallelBatches)) var wg sync.WaitGroup gctx, cancel := context.WithCancel(ctx) defer cancel() + // Measure only the network retrieval phase (after local scan) + netStart := time.Now() + for start := 0; start < len(keys); start += batchSize { end := start + batchSize if end > len(keys) { end = len(keys) } + if atomic.LoadInt32(&networkFound)+int32(foundLocalCount) >= int32(required) { break } - wg.Add(1) - semaphore <- struct{}{} - go s.processBatch( - gctx, - keys[start:end], - hexKeys[start:end], - semaphore, &wg, - 
globalClosestContacts, - &closestMu, - knownNodes, &knownMu, - &resMap, - required, - foundLocalCount, - &networkFound, - cancel, - txID, - ) + wg.Add(1) + go func(start, end int) { + defer wg.Done() + + if err := sem.Acquire(gctx, 1); err != nil { + return + } + defer sem.Release(1) + + if atomic.LoadInt32(&networkFound)+int32(foundLocalCount) >= int32(required) { + return + } + + s.processBatch( + gctx, + keys[start:end], + hexKeys[start:end], + globalClosestContacts, + &closestMu, + knownNodes, &knownMu, + &resMap, + required, + foundLocalCount, + &networkFound, + cancel, + txID, + writer, + ) + }(start, end) } wg.Wait() netFound := int(atomic.LoadInt32(&networkFound)) - // Record batch retrieve stats for internal DHT snapshot window - s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(start)) - // Also feed retrieve counts into the per-task collector for stream events - p2pmetrics.SetRetrieveBatchSummary(p2pmetrics.TaskIDFromContext(ctx), len(keys), int(required), int(foundLocalCount), netFound, time.Since(start).Milliseconds()) + { + f := logtrace.Fields{"txid": txID, "found_local": foundLocalCount, "found_network": netFound, "required": required, "ms": time.Since(netStart).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch retrieve summary", f) + } + // Record batch retrieve stats for internal DHT snapshot window (network phase only) + s.metrics.RecordBatchRetrieve(len(keys), int(required), int(foundLocalCount), netFound, time.Since(netStart)) + // No per-task metrics collector updates return result, nil } @@ -797,8 +876,6 @@ func (s *DHT) processBatch( ctx context.Context, batchKeys []string, batchHexKeys []string, - semaphore chan struct{}, - wg *sync.WaitGroup, globalClosestContacts map[string]*NodeList, closestMu *sync.RWMutex, knownNodes map[string]*Node, @@ -809,118 +886,86 @@ func (s *DHT) 
processBatch( networkFound *int32, cancel context.CancelFunc, txID string, + writer func(symbolID string, data []byte) error, ) { - defer wg.Done() - defer func() { <-semaphore }() + select { + case <-ctx.Done(): + return + default: + } - for i := 0; i < maxIterations; i++ { - select { - case <-ctx.Done(): - return - default: - } + fetchMap := make(map[string][]int) - // Build fetch map (read globalClosestContacts under RLock) - fetchMap := make(map[string][]int) - for i, key := range batchKeys { - closestMu.RLock() - nl := globalClosestContacts[key] - closestMu.RUnlock() - if nl == nil { - continue - } - for _, node := range nl.Nodes { - nodeID := string(node.ID) - fetchMap[nodeID] = append(fetchMap[nodeID], i) - } - } + closestMu.RLock() + localContacts := make(map[string]*NodeList, len(batchKeys)) + for _, key := range batchKeys { + localContacts[key] = globalClosestContacts[key] + } + closestMu.RUnlock() - foundCount, newClosestContacts, batchErr := s.iterateBatchGetValues( - ctx, knownNodes, batchKeys, batchHexKeys, fetchMap, resMap, required, foundLocalCount+atomic.LoadInt32(networkFound), - ) - if batchErr != nil { - logtrace.Error(ctx, "Iterate batch get values failed", logtrace.Fields{ - logtrace.FieldModule: "dht", "txid": txID, logtrace.FieldError: batchErr.Error(), - }) + for idx, key := range batchKeys { + nl := localContacts[key] + if nl == nil { + continue } - - atomic.AddInt32(networkFound, int32(foundCount)) - if atomic.LoadInt32(networkFound)+int32(foundLocalCount) >= int32(required) { - cancel() - break + for _, node := range nl.Nodes { + nodeID := string(node.ID) + fetchMap[nodeID] = append(fetchMap[nodeID], idx) } + } - changed := false - for key, nodesList := range newClosestContacts { - if nodesList == nil || nodesList.Nodes == nil { - continue - } - - closestMu.RLock() - curr := globalClosestContacts[key] - closestMu.RUnlock() - if curr == nil || curr.Nodes == nil { - logtrace.Warn(ctx, "Global contacts missing key during merge", 
logtrace.Fields{"key": key}) - continue - } - - if !haveAllNodes(nodesList.Nodes, curr.Nodes) { - changed = true - } - - nodesList.AddNodes(curr.Nodes) - nodesList.Sort() - nodesList.TopN(Alpha) - - s.addKnownNodesSafe(ctx, nodesList.Nodes, knownNodes, knownMu) - - closestMu.Lock() - globalClosestContacts[key] = nodesList - closestMu.Unlock() - } + foundCount, batchErr := s.iterateBatchGetValues( + ctx, knownNodes, batchHexKeys, fetchMap, resMap, required, foundLocalCount+atomic.LoadInt32(networkFound), writer, + ) + if batchErr != nil { + logtrace.Error(ctx, "Iterate batch get values failed", logtrace.Fields{ + logtrace.FieldModule: "dht", "txid": txID, logtrace.FieldError: batchErr.Error(), + }) + } - if !changed { - break - } + atomic.AddInt32(networkFound, int32(foundCount)) + if atomic.LoadInt32(networkFound)+int32(foundLocalCount) >= int32(required) { + cancel() } } -func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, keys []string, hexKeys []string, fetchMap map[string][]int, - resMap *sync.Map, req, alreadyFound int32) (int, map[string]*NodeList, error) { - semaphore := make(chan struct{}, storeSameSymbolsBatchConcurrency) // Limit concurrency to 1 - closestContacts := make(map[string]*NodeList) +func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, hexKeys []string, fetchMap map[string][]int, + resMap *sync.Map, req, alreadyFound int32, writer func(symbolID string, data []byte) error) (int, error) { + sem := semaphore.NewWeighted(int64(storeSameSymbolsBatchConcurrency)) var wg sync.WaitGroup - contactsMap := make(map[string]map[string][]*Node) var firstErr error var mu sync.Mutex // To protect the firstErr foundCount := int32(0) - gctx, cancel := context.WithCancel(ctx) // Create a cancellable context - defer cancel() - for nodeID, node := range nodes { + gctx, cancel := context.WithCancel(ctx) // Create a cancellable context + defer cancel() + for nodeID := range fetchMap { + node, ok := 
nodes[nodeID] + if !ok { + continue + } + if s.ignorelist.Banned(node) { - logtrace.Info(ctx, "Ignore banned node in iterate batch get values", logtrace.Fields{ + logtrace.Debug(ctx, "Ignore banned node in iterate batch get values", logtrace.Fields{ logtrace.FieldModule: "dht", "node": node.String(), }) continue } - contactsMap[nodeID] = make(map[string][]*Node) wg.Add(1) go func(node *Node, nodeID string) { defer wg.Done() - select { - case <-ctx.Done(): + if err := sem.Acquire(gctx, 1); err != nil { return - case <-gctx.Done(): + } + defer sem.Release(1) + + if atomic.LoadInt32(&foundCount) >= int32(req-alreadyFound) { return - case semaphore <- struct{}{}: - defer func() { <-semaphore }() } - callStart := time.Now() indices := fetchMap[nodeID] requestKeys := make(map[string]KeyValWithClosest) for _, idx := range indices { @@ -938,14 +983,15 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, return } - decompressedData, err := s.doBatchGetValuesCall(gctx, node, requestKeys) + callStart := time.Now() + decompressedData, err := s.doBatchGetValuesCall(gctx, node, requestKeys) if err != nil { mu.Lock() if firstErr == nil { firstErr = err } mu.Unlock() - // record failed RPC per-node + // record failed RPC for metrics p2pmetrics.RecordRetrieve(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ IP: node.IP, Address: node.String(), @@ -960,18 +1006,38 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, returned := 0 for k, v := range decompressedData { if len(v.Value) > 0 { - _, loaded := resMap.LoadOrStore(k, v.Value) + // When writer is provided, only store empty marker to save memory + // The writer already persists data to disk for RaptorQ + storeVal := v.Value + if writer != nil { + storeVal = []byte{} // Empty marker for deduplication only + } + _, loaded := resMap.LoadOrStore(k, storeVal) if !loaded { - atomic.AddInt32(&foundCount, 1) - returned++ - if atomic.LoadInt32(&foundCount) >= 
int32(req-alreadyFound) { - cancel() // Cancel context to stop other goroutines - // don't early return; record metric and exit goroutine - break + writeSuccess := true + if writer != nil { + // decode k (hex) back to base58 key if your writer expects that + // or just pass the hex; you control the writer side. + if err := writer(k, v.Value); err != nil { + // Log error and mark write as failed + logtrace.Error(ctx, "writer error", logtrace.Fields{"key": k, logtrace.FieldError: err.Error()}) + writeSuccess = false + // Remove from resMap since write failed + resMap.Delete(k) + } + } + + // Only count if write succeeded (or no writer provided) + if writeSuccess { + atomic.AddInt32(&foundCount, 1) + returned++ + if atomic.LoadInt32(&foundCount) >= int32(req-alreadyFound) { + cancel() // Cancel context to stop other goroutines + // don't early return; record metric and exit goroutine + break + } } } - } else { - contactsMap[nodeID][k] = v.Closest } } @@ -989,7 +1055,7 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, wg.Wait() - logtrace.Info(ctx, "Iterate batch get values done", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate batch get values done", logtrace.Fields{ logtrace.FieldModule: "dht", "found_count": atomic.LoadInt32(&foundCount), }) @@ -1002,41 +1068,29 @@ func (s *DHT) iterateBatchGetValues(ctx context.Context, nodes map[string]*Node, }) } - for _, closestNodes := range contactsMap { - for key, nodes := range closestNodes { - comparator, err := hex.DecodeString(key) - if err != nil { - logtrace.Error(ctx, "Failed to decode hex key in closestNodes.Range", logtrace.Fields{ - logtrace.FieldModule: "dht", - "key": key, - logtrace.FieldError: err.Error(), - }) - return 0, nil, err - } - bkey := base58.Encode(comparator) - - if _, ok := closestContacts[bkey]; !ok { - closestContacts[bkey] = &NodeList{Nodes: nodes, Comparator: comparator} - } else { - closestContacts[bkey].AddNodes(nodes) - } - } - } - - for key, nodes := range 
closestContacts { - nodes.Sort() - nodes.TopN(Alpha) - closestContacts[key] = nodes - } - return int(foundCount), closestContacts, firstErr + return int(foundCount), firstErr } func (s *DHT) doBatchGetValuesCall(ctx context.Context, node *Node, requestKeys map[string]KeyValWithClosest) (map[string]KeyValWithClosest, error) { request := s.newMessage(BatchGetValues, node, &BatchGetValuesRequest{Data: requestKeys}) + { + f := logtrace.Fields{"node": node.String(), "keys": len(requestKeys), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch get send", f) + } response, err := s.network.Call(ctx, request, false) if err != nil { return nil, fmt.Errorf("network call request %s failed: %w", request.String(), err) } + { + f := logtrace.Fields{"node": node.String(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch get ok", f) + } resp, ok := response.Data.(*BatchGetValuesResponse) if !ok { @@ -1071,7 +1125,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // find the closest contacts for the target node from queries route tables nl, _ := s.ht.closestContacts(Alpha, target, igList) if len(igList) > 0 { - logtrace.Info(ctx, "Closest contacts", logtrace.Fields{ + logtrace.Debug(ctx, "Closest contacts", logtrace.Fields{ logtrace.FieldModule: "p2p", "nodes": nl.String(), "ignored": s.ignorelist.String(), @@ -1081,7 +1135,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat if nl.Len() == 0 { return nil, nil } - logtrace.Info(ctx, "Iterate start", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate start", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "type": iterativeType, @@ -1095,7 +1149,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat if iterativeType == 
IterateFindNode { hashedTargetID, _ := utils.Blake3Hash(target) bucket := s.ht.bucketIndex(s.ht.self.HashedID, hashedTargetID) - logtrace.Info(ctx, "Bucket for target", logtrace.Fields{ + logtrace.Debug(ctx, "Bucket for target", logtrace.Fields{ logtrace.FieldModule: "p2p", "target": sKey, }) @@ -1119,7 +1173,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // Set a maximum number of iterations to prevent indefinite looping maxIterations := 5 // Adjust the maximum iterations as needed - logtrace.Info(ctx, "Begin iteration", logtrace.Fields{ + logtrace.Debug(ctx, "Begin iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1130,7 +1184,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat case <-ctx.Done(): return nil, fmt.Errorf("iterate cancelled: %w", ctx.Err()) case <-timeout: - logtrace.Info(ctx, "Iteration timed out", logtrace.Fields{ + logtrace.Debug(ctx, "Iteration timed out", logtrace.Fields{ logtrace.FieldModule: "p2p", }) return nil, nil @@ -1153,7 +1207,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat } default: - logtrace.Error(ctx, "Unknown message type", logtrace.Fields{ + logtrace.Debug(ctx, "Unknown message type", logtrace.Fields{ logtrace.FieldModule: "dht", "type": response.MessageType, }) @@ -1162,7 +1216,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat // Stop search if no more nodes to contact if !searchRest && len(nl.Nodes) == 0 { - logtrace.Info(ctx, "Search stopped", logtrace.Fields{ + logtrace.Debug(ctx, "Search stopped", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1174,7 +1228,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat nl.Comparator = target nl.Sort() - logtrace.Info(ctx, "Iterate sorted nodes", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate sorted nodes", logtrace.Fields{ 
logtrace.FieldModule: "p2p", "id": base58.Encode(s.ht.self.ID), "iterate": iterativeType, @@ -1211,7 +1265,7 @@ func (s *DHT) iterate(ctx context.Context, iterativeType int, target []byte, dat } } - logtrace.Info(ctx, "Finish iteration without results", logtrace.Fields{ + logtrace.Debug(ctx, "Finish iteration without results", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1232,7 +1286,7 @@ func (s *DHT) handleResponses(ctx context.Context, responses <-chan *Message, nl v, ok := response.Data.(*FindValueResponse) if ok { if v.Status.Result == ResultOk && len(v.Value) > 0 { - logtrace.Info(ctx, "Iterate found value from network", logtrace.Fields{ + logtrace.Debug(ctx, "Iterate found value from network", logtrace.Fields{ logtrace.FieldModule: "p2p", }) return nl, v.Value @@ -1262,7 +1316,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] // nl will have the closest nodes to the target value, it will ignore the nodes in igList nl, _ := s.ht.closestContacts(Alpha, target, igList) if len(igList) > 0 { - logtrace.Info(ctx, "Closest contacts", logtrace.Fields{ + logtrace.Debug(ctx, "Closest contacts", logtrace.Fields{ logtrace.FieldModule: "p2p", "nodes": nl.String(), "ignored": s.ignorelist.String(), @@ -1277,7 +1331,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] searchRest := false // keep track of contacted nodes so that we don't hit them again contacted := make(map[string]bool) - logtrace.Info(ctx, "Begin iteration", logtrace.Fields{ + logtrace.Debug(ctx, "Begin iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1286,7 +1340,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] var closestNode *Node var iterationCount int for iterationCount = 0; iterationCount < maxIterations; iterationCount++ { - logtrace.Info(ctx, "Begin find value", logtrace.Fields{ + logtrace.Debug(ctx, "Begin find 
value", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "nl": nl.Len(), @@ -1295,7 +1349,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] }) if nl.Len() == 0 { - logtrace.Error(ctx, "Nodes list length is 0", logtrace.Fields{ + logtrace.Debug(ctx, "Nodes list length is 0", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1306,7 +1360,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] // if the closest node is the same as the last iteration and we don't want to search rest of nodes, we are done if !searchRest && (closestNode != nil && bytes.Equal(nl.Nodes[0].ID, closestNode.ID)) { - logtrace.Info(ctx, "Closest node is the same as the last iteration", logtrace.Fields{ + logtrace.Debug(ctx, "Closest node is the same as the last iteration", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1325,7 +1379,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] nl.Sort() - logtrace.Info(ctx, "Iteration progress", logtrace.Fields{ + logtrace.Debug(ctx, "Iteration progress", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1334,7 +1388,7 @@ func (s *DHT) iterateFindValue(ctx context.Context, iterativeType int, target [] }) } - logtrace.Info(ctx, "Finished iterations without results", logtrace.Fields{ + logtrace.Debug(ctx, "Finished iterations without results", logtrace.Fields{ logtrace.FieldModule: "p2p", "task_id": taskID, "key": sKey, @@ -1378,14 +1432,39 @@ func (s *DHT) sendStoreData(ctx context.Context, n *Node, request *StoreDataRequ // add a node into the appropriate k bucket, return the removed node if it's full func (s *DHT) addNode(ctx context.Context, node *Node) *Node { + // Minimum-version gating: reject nodes below configured minimum. 
+ peerVer := "" + if node != nil { + peerVer = node.Version + } + if minRequired, tooOld := versionTooOld(peerVer); tooOld { + fields := logtrace.Fields{ + logtrace.FieldModule: "p2p", + "min_required": minRequired, + "peer_version": strings.TrimSpace(peerVer), + } + if node != nil { + fields["peer"] = node.String() + } + logtrace.Debug(ctx, "Rejecting node: peer below minimum version", fields) + return nil + } // Allow localhost for integration testing isIntegrationTest := os.Getenv("INTEGRATION_TEST") == "true" if node.IP == "" || node.IP == "0.0.0.0" || (!isIntegrationTest && node.IP == "127.0.0.1") { - logtrace.Debug(ctx, "Trying to add invalid node", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Info(ctx, "Rejecting node: invalid IP", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "ip": node.IP, + "node": node.String(), + "integration_test": isIntegrationTest, + }) return nil } if bytes.Equal(node.ID, s.ht.self.ID) { - logtrace.Debug(ctx, "Trying to add itself", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Info(ctx, "Rejecting node: is self", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "node": node.String(), + }) return nil } node.SetHashedID() @@ -1514,7 +1593,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, for i := Alpha; i < nl.Len() && finalStoreCount < int32(Alpha); i++ { n := nl.Nodes[i] if s.ignorelist.Banned(n) { - logtrace.Info(ctx, "Ignore banned node during sequential store", logtrace.Fields{ + logtrace.Debug(ctx, "Ignore banned node during sequential store", logtrace.Fields{ logtrace.FieldModule: "p2p", "node": n.String(), "task_id": taskID, @@ -1547,7 +1626,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, skey, _ := utils.Blake3Hash(data) if finalStoreCount >= int32(Alpha) { - logtrace.Info(ctx, "Store data to alpha nodes success", logtrace.Fields{ + logtrace.Debug(ctx, "Store data to alpha nodes success", logtrace.Fields{ logtrace.FieldModule: 
"dht", "task_id": taskID, "len_total_nodes": nl.Len(), @@ -1557,7 +1636,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, return nil } - logtrace.Info(ctx, "Store data to alpha nodes failed", logtrace.Fields{ + logtrace.Debug(ctx, "Store data to alpha nodes failed", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": taskID, "store_count": finalStoreCount, @@ -1570,7 +1649,7 @@ func (s *DHT) storeToAlphaNodes(ctx context.Context, nl *NodeList, data []byte, func (s *DHT) removeNode(ctx context.Context, node *Node) { // ensure this is not itself address if bytes.Equal(node.ID, s.ht.self.ID) { - logtrace.Info(ctx, "Trying to remove itself", logtrace.Fields{ + logtrace.Debug(ctx, "Trying to remove itself", logtrace.Fields{ logtrace.FieldModule: "p2p", }) return @@ -1586,7 +1665,7 @@ func (s *DHT) removeNode(ctx context.Context, node *Node) { "bucket": index, }) } else { - logtrace.Info(ctx, "Removed node from bucket success", logtrace.Fields{ + logtrace.Debug(ctx, "Removed node from bucket success", logtrace.Fields{ logtrace.FieldModule: "p2p", "node": node.String(), "bucket": index, @@ -1644,12 +1723,13 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i knownNodes := make(map[string]*Node) hashes := make([][]byte, len(values)) - logtrace.Info(ctx, "Iterate batch store begin", logtrace.Fields{ - logtrace.FieldModule: "dht", - "task_id": id, - "keys": len(values), - "len_nodes": len(s.ht.nodes()), - }) + { + f := logtrace.Fields{logtrace.FieldModule: "dht", "task_id": id, "keys": len(values), "len_nodes": len(s.ht.nodes()), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch store start", f) + } for i := 0; i < len(values); i++ { target, _ := utils.Blake3Hash(values[i]) hashes[i] = target @@ -1671,17 +1751,15 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i requests 
:= 0 successful := 0 + logtrace.Debug(ctx, "Iterate batch store: dispatching to nodes", logtrace.Fields{"task_id": id, "nodes": len(knownNodes)}) storeResponses := s.batchStoreNetwork(ctx, values, knownNodes, storageMap, typ) for response := range storeResponses { requests++ var nodeAddr string - var nodeIP string if response.Receiver != nil { nodeAddr = response.Receiver.String() - nodeIP = response.Receiver.IP } else if response.Message != nil && response.Message.Sender != nil { nodeAddr = response.Message.Sender.String() - nodeIP = response.Message.Sender.IP } errMsg := "" @@ -1712,15 +1790,7 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i } } - // Emit per-node store RPC call via metrics bridge (no P2P API coupling) - p2pmetrics.RecordStore(p2pmetrics.TaskIDFromContext(ctx), p2pmetrics.Call{ - IP: nodeIP, - Address: nodeAddr, - Keys: response.KeysCount, - Success: errMsg == "" && response.Error == nil, - Error: errMsg, - DurationMS: response.DurationMS, - }) + // per-node store metrics removed; logs retained } @@ -1729,14 +1799,14 @@ func (s *DHT) IterateBatchStore(ctx context.Context, values [][]byte, typ int, i successRate := float64(successful) / float64(requests) * 100 if successRate >= minimumDataStoreSuccessRate { - logtrace.Info(ctx, "Successful store operations", logtrace.Fields{ + logtrace.Info(ctx, "dht: batch store ok", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "success_rate": fmt.Sprintf("%.2f%%", successRate), }) return nil } else { - logtrace.Info(ctx, "Failed to achieve desired success rate", logtrace.Fields{ + logtrace.Info(ctx, "dht: batch store below threshold", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": id, "success_rate": fmt.Sprintf("%.2f%%", successRate), @@ -1763,12 +1833,9 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ var wg sync.WaitGroup for key, node := range nodes { - logtrace.Info(ctx, "Node", logtrace.Fields{ - 
logtrace.FieldModule: "dht", - "port": node.String(), - }) + logtrace.Debug(ctx, "Preparing batch store to node", logtrace.Fields{logtrace.FieldModule: "dht", "node": node.String()}) if s.ignorelist.Banned(node) { - logtrace.Info(ctx, "Ignoring banned node in batch store network call", logtrace.Fields{ + logtrace.Debug(ctx, "Ignoring banned node in batch store network call", logtrace.Fields{ logtrace.FieldModule: "dht", "node": node.String(), }) @@ -1796,15 +1863,17 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ totalBytes += len(values[idx]) } - logtrace.Info(ctx, "Batch store to node", logtrace.Fields{ - logtrace.FieldModule: "dht", - "keys": len(toStore), - "size_before_compress": utils.BytesIntToMB(totalBytes), - }) + { + f := logtrace.Fields{logtrace.FieldModule: "dht", "node": receiver.String(), "keys": len(toStore), "size_mb": utils.BytesIntToMB(totalBytes), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch store RPC send", f) + } // Skip empty payloads: avoid sending empty store RPCs and do not record no-op metrics. 
if len(toStore) == 0 { - logtrace.Info(ctx, "Skipping store RPC with empty payload", logtrace.Fields{ + logtrace.Debug(ctx, "Skipping store RPC with empty payload", logtrace.Fields{ logtrace.FieldModule: "dht", "node": receiver.String(), }) @@ -1821,15 +1890,18 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ s.metrics.IncHotPathBanIncr() } - logtrace.Info(ctx, "Network call batch store request failed", logtrace.Fields{ - logtrace.FieldModule: "p2p", - logtrace.FieldError: err.Error(), - "request": request.String(), - }) + logtrace.Error(ctx, "RPC BatchStoreData failed", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "node": receiver.String(), "ms": dur}) responses <- &MessageWithError{Error: err, Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur} return } + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "node": receiver.String(), "keys": len(toStore), "ms": dur, logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "dht: batch store RPC ok", f) + } responses <- &MessageWithError{Message: response, KeysCount: len(toStore), Receiver: receiver, DurationMS: dur} } }(node, key) @@ -1842,7 +1914,7 @@ func (s *DHT) batchStoreNetwork(ctx context.Context, values [][]byte, nodes map[ } func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[string]*Node, contacted map[string]bool, txid string) (chan *MessageWithError, bool) { - logtrace.Info(ctx, "Batch find node begin", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find node begin", logtrace.Fields{ logtrace.FieldModule: "dht", "task_id": txid, "nodes_count": len(nodes), @@ -1865,7 +1937,7 @@ func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[str continue } if s.ignorelist.Banned(node) { - logtrace.Info(ctx, "Ignoring banned node in batch find call", logtrace.Fields{ + logtrace.Debug(ctx, 
"Ignoring banned node in batch find call", logtrace.Fields{ logtrace.FieldModule: "dht", "node": node.String(), "txid": txid, @@ -1913,7 +1985,7 @@ func (s *DHT) batchFindNode(ctx context.Context, payload [][]byte, nodes map[str } wg.Wait() close(responses) - logtrace.Info(ctx, "Batch find node done", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find node done", logtrace.Fields{ logtrace.FieldModule: "dht", "nodes_count": len(nodes), "len_resp": len(responses), diff --git a/p2p/kademlia/fetch_and_store.go b/p2p/kademlia/fetch_and_store.go index 9803bf3d..6344095d 100644 --- a/p2p/kademlia/fetch_and_store.go +++ b/p2p/kademlia/fetch_and_store.go @@ -26,12 +26,12 @@ const ( // FetchAndStore fetches all keys from the queries TODO replicate list, fetches value from respective nodes and stores them in the queries store func (s *DHT) FetchAndStore(ctx context.Context) error { - logtrace.Info(ctx, "Getting fetch and store keys", logtrace.Fields{}) + logtrace.Debug(ctx, "Getting fetch and store keys", logtrace.Fields{}) keys, err := s.store.GetAllToDoRepKeys(failedKeysClosestContactsLookupCount+maxBatchAttempts+1, totalMaxAttempts) if err != nil { return fmt.Errorf("get all keys error: %w", err) } - logtrace.Info(ctx, "got keys from queries store", logtrace.Fields{"count": len(keys)}) + logtrace.Debug(ctx, "got keys from queries store", logtrace.Fields{"count": len(keys)}) if len(keys) == 0 { return nil @@ -79,7 +79,7 @@ func (s *DHT) FetchAndStore(ctx context.Context) error { return } - logtrace.Info(cctx, "iterate fetch for replication success", logtrace.Fields{"key": info.Key, "ip": info.IP}) + logtrace.Debug(cctx, "iterate fetch for replication success", logtrace.Fields{"key": info.Key, "ip": info.IP}) } if err := s.store.Store(cctx, sKey, value, 0, false); err != nil { @@ -94,7 +94,7 @@ func (s *DHT) FetchAndStore(ctx context.Context) error { atomic.AddInt32(&successCounter, 1) // Increment the counter atomically - logtrace.Info(cctx, "fetch & store key success", 
logtrace.Fields{"key": info.Key, "ip": info.IP}) + logtrace.Debug(cctx, "fetch & store key success", logtrace.Fields{"key": info.Key, "ip": info.IP}) }(key) time.Sleep(100 * time.Millisecond) @@ -102,7 +102,7 @@ func (s *DHT) FetchAndStore(ctx context.Context) error { //wg.Wait() - logtrace.Info(ctx, "Successfully fetched & stored keys", logtrace.Fields{"todo-keys": len(keys), "successfully-added-keys": atomic.LoadInt32(&successCounter)}) // Log the final count + logtrace.Debug(ctx, "Successfully fetched & stored keys", logtrace.Fields{"todo-keys": len(keys), "successfully-added-keys": atomic.LoadInt32(&successCounter)}) // Log the final count return nil } @@ -114,7 +114,7 @@ func (s *DHT) BatchFetchAndStoreFailedKeys(ctx context.Context) error { if err != nil { return fmt.Errorf("get all keys error: %w", err) } - logtrace.Info(ctx, "read failed keys from store", logtrace.Fields{"count": len(keys)}) + logtrace.Debug(ctx, "read failed keys from store", logtrace.Fields{"count": len(keys)}) if len(keys) == 0 { return nil @@ -143,7 +143,7 @@ func (s *DHT) BatchFetchAndStoreFailedKeys(ctx context.Context) error { repKeys = append(repKeys, repKey) } } - logtrace.Info(ctx, "got 2nd tier replication keys from queries store", logtrace.Fields{"count": len(repKeys)}) + logtrace.Debug(ctx, "got 2nd tier replication keys from queries store", logtrace.Fields{"count": len(repKeys)}) if err := s.GroupAndBatchFetch(ctx, repKeys, 0, false); err != nil { logtrace.Error(ctx, "group and batch fetch failed-keys error", logtrace.Fields{logtrace.FieldError: err}) @@ -160,7 +160,7 @@ func (s *DHT) BatchFetchAndStore(ctx context.Context) error { if err != nil { return fmt.Errorf("get all keys error: %w", err) } - logtrace.Info(ctx, "got batch todo rep-keys from queries store", logtrace.Fields{"count": len(keys)}) + logtrace.Debug(ctx, "got batch todo rep-keys from queries store", logtrace.Fields{"count": len(keys)}) if len(keys) == 0 { return nil @@ -213,12 +213,12 @@ func (s *DHT) 
GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, totalKeysFound := 0 for len(stringKeys) > 0 && iterations < maxSingleBatchIterations { iterations++ - logtrace.Info(ctx, "fetching batch values from node", logtrace.Fields{"node-ip": node.IP, "count": len(stringKeys), "keys[0]": stringKeys[0], "keys[len()]": stringKeys[len(stringKeys)-1]}) + logtrace.Debug(ctx, "fetching batch values from node", logtrace.Fields{"node-ip": node.IP, "count": len(stringKeys), "keys[0]": stringKeys[0], "keys[len()]": stringKeys[len(stringKeys)-1]}) isDone, retMap, failedKeys, err := s.GetBatchValuesFromNode(ctx, stringKeys, node) if err != nil { // Log the error but don't stop the process, continue to the next node - logtrace.Info(ctx, "failed to get batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) + logtrace.Debug(ctx, "failed to get batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) continue } @@ -238,7 +238,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, err = s.store.StoreBatch(ctx, response, datatype, isOriginal) if err != nil { // Log the error but don't stop the process, continue to the next node - logtrace.Info(ctx, "failed to store batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) + logtrace.Debug(ctx, "failed to store batch values", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) continue } @@ -246,7 +246,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, err = s.store.BatchDeleteRepKeys(stringDelKeys) if err != nil { // Log the error but don't stop the process, continue to the next node - logtrace.Info(ctx, "failed to delete rep keys", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) + logtrace.Debug(ctx, "failed to delete rep keys", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) continue } } else { @@ -255,7 +255,7 @@ func (s *DHT) GroupAndBatchFetch(ctx 
context.Context, repKeys []domain.ToRepKey, if isDone && len(failedKeys) > 0 { if err := s.store.IncrementAttempts(failedKeys); err != nil { - logtrace.Info(ctx, "failed to increment attempts", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) + logtrace.Debug(ctx, "failed to increment attempts", logtrace.Fields{"node-ip": node.IP, logtrace.FieldError: err}) // not adding 'continue' here because we want to delete the keys from the todo list } } else if isDone { @@ -265,7 +265,7 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, } } - logtrace.Info(ctx, "fetch batch values from node successfully", logtrace.Fields{"node-ip": node.IP, "count": totalKeysFound, "iterations": iterations}) + logtrace.Debug(ctx, "fetch batch values from node successfully", logtrace.Fields{"node-ip": node.IP, "count": totalKeysFound, "iterations": iterations}) } } @@ -274,7 +274,9 @@ func (s *DHT) GroupAndBatchFetch(ctx context.Context, repKeys []domain.ToRepKey, // GetBatchValuesFromNode get values from node in bateches func (s *DHT) GetBatchValuesFromNode(ctx context.Context, keys []string, n *Node) (bool, map[string][]byte, []string, error) { - logtrace.Info(ctx, "sending batch fetch request", logtrace.Fields{"node-ip": n.IP, "keys": len(keys)}) + logtrace.Debug(ctx, "sending batch fetch request", logtrace.Fields{"node-ip": n.IP, "keys": len(keys)}) + // Minimal per-RPC visibility for background replication path + logtrace.Debug(ctx, "RPC BatchFindValues send", logtrace.Fields{"node": n.String(), "keys": len(keys)}) messageType := BatchFindValues @@ -347,8 +349,9 @@ func (s *DHT) GetBatchValuesFromNode(ctx context.Context, keys []string, n *Node if err != nil { return isDone, nil, nil, fmt.Errorf("failed to verify and filter data: %w", err) } - logtrace.Info(ctx, "batch fetch response rcvd and keys verified", logtrace.Fields{"node-ip": n.IP, "received-keys": len(decompressedMap), "verified-keys": len(retMap), "failed-keys": len(failedKeys)}) 
+ logtrace.Debug(ctx, "batch fetch response rcvd and keys verified", logtrace.Fields{"node-ip": n.IP, "received-keys": len(decompressedMap), "verified-keys": len(retMap), "failed-keys": len(failedKeys)}) + logtrace.Debug(ctx, "RPC BatchFindValues completed", logtrace.Fields{"node": n.String(), "received_keys": len(decompressedMap), "verified_keys": len(retMap)}) return v.Done, retMap, failedKeys, nil } diff --git a/p2p/kademlia/message.go b/p2p/kademlia/message.go index 0baef37c..4f778d1f 100644 --- a/p2p/kademlia/message.go +++ b/p2p/kademlia/message.go @@ -66,6 +66,11 @@ type Message struct { Receiver *Node // the receiver node MessageType int // the message type Data interface{} // the real data for the request + // CorrelationID carries a best-effort trace identifier so that logs + // across nodes can be joined in external systems. + CorrelationID string + // Origin carries the phase that produced this message (first_pass | worker | download) + Origin string } func (m *Message) String() string { diff --git a/p2p/kademlia/network.go b/p2p/kademlia/network.go index 935d1583..a5ae39ee 100644 --- a/p2p/kademlia/network.go +++ b/p2p/kademlia/network.go @@ -28,10 +28,10 @@ import ( const ( defaultConnRate = 1000 - defaultMaxPayloadSize = 200 // MB + defaultMaxPayloadSize = 400 // MB errorBusy = "Busy" maxConcurrentFindBatchValsRequests = 25 - defaultExecTimeout = 10 * time.Second + defaultExecTimeout = 15 * time.Second ) // Global map for message type timeouts @@ -68,13 +68,6 @@ type Network struct { sem *semaphore.Weighted metrics sync.Map - - // recent request tracking (last 10 entries overall and per IP) - recentMu sync.Mutex - recentStoreOverall []RecentBatchStoreEntry - recentStoreByIP map[string][]RecentBatchStoreEntry - recentRetrieveOverall []RecentBatchRetrieveEntry - recentRetrieveByIP map[string][]RecentBatchRetrieveEntry } // NewNetwork returns a network service @@ -196,7 +189,7 @@ func (s *Network) handleFindValue(ctx context.Context, message *Message) 
(res [] request, ok := message.Data.(*FindValueRequest) if !ok { err := errors.New("invalid FindValueRequest") - return s.generateResponseMessage(FindValue, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, FindValue, message.Sender, ResultFailed, err.Error()) } // add the sender to queries hash table @@ -251,7 +244,7 @@ func (s *Network) handleStoreData(ctx context.Context, message *Message) (res [] request, ok := message.Data.(*StoreDataRequest) if !ok { err := errors.New("invalid StoreDataRequest") - return s.generateResponseMessage(StoreData, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, StoreData, message.Sender, ResultFailed, err.Error()) } logtrace.Debug(ctx, "Handle store data", logtrace.Fields{logtrace.FieldModule: "p2p", "message": message.String()}) @@ -267,7 +260,7 @@ func (s *Network) handleStoreData(ctx context.Context, message *Message) (res [] // store the data to queries storage if err := s.dht.store.Store(ctx, key, request.Data, request.Type, false); err != nil { err = errors.Errorf("store the data: %w", err) - return s.generateResponseMessage(StoreData, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, StoreData, message.Sender, ResultFailed, err.Error()) } } @@ -292,13 +285,13 @@ func (s *Network) handleReplicate(ctx context.Context, message *Message) (res [] request, ok := message.Data.(*ReplicateDataRequest) if !ok { err := errors.New("invalid ReplicateDataRequest") - return s.generateResponseMessage(Replicate, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, Replicate, message.Sender, ResultFailed, err.Error()) } logtrace.Debug(ctx, "Handle replicate data", logtrace.Fields{logtrace.FieldModule: "p2p", "message": message.String()}) if err := s.handleReplicateRequest(ctx, request, message.Sender.ID, message.Sender.IP, message.Sender.Port); err != nil { - return s.generateResponseMessage(Replicate, 
message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, Replicate, message.Sender, ResultFailed, err.Error()) } response := &ReplicateDataResponse{ @@ -336,7 +329,7 @@ func (s *Network) handleReplicateRequest(ctx context.Context, req *ReplicateData return fmt.Errorf("unable to store batch replication keys: %w", err) } - logtrace.Info(ctx, "Store batch replication keys stored", logtrace.Fields{ + logtrace.Debug(ctx, "Store batch replication keys stored", logtrace.Fields{ logtrace.FieldModule: "p2p", "to-store-keys": len(keysToStore), "rcvd-keys": len(req.Keys), @@ -347,7 +340,7 @@ func (s *Network) handleReplicateRequest(ctx context.Context, req *ReplicateData return nil } -func (s *Network) handlePing(_ context.Context, message *Message) ([]byte, error) { +func (s *Network) handlePing(ctx context.Context, message *Message) ([]byte, error) { // new a response message resMsg := s.dht.newMessage(Ping, message.Sender, nil) @@ -412,6 +405,30 @@ func (s *Network) handleConn(ctx context.Context, rawConn net.Conn) { }) return } + // stitch correlation + origin into context for downstream handler logs + if request != nil { + if s := strings.TrimSpace(request.CorrelationID); s != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, s) + } + if o := strings.TrimSpace(request.Origin); o != "" { + ctx = logtrace.CtxWithOrigin(ctx, o) + } + } + + // Minimum-version gating: reject immediately if peer is below configured minimum + var senderVer string + if request != nil && request.Sender != nil { + senderVer = request.Sender.Version + } + if minRequired, tooOld := versionTooOld(senderVer); tooOld { + logtrace.Debug(ctx, "Rejecting connection: peer below minimum version", logtrace.Fields{ + logtrace.FieldModule: "p2p", + "min_required": minRequired, + "peer_version": strings.TrimSpace(senderVer), + }) + return + } + reqID := uuid.New().String() mt := request.MessageType @@ -592,6 +609,33 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong 
bool) (*Mes // pool key: bech32@ip:port (bech32 identity is your invariant) idStr := string(request.Receiver.ID) remoteAddr := fmt.Sprintf("%s@%s:%d", idStr, strings.TrimSpace(request.Receiver.IP), request.Receiver.Port) + // Log raw RPC start (reduce noise: Info only for high-signal messages) + startFields := logtrace.Fields{ + logtrace.FieldModule: "p2p", + "remote": remoteAddr, + "message": msgName(request.MessageType), + "timeout_ms": int64(timeout / time.Millisecond), + } + // Tag role/origin for filtering + startFields[logtrace.FieldRole] = "client" + if o := logtrace.OriginFromContext(ctx); o != "" { + startFields[logtrace.FieldOrigin] = o + } + if isHighSignalMsg(request.MessageType) { + logtrace.Info(ctx, fmt.Sprintf("RPC %s start remote=%s timeout_ms=%d", msgName(request.MessageType), remoteAddr, int64(timeout/time.Millisecond)), startFields) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s start remote=%s timeout_ms=%d", msgName(request.MessageType), remoteAddr, int64(timeout/time.Millisecond)), startFields) + } + + // Attach correlation id only for high‑signal messages (store/retrieve batches) + if isHighSignalMsg(request.MessageType) { + if cid := logtrace.CorrelationIDFromContext(ctx); cid != "unknown" { + request.CorrelationID = cid + } + if o := logtrace.OriginFromContext(ctx); o != "" { + request.Origin = o + } + } // try get from pool s.connPoolMtx.Lock() @@ -633,6 +677,7 @@ func (s *Network) Call(ctx context.Context, request *Message, isLong bool) (*Mes // ---- retryable RPC helpers ------------------------------------------------- func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAddr string, data []byte, timeout time.Duration, msgType int) (*Message, error) { + start := time.Now() writeDL := calcWriteDeadline(timeout, len(data), 1.0) // target ~1 MB/s retried := false @@ -649,7 +694,7 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd if _, e := cw.secureConn.Write(data); e != nil 
{ cw.mtx.Unlock() if isStaleConnError(e) && !retried { - logtrace.Info(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ + logtrace.Debug(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ logtrace.FieldModule: "p2p", "remote": remoteAddr, "message_type": msgType, @@ -690,7 +735,7 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd cw.mtx.Unlock() if e != nil { if isStaleConnError(e) && !retried { - logtrace.Info(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{ + logtrace.Debug(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{ logtrace.FieldModule: "p2p", "remote": remoteAddr, "message_type": msgType, @@ -717,11 +762,22 @@ func (s *Network) rpcOnceWrapper(ctx context.Context, cw *connWrapper, remoteAdd s.dropFromPool(remoteAddr, cw) return nil, errors.Errorf("conn read: %w", e) } + // Single-line completion for successful outbound RPC + if isHighSignalMsg(msgType) { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) + } return r, nil } } func (s *Network) rpcOnceNonWrapper(ctx context.Context, conn net.Conn, remoteAddr string, data []byte, timeout time.Duration, msgType int) (*Message, error) { + start := time.Now() sizeMB := float64(len(data)) / (1024.0 * 1024.0) // data is your gob-encoded message throughputFloor := 8.0 // 
MB/s (~64 Mbps) est := time.Duration(sizeMB / throughputFloor * float64(time.Second)) @@ -743,7 +799,7 @@ Retry: } if _, err := conn.Write(data); err != nil { if isStaleConnError(err) && !retried { - logtrace.Info(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ + logtrace.Debug(ctx, "Stale pooled connection on write; redialing", logtrace.Fields{ logtrace.FieldModule: "p2p", "remote": remoteAddr, "message_type": msgType, @@ -777,7 +833,7 @@ Retry: _ = conn.SetDeadline(time.Time{}) if err != nil { if isStaleConnError(err) && !retried { - logtrace.Info(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{ + logtrace.Debug(ctx, "Stale pooled connection on read; redialing", logtrace.Fields{ logtrace.FieldModule: "p2p", "remote": remoteAddr, "message_type": msgType, @@ -801,6 +857,15 @@ Retry: s.dropFromPool(remoteAddr, conn) return nil, errors.Errorf("conn read: %w", err) } + if isHighSignalMsg(msgType) { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), f) + } else { + logtrace.Debug(ctx, fmt.Sprintf("RPC %s ok remote=%s ms=%d", msgName(msgType), remoteAddr, time.Since(start).Milliseconds()), logtrace.Fields{logtrace.FieldModule: "p2p", "remote": remoteAddr, "message": msgName(msgType), "ms": time.Since(start).Milliseconds(), logtrace.FieldRole: "client"}) + } return resp, nil } @@ -841,16 +906,16 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r // Try to acquire the semaphore, wait up to 1 minute logtrace.Debug(ctx, "Attempting to acquire semaphore immediately", logtrace.Fields{logtrace.FieldModule: "p2p"}) if !s.sem.TryAcquire(1) { - logtrace.Info(ctx, "Immediate 
acquisition failed. Waiting up to 1 minute", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "Immediate acquisition failed. Waiting up to 1 minute", logtrace.Fields{logtrace.FieldModule: "p2p"}) ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Minute) defer cancel() if err := s.sem.Acquire(ctxWithTimeout, 1); err != nil { logtrace.Error(ctx, "Failed to acquire semaphore within 1 minute", logtrace.Fields{logtrace.FieldModule: "p2p"}) // failed to acquire semaphore within 1 minute - return s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, errorBusy) + return s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, errorBusy) } - logtrace.Info(ctx, "Semaphore acquired after waiting", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "Semaphore acquired after waiting", logtrace.Fields{logtrace.FieldModule: "p2p"}) } // Add a defer function to recover from panic @@ -874,18 +939,18 @@ func (s *Network) handleBatchFindValues(ctx context.Context, message *Message, r err = errors.New("unknown error") } - res, _ = s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, err.Error()) + res, _ = s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, err.Error()) } }() request, ok := message.Data.(*BatchFindValuesRequest) if !ok { - return s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, "invalid BatchFindValueRequest") + return s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, "invalid BatchFindValueRequest") } isDone, data, err := s.handleBatchFindValuesRequest(ctx, request, message.Sender.IP, reqID) if err != nil { - return s.generateResponseMessage(BatchFindValues, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchFindValues, message.Sender, ResultFailed, err.Error()) } response := &BatchFindValuesResponse{ @@ -897,46 +962,24 @@ func (s *Network) 
handleBatchFindValues(ctx context.Context, message *Message, r } resMsg := s.dht.newMessage(BatchFindValues, message.Sender, response) + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) return s.encodeMesage(resMsg) } func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, reqID string) (res []byte, err error) { - start := time.Now() - appended := false defer func() { if response, err := s.handlePanic(ctx, message.Sender, BatchGetValues); response != nil || err != nil { res = response - if !appended { - s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Requested: 0, - Found: 0, - DurationMS: time.Since(start).Milliseconds(), - Error: "panic/recovered", - }) - } } }() request, ok := message.Data.(*BatchGetValuesRequest) if !ok { err := errors.New("invalid BatchGetValuesRequest") - s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Requested: 0, - Found: 0, - DurationMS: time.Since(start).Milliseconds(), - Error: err.Error(), - }) - appended = true - return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) } - logtrace.Info(ctx, "Batch get values request received", logtrace.Fields{ + logtrace.Debug(ctx, "Batch get values request received", logtrace.Fields{ logtrace.FieldModule: "p2p", "from": message.Sender.String(), }) @@ -953,25 +996,16 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, values, count, err := s.dht.store.RetrieveBatchValues(ctx, keys, false) if err != nil { err = errors.Errorf("batch find values: %w", err) - s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ - TimeUnix: 
time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Requested: len(keys), - Found: count, - DurationMS: time.Since(start).Milliseconds(), - Error: err.Error(), - }) - appended = true - return s.generateResponseMessage(BatchGetValues, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchGetValues, message.Sender, ResultFailed, err.Error()) } - logtrace.Info(ctx, "Batch get values request processed", logtrace.Fields{ - logtrace.FieldModule: "p2p", - "requested-keys": len(keys), - "found": count, - "sender": message.Sender.String(), - }) + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "requested-keys": len(keys), "found": count, "sender": message.Sender.String(), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "network: batch get values ok", f) + } for i, key := range keys { val := KeyValWithClosest{ @@ -991,22 +1025,13 @@ func (s *Network) handleGetValuesRequest(ctx context.Context, message *Message, // new a response message resMsg := s.dht.newMessage(BatchGetValues, message.Sender, response) - s.appendRetrieveEntry(message.Sender.IP, RecentBatchRetrieveEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Requested: len(keys), - Found: count, - DurationMS: time.Since(start).Milliseconds(), - Error: "", - }) - appended = true + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) return s.encodeMesage(resMsg) } func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFindValuesRequest, ip string, reqID string) (isDone bool, compressedData []byte, err error) { // log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("keys", len(req.Keys)).WithField("from-ip", ip).Info("batch find values request received") - logtrace.Info(ctx, "Batch find values request received", logtrace.Fields{ + 
logtrace.Debug(ctx, "Batch find values request received", logtrace.Fields{ logtrace.FieldModule: "p2p", "from": ip, "keys": len(req.Keys), @@ -1029,7 +1054,7 @@ func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFi return false, nil, fmt.Errorf("failed to retrieve batch values: %w", err) } // log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("values-len", len(values)).WithField("found", count).WithField("from-ip", ip).Info("batch find values request processed") - logtrace.Info(ctx, "Batch find values request processed", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find values request processed", logtrace.Fields{ logtrace.FieldModule: "p2p", "p2p-req-id": reqID, "values-len": len(values), @@ -1044,7 +1069,7 @@ func (s *Network) handleBatchFindValuesRequest(ctx context.Context, req *BatchFi // log.WithContext(ctx).WithField("p2p-req-id", reqID).WithField("compressed-data-len", utils.BytesToMB(uint64(len(compressedData)))).WithField("found", count). // WithField("from-ip", ip).Info("batch find values response sent") - logtrace.Info(ctx, "Batch find values response sent", logtrace.Fields{ + logtrace.Debug(ctx, "Batch find values response sent", logtrace.Fields{ logtrace.FieldModule: "p2p", "p2p-req-id": reqID, "compressed-data-len": utils.BytesToMB(uint64(len(compressedData))), @@ -1172,64 +1197,33 @@ func findTopHeaviestKeys(dataMap map[string][]byte, size int) (int, []string) { } func (s *Network) handleBatchStoreData(ctx context.Context, message *Message) (res []byte, err error) { - start := time.Now() - appended := false defer func() { if response, err := s.handlePanic(ctx, message.Sender, BatchStoreData); response != nil || err != nil { res = response - if !appended { - s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Keys: 0, - DurationMS: time.Since(start).Milliseconds(), - OK: false, - Error: 
"panic/recovered", - }) - } } }() request, ok := message.Data.(*BatchStoreDataRequest) if !ok { err := errors.New("invalid BatchStoreDataRequest") - s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Keys: 0, - DurationMS: time.Since(start).Milliseconds(), - OK: false, - Error: err.Error(), - }) - appended = true - return s.generateResponseMessage(BatchStoreData, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) } // log.P2P().WithContext(ctx).Info("handle batch store data request received") - logtrace.Info(ctx, "Handle batch store data request received", logtrace.Fields{ - logtrace.FieldModule: "p2p", - "sender": message.Sender.String(), - "keys": len(request.Data), - }) + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "network: batch store recv", f) + } // add the sender to queries hash table s.dht.addNode(ctx, message.Sender) if err := s.dht.store.StoreBatch(ctx, request.Data, 1, false); err != nil { err = errors.Errorf("batch store the data: %w", err) - s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Keys: len(request.Data), - DurationMS: time.Since(start).Milliseconds(), - OK: false, - Error: err.Error(), - }) - appended = true - return s.generateResponseMessage(BatchStoreData, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchStoreData, message.Sender, ResultFailed, err.Error()) } response := &StoreDataResponse{ @@ -1238,24 +1232,17 @@ func (s *Network) handleBatchStoreData(ctx 
context.Context, message *Message) (r }, } // log.P2P().WithContext(ctx).Info("handle batch store data request processed") - logtrace.Info(ctx, "Handle batch store data request processed", logtrace.Fields{ - logtrace.FieldModule: "p2p", - "sender": message.Sender.String(), - "keys": len(request.Data), - }) + { + f := logtrace.Fields{logtrace.FieldModule: "p2p", "sender": message.Sender.String(), "keys": len(request.Data), logtrace.FieldRole: "server"} + if o := logtrace.OriginFromContext(ctx); o != "" { + f[logtrace.FieldOrigin] = o + } + logtrace.Info(ctx, "network: batch store ok", f) + } // new a response message resMsg := s.dht.newMessage(BatchStoreData, message.Sender, response) - s.appendStoreEntry(message.Sender.IP, RecentBatchStoreEntry{ - TimeUnix: time.Now().UTC().Unix(), - SenderID: string(message.Sender.ID), - SenderIP: message.Sender.IP, - Keys: len(request.Data), - DurationMS: time.Since(start).Milliseconds(), - OK: true, - Error: "", - }) - appended = true + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) return s.encodeMesage(resMsg) } @@ -1269,7 +1256,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re request, ok := message.Data.(*BatchFindNodeRequest) if !ok { err := errors.New("invalid FindNodeRequest") - return s.generateResponseMessage(BatchFindNode, message.Sender, ResultFailed, err.Error()) + return s.generateResponseMessage(ctx, BatchFindNode, message.Sender, ResultFailed, err.Error()) } // add the sender to queries hash table @@ -1283,7 +1270,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re closestMap := make(map[string][]*Node) // log.WithContext(ctx).WithField("sender", message.Sender.String()).Info("Batch Find Nodes Request Received") - logtrace.Info(ctx, "Batch Find Nodes Request Received", logtrace.Fields{ + logtrace.Debug(ctx, "Batch Find Nodes Request Received", logtrace.Fields{ logtrace.FieldModule: "p2p", "sender": message.Sender.String(), 
"hashed-targets": len(request.HashedTarget), @@ -1294,7 +1281,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re } response.ClosestNodes = closestMap // log.WithContext(ctx).WithField("sender", message.Sender.String()).Info("Batch Find Nodes Request Processed") - logtrace.Info(ctx, "Batch Find Nodes Request Processed", logtrace.Fields{ + logtrace.Debug(ctx, "Batch Find Nodes Request Processed", logtrace.Fields{ logtrace.FieldModule: "p2p", "sender": message.Sender.String(), }) @@ -1304,7 +1291,7 @@ func (s *Network) handleBatchFindNode(ctx context.Context, message *Message) (re return s.encodeMesage(resMsg) } -func (s *Network) generateResponseMessage(messageType int, receiver *Node, result ResultType, errMsg string) ([]byte, error) { +func (s *Network) generateResponseMessage(ctx context.Context, messageType int, receiver *Node, result ResultType, errMsg string) ([]byte, error) { responseStatus := ResponseStatus{ Result: result, ErrMsg: errMsg, @@ -1332,6 +1319,10 @@ func (s *Network) generateResponseMessage(messageType int, receiver *Node, resul } resMsg := s.dht.newMessage(messageType, receiver, response) + // propagate correlation id on responses too, but only for high‑signal messages + if isHighSignalMsg(messageType) { + resMsg.CorrelationID = logtrace.CorrelationIDFromContext(ctx) + } return s.encodeMesage(resMsg) } @@ -1353,7 +1344,7 @@ func (s *Network) handlePanic(ctx context.Context, sender *Node, messageType int err = errors.New("unknown error") } - if res, err := s.generateResponseMessage(messageType, sender, ResultFailed, err.Error()); err != nil { + if res, err := s.generateResponseMessage(ctx, messageType, sender, ResultFailed, err.Error()); err != nil { // log.WithContext(ctx).Errorf("Error generating response message: %v", err) logtrace.Error(ctx, "Error generating response message", logtrace.Fields{ logtrace.FieldModule: "p2p", @@ -1468,6 +1459,18 @@ func msgName(t int) string { } } +// isHighSignalMsg returns 
true for message types that are heavy and relevant +// to artefact store/retrieve visibility. Lightweight chatter like Ping or +// FindNode is excluded to avoid log noise at Info level. +func isHighSignalMsg(t int) bool { + switch t { + case BatchStoreData, BatchGetValues, BatchFindValues: + return true + default: + return false + } +} + func (s *Network) HandleMetricsSnapshot() map[string]HandleCounters { out := make(map[string]HandleCounters) s.metrics.Range(func(k, v any) bool { diff --git a/p2p/kademlia/node.go b/p2p/kademlia/node.go index b7a4baeb..51c495fd 100644 --- a/p2p/kademlia/node.go +++ b/p2p/kademlia/node.go @@ -23,6 +23,9 @@ type Node struct { // port of the node Port uint16 `json:"port,omitempty"` + // Version of the supernode binary (advertised to peers; may be used by min-version gating) + Version string `json:"version,omitempty"` + HashedID []byte } @@ -180,15 +183,13 @@ func (s *NodeList) DelNode(node *Node) { } func haveAllNodes(a, b []*Node) bool { + bSet := make(map[string]bool, len(b)) + for _, y := range b { + bSet[string(y.HashedID)] = true + } + for _, x := range a { - found := false - for _, y := range b { - if bytes.Equal(x.HashedID, y.HashedID) { - found = true - break - } - } - if !found { + if !bSet[string(x.HashedID)] { return false } } diff --git a/p2p/kademlia/node_activity.go b/p2p/kademlia/node_activity.go index cc7089d6..f2f77e69 100644 --- a/p2p/kademlia/node_activity.go +++ b/p2p/kademlia/node_activity.go @@ -25,7 +25,7 @@ func (s *DHT) checkNodeActivity(ctx context.Context) { return case <-ticker.C: if !utils.CheckInternetConnectivity() { - logtrace.Info(ctx, "no internet connectivity, not checking node activity", logtrace.Fields{}) + logtrace.Debug(ctx, "no internet connectivity, not checking node activity", logtrace.Fields{}) continue } @@ -42,7 +42,7 @@ func (s *DHT) checkNodeActivity(ctx context.Context) { var wg sync.WaitGroup for _, info := range repInfo { - info := info // capture + wg.Add(1) sem <- struct{}{} // 
acquire go func() { @@ -51,8 +51,8 @@ func (s *DHT) checkNodeActivity(ctx context.Context) { node := s.makeNode([]byte(info.ID), info.IP, info.Port) - // Short per-ping timeout (fail fast) - if err := s.pingNode(ctx, node, 3*time.Second); err != nil { + // Per-ping timeout + if err := s.pingNode(ctx, node, 5*time.Second); err != nil { s.handlePingFailure(ctx, info.Active, node, err) return } @@ -76,8 +76,15 @@ func (s *DHT) pingNode(ctx context.Context, n *Node, timeout time.Duration) erro pctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() req := s.newMessage(Ping, n, nil) - _, err := s.network.Call(pctx, req, false) - return err + resp, err := s.network.Call(pctx, req, false) + if err != nil { + return err + } + // Capture remote version from response sender for later gating + if resp != nil && resp.Sender != nil { + n.Version = resp.Sender.Version + } + return nil } func (s *DHT) handlePingFailure(ctx context.Context, wasActive bool, n *Node, err error) { @@ -115,7 +122,7 @@ func (s *DHT) handlePingSuccess(ctx context.Context, wasActive bool, n *Node) { s.ignorelist.Delete(n) if !wasActive { - logtrace.Info(ctx, "node found to be active again", logtrace.Fields{ + logtrace.Debug(ctx, "node found to be active again", logtrace.Fields{ logtrace.FieldModule: "p2p", "ip": n.IP, "node_id": string(n.ID), diff --git a/p2p/kademlia/recent.go b/p2p/kademlia/recent.go deleted file mode 100644 index 2467cf02..00000000 --- a/p2p/kademlia/recent.go +++ /dev/null @@ -1,90 +0,0 @@ -package kademlia - -import ( - "sync" - "time" -) - -// RecentBatchStoreEntry captures a handled BatchStoreData request outcome -type RecentBatchStoreEntry struct { - TimeUnix int64 `json:"time_unix"` - SenderID string `json:"sender_id"` - SenderIP string `json:"sender_ip"` - Keys int `json:"keys"` - DurationMS int64 `json:"duration_ms"` - OK bool `json:"ok"` - Error string `json:"error,omitempty"` -} - -// RecentBatchRetrieveEntry captures a handled BatchGetValues request outcome -type 
RecentBatchRetrieveEntry struct { - TimeUnix int64 `json:"time_unix"` - SenderID string `json:"sender_id"` - SenderIP string `json:"sender_ip"` - Requested int `json:"requested"` - Found int `json:"found"` - DurationMS int64 `json:"duration_ms"` - Error string `json:"error,omitempty"` -} - -func (s *Network) appendStoreEntry(ip string, e RecentBatchStoreEntry) { - s.recentMu.Lock() - defer s.recentMu.Unlock() - if s.recentStoreByIP == nil { - s.recentStoreByIP = make(map[string][]RecentBatchStoreEntry) - } - s.recentStoreOverall = append([]RecentBatchStoreEntry{e}, s.recentStoreOverall...) - if len(s.recentStoreOverall) > 10 { - s.recentStoreOverall = s.recentStoreOverall[:10] - } - lst := append([]RecentBatchStoreEntry{e}, s.recentStoreByIP[ip]...) - if len(lst) > 10 { - lst = lst[:10] - } - s.recentStoreByIP[ip] = lst -} - -func (s *Network) appendRetrieveEntry(ip string, e RecentBatchRetrieveEntry) { - s.recentMu.Lock() - defer s.recentMu.Unlock() - if s.recentRetrieveByIP == nil { - s.recentRetrieveByIP = make(map[string][]RecentBatchRetrieveEntry) - } - s.recentRetrieveOverall = append([]RecentBatchRetrieveEntry{e}, s.recentRetrieveOverall...) - if len(s.recentRetrieveOverall) > 10 { - s.recentRetrieveOverall = s.recentRetrieveOverall[:10] - } - lst := append([]RecentBatchRetrieveEntry{e}, s.recentRetrieveByIP[ip]...) - if len(lst) > 10 { - lst = lst[:10] - } - s.recentRetrieveByIP[ip] = lst -} - -// RecentBatchStoreSnapshot returns copies of recent store entries (overall and by IP) -func (s *Network) RecentBatchStoreSnapshot() (overall []RecentBatchStoreEntry, byIP map[string][]RecentBatchStoreEntry) { - s.recentMu.Lock() - defer s.recentMu.Unlock() - overall = append([]RecentBatchStoreEntry(nil), s.recentStoreOverall...) - byIP = make(map[string][]RecentBatchStoreEntry, len(s.recentStoreByIP)) - for k, v := range s.recentStoreByIP { - byIP[k] = append([]RecentBatchStoreEntry(nil), v...) 
- } - return -} - -// RecentBatchRetrieveSnapshot returns copies of recent retrieve entries (overall and by IP) -func (s *Network) RecentBatchRetrieveSnapshot() (overall []RecentBatchRetrieveEntry, byIP map[string][]RecentBatchRetrieveEntry) { - s.recentMu.Lock() - defer s.recentMu.Unlock() - overall = append([]RecentBatchRetrieveEntry(nil), s.recentRetrieveOverall...) - byIP = make(map[string][]RecentBatchRetrieveEntry, len(s.recentRetrieveByIP)) - for k, v := range s.recentRetrieveByIP { - byIP[k] = append([]RecentBatchRetrieveEntry(nil), v...) - } - return -} - -// helper to avoid unused import warning if needed -var _ = time.Now -var _ = sync.Mutex{} diff --git a/p2p/kademlia/redundant_data.go b/p2p/kademlia/redundant_data.go index bfe6947d..151269d1 100644 --- a/p2p/kademlia/redundant_data.go +++ b/p2p/kademlia/redundant_data.go @@ -13,7 +13,7 @@ import ( ) func (s *DHT) startDisabledKeysCleanupWorker(ctx context.Context) error { - logtrace.Info(ctx, "disabled keys cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "disabled keys cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { @@ -50,7 +50,7 @@ func (s *DHT) cleanupDisabledKeys(ctx context.Context) error { } func (s *DHT) startCleanupRedundantDataWorker(ctx context.Context) { - logtrace.Info(ctx, "redundant data cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "redundant data cleanup worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { @@ -66,7 +66,7 @@ func (s *DHT) startCleanupRedundantDataWorker(ctx context.Context) { func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) { from := time.Now().AddDate(-5, 0, 0) // 5 years ago - logtrace.Info(ctx, "getting all possible replication keys past five years", logtrace.Fields{logtrace.FieldModule: "p2p", "from": from}) + logtrace.Debug(ctx, "getting all possible replication keys past five years", 
logtrace.Fields{logtrace.FieldModule: "p2p", "from": from}) to := time.Now().UTC() replicationKeys := s.store.GetKeysForReplication(ctx, from, to) @@ -88,7 +88,7 @@ func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) { removeKeys := make([]domain.DelKey, 0) for key, closestContacts := range closestContactsMap { if len(closestContacts) < Alpha { - logtrace.Info(ctx, "not enough contacts to replicate", logtrace.Fields{logtrace.FieldModule: "p2p", "key": key, "closest contacts": closestContacts}) + logtrace.Debug(ctx, "not enough contacts to replicate", logtrace.Fields{logtrace.FieldModule: "p2p", "key": key, "closest contacts": closestContacts}) continue } @@ -118,9 +118,9 @@ func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) { return } - logtrace.Info(ctx, "insert del keys success", logtrace.Fields{logtrace.FieldModule: "p2p", "count-del-keys": len(insertKeys)}) + logtrace.Debug(ctx, "insert del keys success", logtrace.Fields{logtrace.FieldModule: "p2p", "count-del-keys": len(insertKeys)}) } else { - logtrace.Info(ctx, "No redundant key found to be stored in the storage", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "No redundant key found to be stored in the storage", logtrace.Fields{logtrace.FieldModule: "p2p"}) } if len(removeKeys) > 0 { @@ -133,7 +133,7 @@ func (s *DHT) cleanupRedundantDataWorker(ctx context.Context) { } func (s *DHT) startDeleteDataWorker(ctx context.Context) { - logtrace.Info(ctx, "start delete data worker", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "start delete data worker", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { diff --git a/p2p/kademlia/replication.go b/p2p/kademlia/replication.go index 5163fd0b..247f43b8 100644 --- a/p2p/kademlia/replication.go +++ b/p2p/kademlia/replication.go @@ -23,7 +23,7 @@ var ( nodeShowUpDeadline = time.Minute * 35 // check for active & inactive nodes after this interval - checkNodeActivityInterval = time.Minute * 2 + 
checkNodeActivityInterval = time.Minute * 5 defaultFetchAndStoreInterval = time.Minute * 10 @@ -34,7 +34,7 @@ var ( // StartReplicationWorker starts replication func (s *DHT) StartReplicationWorker(ctx context.Context) error { - logtrace.Info(ctx, "replication worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "replication worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) go s.checkNodeActivity(ctx) go s.StartBatchFetchAndStoreWorker(ctx) @@ -54,7 +54,7 @@ func (s *DHT) StartReplicationWorker(ctx context.Context) error { // StartBatchFetchAndStoreWorker starts replication func (s *DHT) StartBatchFetchAndStoreWorker(ctx context.Context) error { - logtrace.Info(ctx, "batch fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "batch fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { @@ -69,7 +69,7 @@ func (s *DHT) StartBatchFetchAndStoreWorker(ctx context.Context) error { // StartFailedFetchAndStoreWorker starts replication func (s *DHT) StartFailedFetchAndStoreWorker(ctx context.Context) error { - logtrace.Info(ctx, "fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "fetch and store worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { @@ -131,7 +131,7 @@ func (s *DHT) Replicate(ctx context.Context) { historicStart = time.Now().UTC().Add(-24 * time.Hour * 180) } - logtrace.Info(ctx, "replicating data", logtrace.Fields{logtrace.FieldModule: "p2p", "historic-start": historicStart}) + logtrace.Debug(ctx, "replicating data", logtrace.Fields{logtrace.FieldModule: "p2p", "historic-start": historicStart}) for i := 0; i < B; i++ { if time.Since(s.ht.refreshTime(i)) > defaultRefreshTime { @@ -150,7 +150,7 @@ func (s *DHT) Replicate(ctx context.Context) { } if len(repInfo) == 0 { - logtrace.Info(ctx, "no replication info found", logtrace.Fields{logtrace.FieldModule: 
"p2p"}) + logtrace.Debug(ctx, "no replication info found", logtrace.Fields{logtrace.FieldModule: "p2p"}) return } @@ -159,7 +159,7 @@ func (s *DHT) Replicate(ctx context.Context) { from = *repInfo[0].LastReplicatedAt } - logtrace.Info(ctx, "getting all possible replication keys", logtrace.Fields{logtrace.FieldModule: "p2p", "from": from}) + logtrace.Debug(ctx, "getting all possible replication keys", logtrace.Fields{logtrace.FieldModule: "p2p", "from": from}) to := time.Now().UTC() replicationKeys := s.store.GetKeysForReplication(ctx, from, to) @@ -199,7 +199,7 @@ func (s *DHT) Replicate(ctx context.Context) { continue } countToSendKeys := len(replicationKeys) - idx - logtrace.Info(ctx, "count of replication keys to be checked", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": countToSendKeys}) + logtrace.Debug(ctx, "count of replication keys to be checked", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": countToSendKeys}) // Preallocate a slice with a capacity equal to the number of keys. 
closestContactKeys := make([]string, 0, countToSendKeys) @@ -212,13 +212,13 @@ func (s *DHT) Replicate(ctx context.Context) { } } - logtrace.Info(ctx, "closest contact keys count", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": len(closestContactKeys)}) + logtrace.Debug(ctx, "closest contact keys count", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID), "len-rep-keys": len(closestContactKeys)}) if len(closestContactKeys) == 0 { if err := s.updateLastReplicated(ctx, info.ID, to); err != nil { logtrace.Error(ctx, "replicate update lastReplicated failed", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) } else { - logtrace.Info(ctx, "no closest keys found - replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "closest-contact-keys": 0}) + logtrace.Debug(ctx, "no closest keys found - replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "closest-contact-keys": 0}) } continue @@ -258,17 +258,17 @@ func (s *DHT) Replicate(ctx context.Context) { if err := s.updateLastReplicated(ctx, info.ID, to); err != nil { logtrace.Error(ctx, "replicate update lastReplicated failed", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) } else { - logtrace.Info(ctx, "replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "expected-rep-keys": len(closestContactKeys)}) + logtrace.Debug(ctx, "replicate update lastReplicated success", logtrace.Fields{logtrace.FieldModule: "p2p", "node": info.IP, "to": to.String(), "expected-rep-keys": len(closestContactKeys)}) } } - logtrace.Info(ctx, "Replication done", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "Replication done", 
logtrace.Fields{logtrace.FieldModule: "p2p"}) } func (s *DHT) adjustNodeKeys(ctx context.Context, from time.Time, info domain.NodeReplicationInfo) error { replicationKeys := s.store.GetKeysForReplication(ctx, from, time.Now().UTC()) - logtrace.Info(ctx, "begin adjusting node keys process for offline node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "total-rep-keys": len(replicationKeys), "from": from.String()}) + logtrace.Debug(ctx, "begin adjusting node keys process for offline node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "total-rep-keys": len(replicationKeys), "from": from.String()}) // prepare ignored nodes list but remove the node we are adjusting // because we want to find if this node was supposed to hold this key @@ -315,7 +315,7 @@ func (s *DHT) adjustNodeKeys(ctx context.Context, from time.Time, info domain.No failureCount := 0 for nodeInfoKey, keys := range nodeKeysMap { - logtrace.Info(ctx, "sending adjusted replication keys to node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "adjust-to-node": nodeInfoKey, "to-adjust-keys-len": len(keys)}) + logtrace.Debug(ctx, "sending adjusted replication keys to node", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID), "adjust-to-node": nodeInfoKey, "to-adjust-keys-len": len(keys)}) // Retrieve the node object from the key node, err := getNodeFromKey(nodeInfoKey) if err != nil { @@ -370,14 +370,14 @@ func (s *DHT) adjustNodeKeys(ctx context.Context, from time.Time, info domain.No return fmt.Errorf("replicate update isAdjusted failed: %v", err) } - logtrace.Info(ctx, "offline node was successfully adjusted", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID)}) + logtrace.Debug(ctx, "offline node was 
successfully adjusted", logtrace.Fields{logtrace.FieldModule: "p2p", "offline-node-ip": info.IP, "offline-node-id": string(info.ID)}) return nil } func isNodeGoneAndShouldBeAdjusted(lastSeen *time.Time, isAlreadyAdjusted bool) bool { if lastSeen == nil { - logtrace.Info(context.Background(), "lastSeen is nil - aborting node adjustment", logtrace.Fields{}) + logtrace.Debug(context.Background(), "lastSeen is nil - aborting node adjustment", logtrace.Fields{}) return false } @@ -396,10 +396,10 @@ func (s *DHT) checkAndAdjustNode(ctx context.Context, info domain.NodeReplicatio if err := s.store.UpdateIsAdjusted(ctx, string(info.ID), true); err != nil { logtrace.Error(ctx, "failed to update replication info, set isAdjusted to true", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err.Error(), "rep-ip": info.IP, "rep-id": string(info.ID)}) } else { - logtrace.Info(ctx, "set isAdjusted to true", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) + logtrace.Debug(ctx, "set isAdjusted to true", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) } } } - logtrace.Info(ctx, "replication node not active, skipping over it.", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) + logtrace.Debug(ctx, "replication node not active, skipping over it.", logtrace.Fields{logtrace.FieldModule: "p2p", "rep-ip": info.IP, "rep-id": string(info.ID)}) } diff --git a/p2p/kademlia/rq_symbols.go b/p2p/kademlia/rq_symbols.go index fbf6563d..7aa2c578 100644 --- a/p2p/kademlia/rq_symbols.go +++ b/p2p/kademlia/rq_symbols.go @@ -16,7 +16,8 @@ const ( ) func (s *DHT) startStoreSymbolsWorker(ctx context.Context) { - logtrace.Info(ctx, "start delete data worker", logtrace.Fields{logtrace.FieldModule: "p2p"}) + // Minimal visibility for lifecycle + each tick + logtrace.Debug(ctx, "rq_symbols worker started", logtrace.Fields{logtrace.FieldModule: "p2p"}) for { select { 
@@ -25,7 +26,7 @@ func (s *DHT) startStoreSymbolsWorker(ctx context.Context) { logtrace.Error(ctx, "store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) } case <-ctx.Done(): - logtrace.Error(ctx, "closing store symbols worker", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "rq_symbols worker stopping", logtrace.Fields{logtrace.FieldModule: "p2p"}) return } } @@ -37,13 +38,30 @@ func (s *DHT) storeSymbols(ctx context.Context) error { return fmt.Errorf("get to do store symbol dirs: %w", err) } + // Minimal visibility: how many dirs to process this tick + if len(dirs) > 0 { + logtrace.Info(ctx, "worker: symbols todo", logtrace.Fields{"count": len(dirs)}) + } + for _, dir := range dirs { - logtrace.Info(ctx, "rq_symbols worker: start scanning dir & storing raptorQ symbols", logtrace.Fields{"dir": dir, "txid": dir.TXID}) - if err := s.scanDirAndStoreSymbols(ctx, dir.Dir, dir.TXID); err != nil { - logtrace.Error(ctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) + // Use txid as correlation id so worker logs join with register flow + wctx := logtrace.CtxWithCorrelationID(ctx, dir.TXID) + // Pre-count symbols in this directory + preCount := -1 + if set, rerr := utils.ReadDirFilenames(dir.Dir); rerr == nil { + preCount = len(set) } - - logtrace.Info(ctx, "rq_symbols worker: scanned dir & stored raptorQ symbols", logtrace.Fields{"dir": dir, "txid": dir.TXID}) + start := time.Now() + logtrace.Info(wctx, "worker: dir start", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "symbols": preCount}) + if err := s.scanDirAndStoreSymbols(wctx, dir.Dir, dir.TXID); err != nil { + logtrace.Error(wctx, "scan and store symbols", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) + } + // Post-count remaining symbols + remCount := -1 + if set, rerr := utils.ReadDirFilenames(dir.Dir); rerr == nil { + remCount = len(set) + } + logtrace.Info(wctx, "worker: dir 
done", logtrace.Fields{"dir": dir.Dir, "txid": dir.TXID, "remaining": remCount, "ms": time.Since(start).Milliseconds()}) } return nil @@ -74,7 +92,7 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro if end > len(keys) { end = len(keys) } - if err := s.storeSymbolsInP2P(ctx, dir, keys[start:end]); err != nil { + if err := s.storeSymbolsInP2P(ctx, txid, dir, keys[start:end]); err != nil { return err } start = end @@ -90,16 +108,22 @@ func (s *DHT) scanDirAndStoreSymbols(ctx context.Context, dir, txid string) erro // --------------------------------------------------------------------- // 2. Load → StoreBatch → Delete for a slice of keys // --------------------------------------------------------------------- -func (s *DHT) storeSymbolsInP2P(ctx context.Context, dir string, keys []string) error { +func (s *DHT) storeSymbolsInP2P(ctx context.Context, txid, dir string, keys []string) error { + // Per-batch visibility for background worker + logtrace.Info(ctx, "worker: batch send", logtrace.Fields{"dir": dir, "keys": len(keys), logtrace.FieldTaskID: txid}) + + start := time.Now() loaded, err := utils.LoadSymbols(dir, keys) if err != nil { return fmt.Errorf("load symbols: %w", err) } - if err := s.StoreBatch(ctx, loaded, 1, dir); err != nil { + if err := s.StoreBatch(ctx, loaded, 1, txid); err != nil { return fmt.Errorf("p2p store batch: %w", err) } + logtrace.Info(ctx, "worker: batch ok", logtrace.Fields{"dir": dir, "keys": len(loaded), "ms": time.Since(start).Milliseconds(), logtrace.FieldTaskID: txid}) + if err := utils.DeleteSymbols(ctx, dir, keys); err != nil { return fmt.Errorf("delete symbols: %w", err) } diff --git a/p2p/kademlia/store/meta/meta.go b/p2p/kademlia/store/meta/meta.go index fa75dc81..c57d05a4 100644 --- a/p2p/kademlia/store/meta/meta.go +++ b/p2p/kademlia/store/meta/meta.go @@ -67,7 +67,7 @@ func NewStore(ctx context.Context, dataDir string) (*Store, error) { quit: make(chan bool), } - logtrace.Info(ctx, 
fmt.Sprintf("p2p data dir: %v", dataDir), logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, fmt.Sprintf("p2p data dir: %v", dataDir), logtrace.Fields{logtrace.FieldModule: "p2p"}) if _, err := os.Stat(dataDir); os.IsNotExist(err) { if err := os.MkdirAll(dataDir, 0750); err != nil { return nil, fmt.Errorf("mkdir %q: %w", dataDir, err) @@ -185,10 +185,10 @@ func (s *Store) startCheckpointWorker(ctx context.Context) { select { case <-ctx.Done(): - logtrace.Info(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{}) + logtrace.Debug(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{}) return case <-s.worker.quit: - logtrace.Info(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{}) + logtrace.Debug(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{}) return default: } @@ -204,10 +204,10 @@ func (s *Store) start(ctx context.Context) { logtrace.Error(ctx, "Failed to perform job", logtrace.Fields{logtrace.FieldError: err}) } case <-s.worker.quit: - logtrace.Info(ctx, "exit sqlite meta db worker - quit signal received", logtrace.Fields{}) + logtrace.Debug(ctx, "exit sqlite meta db worker - quit signal received", logtrace.Fields{}) return case <-ctx.Done(): - logtrace.Info(ctx, "exit sqlite meta db worker- ctx done signal received", logtrace.Fields{}) + logtrace.Debug(ctx, "exit sqlite meta db worker- ctx done signal received", logtrace.Fields{}) return } } diff --git a/p2p/kademlia/store/sqlite/meta_worker.go b/p2p/kademlia/store/sqlite/meta_worker.go index eb7a968f..6d1207df 100644 --- a/p2p/kademlia/store/sqlite/meta_worker.go +++ b/p2p/kademlia/store/sqlite/meta_worker.go @@ -124,7 +124,7 @@ func NewMigrationMetaStore(ctx context.Context, dataDir string, cloud cloud.Stor go handler.startLastAccessedUpdateWorker(ctx) go handler.startInsertWorker(ctx) go handler.startMigrationExecutionWorker(ctx) - logtrace.Info(ctx, "MigrationMetaStore workers started", 
logtrace.Fields{}) + logtrace.Debug(ctx, "MigrationMetaStore workers started", logtrace.Fields{}) return handler, nil } @@ -348,7 +348,7 @@ func (d *MigrationMetaStore) startLastAccessedUpdateWorker(ctx context.Context) case <-d.updateTicker.C: d.commitLastAccessedUpdates(ctx) case <-ctx.Done(): - logtrace.Info(ctx, "Shutting down last accessed update worker", logtrace.Fields{}) + logtrace.Debug(ctx, "Shutting down last accessed update worker", logtrace.Fields{}) return } } @@ -414,7 +414,7 @@ func (d *MigrationMetaStore) commitLastAccessedUpdates(ctx context.Context) { d.updates.Delete(k) } - logtrace.Info(ctx, "Committed last accessed updates", logtrace.Fields{"count": len(keysToUpdate)}) + logtrace.Debug(ctx, "Committed last accessed updates", logtrace.Fields{"count": len(keysToUpdate)}) } func PostKeysInsert(updates []UpdateMessage) { @@ -437,7 +437,7 @@ func (d *MigrationMetaStore) startInsertWorker(ctx context.Context) { case <-d.insertTicker.C: d.commitInserts(ctx) case <-ctx.Done(): - logtrace.Info(ctx, "Shutting down insert meta keys worker", logtrace.Fields{}) + logtrace.Debug(ctx, "Shutting down insert meta keys worker", logtrace.Fields{}) d.commitInserts(ctx) return } @@ -501,7 +501,7 @@ func (d *MigrationMetaStore) commitInserts(ctx context.Context) { d.inserts.Delete(k) } - logtrace.Info(ctx, "Committed inserts", logtrace.Fields{"count": len(keysToUpdate)}) + logtrace.Debug(ctx, "Committed inserts", logtrace.Fields{"count": len(keysToUpdate)}) } // startMigrationExecutionWorker starts the worker that executes a migration @@ -511,7 +511,7 @@ func (d *MigrationMetaStore) startMigrationExecutionWorker(ctx context.Context) case <-d.migrationExecutionTicker.C: d.checkAndExecuteMigration(ctx) case <-ctx.Done(): - logtrace.Info(ctx, "Shutting down data migration worker", logtrace.Fields{}) + logtrace.Debug(ctx, "Shutting down data migration worker", logtrace.Fields{}) return } } @@ -544,7 +544,7 @@ func (d *MigrationMetaStore) checkAndExecuteMigration(ctx 
context.Context) { //return //} - logtrace.Info(ctx, "Starting data migration", logtrace.Fields{"islow": isLow}) + logtrace.Debug(ctx, "Starting data migration", logtrace.Fields{"islow": isLow}) // Step 1: Fetch pending migrations var migrations Migrations @@ -553,11 +553,11 @@ func (d *MigrationMetaStore) checkAndExecuteMigration(ctx context.Context) { logtrace.Error(ctx, "Failed to fetch pending migrations", logtrace.Fields{logtrace.FieldError: err}) return } - logtrace.Info(ctx, "Fetched pending migrations", logtrace.Fields{"count": len(migrations)}) + logtrace.Debug(ctx, "Fetched pending migrations", logtrace.Fields{"count": len(migrations)}) // Iterate over each migration for _, migration := range migrations { - logtrace.Info(ctx, "Processing migration", logtrace.Fields{"migration_id": migration.ID}) + logtrace.Debug(ctx, "Processing migration", logtrace.Fields{"migration_id": migration.ID}) if err := d.ProcessMigrationInBatches(ctx, migration); err != nil { logtrace.Error(ctx, "Failed to process migration", logtrace.Fields{logtrace.FieldError: err, "migration_id": migration.ID}) @@ -579,7 +579,7 @@ func (d *MigrationMetaStore) ProcessMigrationInBatches(ctx context.Context, migr } if totalKeys < minKeysToMigrate { - logtrace.Info(ctx, "Skipping migration due to insufficient keys", logtrace.Fields{"migration_id": migration.ID, "keys-count": totalKeys}) + logtrace.Debug(ctx, "Skipping migration due to insufficient keys", logtrace.Fields{"migration_id": migration.ID, "keys-count": totalKeys}) return nil } @@ -630,7 +630,7 @@ func (d *MigrationMetaStore) ProcessMigrationInBatches(ctx context.Context, migr } } - logtrace.Info(ctx, "Migration processed successfully", logtrace.Fields{"migration_id": migration.ID, "tota-keys-count": totalKeys, "migrated_in_current_iteration": nonMigratedKeys}) + logtrace.Debug(ctx, "Migration processed successfully", logtrace.Fields{"migration_id": migration.ID, "tota-keys-count": totalKeys, "migrated_in_current_iteration": 
nonMigratedKeys}) return nil } @@ -683,7 +683,7 @@ func (d *MigrationMetaStore) uploadInBatches(ctx context.Context, keys []string, continue } - logtrace.Info(ctx, "Successfully uploaded and deleted records for batch", logtrace.Fields{"batch": i + 1, "total_batches": batches}) + logtrace.Debug(ctx, "Successfully uploaded and deleted records for batch", logtrace.Fields{"batch": i + 1, "total_batches": batches}) } return lastError @@ -823,7 +823,7 @@ func (d *MigrationMetaStore) InsertMetaMigrationData(ctx context.Context, migrat func (d *MigrationMetaStore) batchSetMigrated(keys []string) error { if len(keys) == 0 { // log.P2P().Info("no keys provided for batch update (is_migrated)") - logtrace.Info(context.Background(), "No keys provided for batch update (is_migrated)", logtrace.Fields{}) + logtrace.Debug(context.Background(), "No keys provided for batch update (is_migrated)", logtrace.Fields{}) return nil } diff --git a/p2p/kademlia/store/sqlite/sqlite.go b/p2p/kademlia/store/sqlite/sqlite.go index 71224a57..d38661d1 100644 --- a/p2p/kademlia/store/sqlite/sqlite.go +++ b/p2p/kademlia/store/sqlite/sqlite.go @@ -293,10 +293,10 @@ func (s *Store) startCheckpointWorker(ctx context.Context) { select { case <-ctx.Done(): - logtrace.Info(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{}) + logtrace.Debug(ctx, "Stopping checkpoint worker because of context cancel", logtrace.Fields{}) return case <-s.worker.quit: - logtrace.Info(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{}) + logtrace.Debug(ctx, "Stopping checkpoint worker because of quit signal", logtrace.Fields{}) return default: } @@ -312,10 +312,10 @@ func (s *Store) start(ctx context.Context) { logtrace.Error(ctx, "Failed to perform job", logtrace.Fields{logtrace.FieldError: err.Error()}) } case <-s.worker.quit: - logtrace.Info(ctx, "exit sqlite db worker - quit signal received", logtrace.Fields{}) + logtrace.Debug(ctx, "exit sqlite db worker - quit signal 
received", logtrace.Fields{}) return case <-ctx.Done(): - logtrace.Info(ctx, "exit sqlite db worker- ctx done signal received", logtrace.Fields{}) + logtrace.Debug(ctx, "exit sqlite db worker- ctx done signal received", logtrace.Fields{}) return } } @@ -737,11 +737,11 @@ func (s *Store) GetOwnCreatedAt(ctx context.Context) (time.Time, error) { func (s *Store) GetLocalKeys(from time.Time, to time.Time) ([]string, error) { var keys []string ctx := context.Background() - logtrace.Info(ctx, "getting all keys for SC", logtrace.Fields{}) + logtrace.Debug(ctx, "getting all keys for SC", logtrace.Fields{}) if err := s.db.SelectContext(ctx, &keys, `SELECT key FROM data WHERE createdAt > ? and createdAt < ?`, from, to); err != nil { return keys, fmt.Errorf("error reading all keys from database: %w", err) } - logtrace.Info(ctx, "got all keys for SC", logtrace.Fields{}) + logtrace.Debug(ctx, "got all keys for SC", logtrace.Fields{}) return keys, nil } @@ -762,7 +762,7 @@ func stringArgsToInterface(args []string) []interface{} { func batchDeleteRecords(db *sqlx.DB, keys []string) error { if len(keys) == 0 { - logtrace.Info(context.Background(), "no keys provided for batch delete", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(context.Background(), "no keys provided for batch delete", logtrace.Fields{logtrace.FieldModule: "p2p"}) return nil } total := int64(0) @@ -784,7 +784,7 @@ func batchDeleteRecords(db *sqlx.DB, keys []string) error { func batchSetMigratedRecords(db *sqlx.DB, keys []string) error { if len(keys) == 0 { - logtrace.Info(context.Background(), "no keys provided for batch update (migrated)", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(context.Background(), "no keys provided for batch update (migrated)", logtrace.Fields{logtrace.FieldModule: "p2p"}) return nil } total := int64(0) diff --git a/p2p/kademlia/version_gate.go b/p2p/kademlia/version_gate.go new file mode 100644 index 00000000..d2d1a755 --- /dev/null +++ 
b/p2p/kademlia/version_gate.go @@ -0,0 +1,112 @@ +package kademlia + +import ( + "strconv" + "strings" +) + +// localVer is the advertised version of this binary (e.g., v1.2.3), +// injected by the caller (supernode/cmd) at startup. +var localVer string + +// minVer is the optional minimum peer version to accept. If empty, gating is disabled. +var minVer string + +// SetLocalVersion sets the version this node advertises to peers. +func SetLocalVersion(v string) { + localVer = strings.TrimSpace(v) +} + +// SetMinVersion sets the optional minimum required peer version for DHT interactions. +// When empty, version gating is disabled and all peers are accepted regardless of version string. +func SetMinVersion(v string) { + minVer = strings.TrimSpace(v) +} + +// localVersion returns the configured advertised version. +func localVersion() string { return localVer } + +// minimumVersion returns the configured minimum acceptable version; empty disables gating. +func minimumVersion() string { return minVer } + +// versionTooOld reports whether the peerVersion is below the configured minimum version. +// If no minimum is configured, gating is disabled and this returns ("", false). +func versionTooOld(peerVersion string) (minRequired string, tooOld bool) { + minRequired = minimumVersion() + if strings.TrimSpace(minRequired) == "" { + // Gating disabled + return "", false + } + + // Normalize inputs (strip leading 'v' and pre-release/build metadata) + p, okP := parseSemver(peerVersion) + m, okM := parseSemver(minRequired) + if !okM { + // Misconfigured minimum; disable gating to avoid accidental network splits. + return "", false + } + if !okP { + // Peer did not provide a valid version; treat as too old under a min-version policy. 
+ return minRequired, true + } + // Compare peer >= min + if p[0] < m[0] { + return minRequired, true + } + if p[0] > m[0] { + return minRequired, false + } + if p[1] < m[1] { + return minRequired, true + } + if p[1] > m[1] { + return minRequired, false + } + if p[2] < m[2] { + return minRequired, true + } + return minRequired, false +} + +// parseSemver parses versions like "v1.2.3", "1.2.3-alpha" into [major, minor, patch]. +// Returns ok=false if no numeric major part is found. +func parseSemver(v string) ([3]int, bool) { + var out [3]int + s := strings.TrimSpace(v) + if s == "" { + return out, false + } + if s[0] == 'v' || s[0] == 'V' { + s = s[1:] + } + // Drop pre-release/build metadata + if i := strings.IndexAny(s, "-+"); i >= 0 { + s = s[:i] + } + parts := strings.Split(s, ".") + if len(parts) == 0 { + return out, false + } + // Parse up to 3 numeric parts; missing parts default to 0 + for i := 0; i < len(parts) && i < 3; i++ { + numStr := parts[i] + // Trim non-digit suffixes (e.g., "1rc1" -> "1") + j := 0 + for j < len(numStr) && numStr[j] >= '0' && numStr[j] <= '9' { + j++ + } + if j == 0 { + // No leading digits + if i == 0 { + return out, false + } + break + } + n, err := strconv.Atoi(numStr[:j]) + if err != nil { + return out, false + } + out[i] = n + } + return out, true +} diff --git a/p2p/mocks/Client.go b/p2p/mocks/Client.go index 67991025..273eb00a 100644 --- a/p2p/mocks/Client.go +++ b/p2p/mocks/Client.go @@ -16,7 +16,7 @@ type Client struct { } // BatchRetrieve provides a mock function with given fields: ctx, keys, reqCount, txID, localOnly -func (_m *Client) BatchRetrieve(ctx context.Context, keys []string, reqCount int, txID string, localOnly ...bool) (map[string][]byte, error) { +func (_m *Client) BatchRetrieve(ctx context.Context, keys []string, reqCount int, txID string, writer func(string, []byte) error, localOnly ...bool) (map[string][]byte, error) { _va := make([]interface{}, len(localOnly)) for _i := range localOnly { _va[_i] = 
localOnly[_i] diff --git a/p2p/p2p.go b/p2p/p2p.go index e3d6b40a..3d84042d 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -64,7 +64,7 @@ func (s *p2p) Run(ctx context.Context) error { logtrace.Error(ctx, "failed to run kadmelia, retrying.", logtrace.Fields{logtrace.FieldModule: "p2p", logtrace.FieldError: err}) } else { - logtrace.Info(ctx, "kadmelia started successfully", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "kadmelia started successfully", logtrace.Fields{logtrace.FieldModule: "p2p"}) return nil } } @@ -74,7 +74,7 @@ func (s *p2p) Run(ctx context.Context) error { // run the kademlia network func (s *p2p) run(ctx context.Context) error { - logtrace.Info(ctx, "Running kademlia network", logtrace.Fields{logtrace.FieldModule: "p2p"}) + logtrace.Debug(ctx, "Running kademlia network", logtrace.Fields{logtrace.FieldModule: "p2p"}) // configure the kademlia dht for p2p service if err := s.configure(ctx); err != nil { return errors.Errorf("configure kademlia dht: %w", err) @@ -95,7 +95,7 @@ func (s *p2p) run(ctx context.Context) error { } s.running = true - logtrace.Info(ctx, "p2p service is started", logtrace.Fields{}) + logtrace.Debug(ctx, "p2p service is started", logtrace.Fields{}) // block until context is done <-ctx.Done() @@ -103,7 +103,7 @@ func (s *p2p) run(ctx context.Context) error { // stop the node for kademlia network s.dht.Stop(ctx) - logtrace.Info(ctx, "p2p service is stopped", logtrace.Fields{}) + logtrace.Debug(ctx, "p2p service is stopped", logtrace.Fields{}) return nil } @@ -137,13 +137,13 @@ func (s *p2p) Retrieve(ctx context.Context, key string, localOnly ...bool) ([]by } // BatchRetrieve retrive the data from the kademlia network -func (s *p2p) BatchRetrieve(ctx context.Context, keys []string, reqCount int, txID string, localOnly ...bool) (map[string][]byte, error) { +func (s *p2p) BatchRetrieve(ctx context.Context, keys []string, reqCount int, txID string, writer func(symbolID string, data []byte) error, localOnly 
...bool) (map[string][]byte, error) { if !s.running { return nil, errors.New("p2p service is not running") } - return s.dht.BatchRetrieve(ctx, keys, int32(reqCount), txID, localOnly...) + return s.dht.BatchRetrieve(ctx, keys, int32(reqCount), txID, writer, localOnly...) } // Delete delete key in queries node diff --git a/pkg/cascade/signature.go b/pkg/cascade/signature.go deleted file mode 100644 index 4cb83cf1..00000000 --- a/pkg/cascade/signature.go +++ /dev/null @@ -1,134 +0,0 @@ -package cascade - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/keyring" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/cosmos/btcutil/base58" - cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" - "lukechampine.com/blake3" -) - -// CreateLayoutSignature creates the cascade signature format for a given layout file. -// It returns the signature format and index file IDs needed for CASCADE action. 
-func CreateLayoutSignature(metadataFile codec.Layout, kr cosmoskeyring.Keyring, userKeyName string, ic uint32, maxFiles uint32) (signatureFormat string, indexFileIDs []string, err error) { - // Step 1: Convert metadata to JSON then base64 - me, err := json.Marshal(metadataFile) - if err != nil { - return "", nil, fmt.Errorf("failed to marshal metadata: %w", err) - } - layoutBase64 := base64.StdEncoding.EncodeToString(me) - - // Step 2: Sign the layout data - layoutSignature, err := keyring.SignBytes(kr, userKeyName, []byte(layoutBase64)) - if err != nil { - return "", nil, fmt.Errorf("failed to sign layout: %w", err) - } - layoutSignatureB64 := base64.StdEncoding.EncodeToString(layoutSignature) - - // Step 3: Generate redundant layout file IDs - layoutIDs := GenerateLayoutIDsBatch(layoutBase64, layoutSignatureB64, ic, maxFiles) - - // Step 4: Create index file containing layout references - indexFile := map[string]interface{}{ - "layout_ids": layoutIDs, - "layout_signature": layoutSignatureB64, - } - - // Step 5: Sign the index file - indexFileJSON, err := json.Marshal(indexFile) - if err != nil { - return "", nil, fmt.Errorf("failed to marshal index file: %w", err) - } - indexFileBase64 := base64.StdEncoding.EncodeToString(indexFileJSON) - - creatorSignature, err := keyring.SignBytes(kr, userKeyName, []byte(indexFileBase64)) - if err != nil { - return "", nil, fmt.Errorf("failed to sign index file: %w", err) - } - creatorSignatureB64 := base64.StdEncoding.EncodeToString(creatorSignature) - - // Step 6: Create final signature format - signatureFormat = fmt.Sprintf("%s.%s", indexFileBase64, creatorSignatureB64) - - // Step 7: Generate final index file IDs for submission - indexFileIDs = GenerateIndexIDsBatch(signatureFormat, ic, maxFiles) - - return signatureFormat, indexFileIDs, nil -} - -// GenerateLayoutIDsBatch generates layout IDs using the process: -// combine data -> add counter -> compress -> hash -> Base58 encode -func GenerateLayoutIDsBatch(layoutBase64, 
layoutSignatureB64 string, ic, maxFiles uint32) []string { - layoutWithSig := fmt.Sprintf("%s.%s", layoutBase64, layoutSignatureB64) - layoutIDs := make([]string, maxFiles) - - var buffer bytes.Buffer - buffer.Grow(len(layoutWithSig) + 10) - - for i := uint32(0); i < maxFiles; i++ { - // Build unique content with counter - buffer.Reset() - buffer.WriteString(layoutWithSig) - buffer.WriteByte('.') - buffer.WriteString(fmt.Sprintf("%d", ic+i)) - - // Compress for efficiency - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - continue - } - - // Hash for uniqueness - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - continue - } - - // Base58 encode for readable ID - layoutIDs[i] = base58.Encode(hash) - } - - return layoutIDs -} - -// GenerateIndexIDsBatch generates index file IDs using same process as layout IDs -func GenerateIndexIDsBatch(signatureFormat string, ic, maxFiles uint32) []string { - indexFileIDs := make([]string, maxFiles) - - var buffer bytes.Buffer - buffer.Grow(len(signatureFormat) + 10) - - for i := uint32(0); i < maxFiles; i++ { - buffer.Reset() - buffer.WriteString(signatureFormat) - buffer.WriteByte('.') - buffer.WriteString(fmt.Sprintf("%d", ic+i)) - - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - continue - } - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - continue - } - indexFileIDs[i] = base58.Encode(hash) - } - return indexFileIDs -} - -// ComputeBlake3Hash computes Blake3 hash of the given message -func ComputeBlake3Hash(msg []byte) ([]byte, error) { - hasher := blake3.New(32, nil) - if _, err := io.Copy(hasher, bytes.NewReader(msg)); err != nil { - return nil, err - } - return hasher.Sum(nil), nil -} \ No newline at end of file diff --git a/pkg/cascadekit/cascadekit_test.go b/pkg/cascadekit/cascadekit_test.go new file mode 100644 index 00000000..d3299705 --- /dev/null +++ b/pkg/cascadekit/cascadekit_test.go @@ -0,0 +1,66 @@ +package cascadekit + 
+import ( + "encoding/base64" + "testing" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/klauspost/compress/zstd" +) + +func TestExtractIndexAndCreatorSig_Strict(t *testing.T) { + // too few parts + if _, _, err := ExtractIndexAndCreatorSig("abc"); err == nil { + t.Fatalf("expected error for single segment") + } + // too many parts + if _, _, err := ExtractIndexAndCreatorSig("a.b.c"); err == nil { + t.Fatalf("expected error for three segments") + } + // exactly two parts + a, b, err := ExtractIndexAndCreatorSig("a.b") + if err != nil || a != "a" || b != "b" { + t.Fatalf("unexpected result: a=%q b=%q err=%v", a, b, err) + } +} + +func TestParseCompressedIndexFile_Strict(t *testing.T) { + idx := IndexFile{LayoutIDs: []string{"L1", "L2"}, LayoutSignature: base64.StdEncoding.EncodeToString([]byte("sig"))} + idxB64, err := EncodeIndexB64(idx) + if err != nil { + t.Fatalf("encode index: %v", err) + } + payload := []byte(idxB64 + "." + base64.StdEncoding.EncodeToString([]byte("sig2")) + ".0") + + enc, _ := zstd.NewWriter(nil) + defer enc.Close() + compressed := enc.EncodeAll(payload, nil) + + got, err := ParseCompressedIndexFile(compressed) + if err != nil { + t.Fatalf("parse compressed index: %v", err) + } + if got.LayoutSignature != idx.LayoutSignature || len(got.LayoutIDs) != 2 { + t.Fatalf("unexpected index decoded: %+v", got) + } + + // malformed: only two segments + compressedBad := enc.EncodeAll([]byte("a.b"), nil) + if _, err := ParseCompressedIndexFile(compressedBad); err == nil { + t.Fatalf("expected error for two segments") + } + // malformed: four segments + compressedBad4 := enc.EncodeAll([]byte("a.b.c.d"), nil) + if _, err := ParseCompressedIndexFile(compressedBad4); err == nil { + t.Fatalf("expected error for four segments") + } +} + +func TestVerifySingleBlock(t *testing.T) { + if err := VerifySingleBlock(codec.Layout{Blocks: []codec.Block{{}}}); err != nil { + t.Fatalf("unexpected error for single block: %v", err) + } + if err := 
VerifySingleBlock(codec.Layout{Blocks: []codec.Block{{}, {}}}); err == nil { + t.Fatalf("expected error for multi-block layout") + } +} diff --git a/pkg/cascadekit/doc.go b/pkg/cascadekit/doc.go new file mode 100644 index 00000000..326ed87c --- /dev/null +++ b/pkg/cascadekit/doc.go @@ -0,0 +1,16 @@ +// Package cascadekit provides small, pure utilities for generating, +// parsing, signing and validating Cascade artefacts used by the supernode +// register/download flows. +// +// Scope: +// - Build and sign layout metadata (RaptorQ layout) and index files +// - Generate redundant metadata files and index files + their IDs +// - Extract and decode index payloads from the on-chain index signature format string +// - Compute data hashes for request metadata +// - Verify single-block layout consistency (explicit error if more than 1 block) +// +// Non-goals: +// - No network or chain dependencies (verification is left to callers) +// - No logging; keep functions small and deterministic +// - No orchestration helpers; this package exposes building blocks only +package cascadekit diff --git a/pkg/cascadekit/hash.go b/pkg/cascadekit/hash.go new file mode 100644 index 00000000..811f32cf --- /dev/null +++ b/pkg/cascadekit/hash.go @@ -0,0 +1,17 @@ +package cascadekit + +import ( + "encoding/base64" + + "github.com/LumeraProtocol/supernode/v2/pkg/utils" +) + +// ComputeBlake3DataHashB64 computes a Blake3 hash of the input and +// returns it as a base64-encoded string. 
+func ComputeBlake3DataHashB64(data []byte) (string, error) { + h, err := utils.Blake3Hash(data) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(h), nil +} diff --git a/pkg/cascadekit/ids.go b/pkg/cascadekit/ids.go new file mode 100644 index 00000000..bd9540c9 --- /dev/null +++ b/pkg/cascadekit/ids.go @@ -0,0 +1,103 @@ +package cascadekit + +import ( + "bytes" + "strconv" + + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/cosmos/btcutil/base58" + "github.com/klauspost/compress/zstd" +) + +// GenerateLayoutIDs computes IDs for redundant layout files (not the final index IDs). +// The ID is base58(blake3(zstd(layout_signature_format.counter))). +// layoutSignatureFormat must be: base64(JSON(layout)).layout_signature_base64 +func GenerateLayoutIDs(layoutSignatureFormat string, ic, max uint32) ([]string, error) { + return generateIDs([]byte(layoutSignatureFormat), ic, max) +} + +// GenerateIndexIDs computes IDs for index files from the full index signature format string. +func GenerateIndexIDs(indexSignatureFormat string, ic, max uint32) ([]string, error) { + return generateIDs([]byte(indexSignatureFormat), ic, max) +} + +// getIDFiles generates ID files by appending a '.' and counter, compressing, +// and returning both IDs and compressed payloads. +// generateIDFiles builds compressed ID files from a base payload and returns +// both their content-addressed IDs and the compressed files themselves. +// For each counter in [ic..ic+max-1], the payload is: +// +// base + '.' + counter +// +// then zstd-compressed; the ID is base58(blake3(compressed)). 
+func generateIDFiles(base []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { + idFiles := make([][]byte, 0, max) + ids = make([]string, 0, max) + var buffer bytes.Buffer + + // Reuse a single zstd encoder across iterations + enc, zerr := zstd.NewWriter(nil) + if zerr != nil { + return ids, idFiles, errors.Errorf("compress identifiers file: %w", zerr) + } + defer enc.Close() + + for i := uint32(0); i < max; i++ { + buffer.Reset() + counter := ic + i + + buffer.Write(base) + buffer.WriteByte(SeparatorByte) + // Append counter efficiently without intermediate string + var tmp [20]byte + cnt := strconv.AppendUint(tmp[:0], uint64(counter), 10) + buffer.Write(cnt) + + compressedData := enc.EncodeAll(buffer.Bytes(), nil) + + idFiles = append(idFiles, compressedData) + + hash, err := utils.Blake3Hash(compressedData) + if err != nil { + return ids, idFiles, errors.Errorf("blake3 hash error getting an id file: %w", err) + } + + ids = append(ids, base58.Encode(hash)) + } + + return ids, idFiles, nil +} + +// generateIDs computes base58(blake3(zstd(base + '.' + counter))) for counters ic..ic+max-1. +// It reuses a single zstd encoder and avoids per-iteration heap churn. 
+func generateIDs(base []byte, ic, max uint32) ([]string, error) { + ids := make([]string, max) + + var buffer bytes.Buffer + // Reserve base length + dot + up to 10 digits + buffer.Grow(len(base) + 12) + + enc, err := zstd.NewWriter(nil) + if err != nil { + return nil, errors.Errorf("zstd encoder init: %w", err) + } + defer enc.Close() + + for i := uint32(0); i < max; i++ { + buffer.Reset() + buffer.Write(base) + buffer.WriteByte(SeparatorByte) + var tmp [20]byte + cnt := strconv.AppendUint(tmp[:0], uint64(ic+i), 10) + buffer.Write(cnt) + + compressed := enc.EncodeAll(buffer.Bytes(), nil) + h, err := utils.Blake3Hash(compressed) + if err != nil { + return nil, errors.Errorf("blake3 hash (i=%d): %w", i, err) + } + ids[i] = base58.Encode(h) + } + return ids, nil +} diff --git a/pkg/cascadekit/index.go b/pkg/cascadekit/index.go new file mode 100644 index 00000000..456b365f --- /dev/null +++ b/pkg/cascadekit/index.go @@ -0,0 +1,57 @@ +package cascadekit + +import ( + "encoding/base64" + "encoding/json" + "strings" + + "github.com/LumeraProtocol/supernode/v2/pkg/errors" +) + +// SeparatorByte is the '.' separator used when composing payloads with counters. +const SeparatorByte byte = 46 + +// IndexFile represents the structure of the index file referenced on-chain. +// The JSON fields must match the existing format. +type IndexFile struct { + Version int `json:"version,omitempty"` + LayoutIDs []string `json:"layout_ids"` + LayoutSignature string `json:"layout_signature"` +} + +// BuildIndex creates an IndexFile from layout IDs and the layout signature. +func BuildIndex(layoutIDs []string, layoutSigB64 string) IndexFile { + return IndexFile{LayoutIDs: layoutIDs, LayoutSignature: layoutSigB64} +} + +// EncodeIndexB64 marshals an index file and returns its base64-encoded JSON. 
+func EncodeIndexB64(idx IndexFile) (string, error) { + raw, err := json.Marshal(idx) + if err != nil { + return "", errors.Errorf("marshal index file: %w", err) + } + return base64.StdEncoding.EncodeToString(raw), nil +} + +// DecodeIndexB64 decodes base64(JSON(IndexFile)). +func DecodeIndexB64(data string) (IndexFile, error) { + var indexFile IndexFile + decodedData, err := base64.StdEncoding.DecodeString(data) + if err != nil { + return indexFile, errors.Errorf("failed to decode index file: %w", err) + } + if err := json.Unmarshal(decodedData, &indexFile); err != nil { + return indexFile, errors.Errorf("failed to unmarshal index file: %w", err) + } + return indexFile, nil +} + +// ExtractIndexAndCreatorSig splits a signature-format string formatted as: +// Base64(index_json).Base64(creator_signature) +func ExtractIndexAndCreatorSig(indexSignatureFormat string) (indexB64 string, creatorSigB64 string, err error) { + parts := strings.Split(indexSignatureFormat, ".") + if len(parts) != 2 { + return "", "", errors.New("invalid index signature format: expected 2 segments (index_b64.creator_sig_b64)") + } + return parts[0], parts[1], nil +} diff --git a/pkg/cascadekit/index_parse.go b/pkg/cascadekit/index_parse.go new file mode 100644 index 00000000..342728d6 --- /dev/null +++ b/pkg/cascadekit/index_parse.go @@ -0,0 +1,22 @@ +package cascadekit + +import ( + "bytes" + + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" +) + +// ParseCompressedIndexFile parses a compressed index file into an IndexFile. 
+// The compressed format is: base64(IndexJSON).creator_signature.counter +func ParseCompressedIndexFile(data []byte) (IndexFile, error) { + decompressed, err := utils.ZstdDecompress(data) + if err != nil { + return IndexFile{}, errors.Errorf("decompress index file: %w", err) + } + parts := bytes.Split(decompressed, []byte{SeparatorByte}) + if len(parts) != 3 { + return IndexFile{}, errors.New("invalid index file format: expected 3 parts (index_b64.creator_sig_b64.counter)") + } + return DecodeIndexB64(string(parts[0])) +} diff --git a/pkg/cascadekit/keyring_signatures.go b/pkg/cascadekit/keyring_signatures.go new file mode 100644 index 00000000..968af4b5 --- /dev/null +++ b/pkg/cascadekit/keyring_signatures.go @@ -0,0 +1,14 @@ +package cascadekit + +import ( + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" + cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" +) + +// CreateSignaturesWithKeyring signs layout and index using a Cosmos keyring. +// These helpers centralize keyring-backed signing for clarity. +func CreateSignaturesWithKeyring(layout codec.Layout, kr cosmoskeyring.Keyring, keyName string, ic, max uint32) (string, []string, error) { + signer := func(msg []byte) ([]byte, error) { return keyringpkg.SignBytes(kr, keyName, msg) } + return CreateSignatures(layout, signer, ic, max) +} diff --git a/pkg/cascadekit/metadata.go b/pkg/cascadekit/metadata.go new file mode 100644 index 00000000..a77ddfd4 --- /dev/null +++ b/pkg/cascadekit/metadata.go @@ -0,0 +1,17 @@ +package cascadekit + +import ( + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" +) + +// NewCascadeMetadata creates a types.CascadeMetadata for RequestAction. +// The keeper will populate rq_ids_max; rq_ids_ids is for FinalizeAction only. 
+func NewCascadeMetadata(dataHashB64, fileName string, rqIdsIc uint64, indexSignatureFormat string, public bool) actiontypes.CascadeMetadata { + return actiontypes.CascadeMetadata{ + DataHash: dataHashB64, + FileName: fileName, + RqIdsIc: rqIdsIc, + Signatures: indexSignatureFormat, + Public: public, + } +} diff --git a/pkg/cascadekit/metadata_helpers.go b/pkg/cascadekit/metadata_helpers.go new file mode 100644 index 00000000..94a20442 --- /dev/null +++ b/pkg/cascadekit/metadata_helpers.go @@ -0,0 +1,26 @@ +package cascadekit + +import ( + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/golang/protobuf/proto" +) + +// UnmarshalCascadeMetadata decodes action metadata bytes into CascadeMetadata. +func UnmarshalCascadeMetadata(raw []byte) (actiontypes.CascadeMetadata, error) { + var meta actiontypes.CascadeMetadata + if err := proto.Unmarshal(raw, &meta); err != nil { + return meta, errors.Errorf("failed to unmarshal cascade metadata: %w", err) + } + return meta, nil +} + +// VerifyB64DataHash compares a raw hash with an expected base64 string. +func VerifyB64DataHash(raw []byte, expectedB64 string) error { + b64 := utils.B64Encode(raw) + if string(b64) != expectedB64 { + return errors.New("data hash doesn't match") + } + return nil +} diff --git a/pkg/cascadekit/parsers.go b/pkg/cascadekit/parsers.go new file mode 100644 index 00000000..eb90dde0 --- /dev/null +++ b/pkg/cascadekit/parsers.go @@ -0,0 +1,39 @@ +package cascadekit + +import ( + "bytes" + "encoding/json" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" +) + +// ParseRQMetadataFile parses a compressed rq metadata file into layout, signature and counter. 
+// File format: base64(JSON(layout)).signature.counter (all parts separated by '.') +func ParseRQMetadataFile(data []byte) (layout codec.Layout, signature string, counter string, err error) { + decompressed, err := utils.ZstdDecompress(data) + if err != nil { + return layout, "", "", errors.Errorf("decompress rq metadata file: %w", err) + } + + // base64EncodeMetadata.Signature.Counter + parts := bytes.Split(decompressed, []byte{SeparatorByte}) + if len(parts) != 3 { + return layout, "", "", errors.New("invalid rq metadata format: expecting 3 parts (layout, signature, counter)") + } + + layoutJson, err := utils.B64Decode(parts[0]) + if err != nil { + return layout, "", "", errors.Errorf("base64 decode failed: %w", err) + } + + if err := json.Unmarshal(layoutJson, &layout); err != nil { + return layout, "", "", errors.Errorf("unmarshal layout: %w", err) + } + + signature = string(parts[1]) + counter = string(parts[2]) + + return layout, signature, counter, nil +} diff --git a/pkg/cascadekit/request_builder.go b/pkg/cascadekit/request_builder.go new file mode 100644 index 00000000..695e2fdf --- /dev/null +++ b/pkg/cascadekit/request_builder.go @@ -0,0 +1,23 @@ +package cascadekit + +import ( + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" +) + +// BuildCascadeRequest builds a Cascade request metadata from layout and file bytes. +// It computes blake3(data) base64, creates the index signature format and index IDs, +// and returns a CascadeMetadata ready for RequestAction. 
+func BuildCascadeRequest(layout codec.Layout, fileBytes []byte, fileName string, kr cosmoskeyring.Keyring, keyName string, ic, max uint32, public bool) (actiontypes.CascadeMetadata, []string, error) { + dataHashB64, err := ComputeBlake3DataHashB64(fileBytes) + if err != nil { + return actiontypes.CascadeMetadata{}, nil, err + } + indexSignatureFormat, indexIDs, err := CreateSignaturesWithKeyring(layout, kr, keyName, ic, max) + if err != nil { + return actiontypes.CascadeMetadata{}, nil, err + } + meta := NewCascadeMetadata(dataHashB64, fileName, uint64(ic), indexSignatureFormat, public) + return meta, indexIDs, nil +} diff --git a/pkg/cascadekit/rqid.go b/pkg/cascadekit/rqid.go new file mode 100644 index 00000000..8f6a85aa --- /dev/null +++ b/pkg/cascadekit/rqid.go @@ -0,0 +1,27 @@ +package cascadekit + +import ( + "github.com/LumeraProtocol/supernode/v2/pkg/errors" +) + +// GenerateLayoutFilesFromB64 builds redundant metadata files using a precomputed +// base64(JSON(layout)) and the layout signature, avoiding an extra JSON marshal. +// The content is: base64(JSON(layout)).layout_signature +func GenerateLayoutFilesFromB64(layoutB64 []byte, layoutSigB64 string, ic uint32, max uint32) (ids []string, files [][]byte, err error) { + enc := make([]byte, 0, len(layoutB64)+1+len(layoutSigB64)) + enc = append(enc, layoutB64...) + enc = append(enc, SeparatorByte) + enc = append(enc, []byte(layoutSigB64)...) + return generateIDFiles(enc, ic, max) +} + +// GenerateIndexFiles generates index files and their IDs from the full index signature format. 
+func GenerateIndexFiles(indexSignatureFormat string, ic uint32, max uint32) (indexIDs []string, indexFiles [][]byte, err error) { + // Use the full index signature format that matches what was sent during RequestAction + // The chain expects this exact format for ID generation + indexIDs, indexFiles, err = generateIDFiles([]byte(indexSignatureFormat), ic, max) + if err != nil { + return nil, nil, errors.Errorf("get index ID files: %w", err) + } + return indexIDs, indexFiles, nil +} diff --git a/pkg/cascadekit/serialize.go b/pkg/cascadekit/serialize.go new file mode 100644 index 00000000..21cef3d9 --- /dev/null +++ b/pkg/cascadekit/serialize.go @@ -0,0 +1,29 @@ +package cascadekit + +import ( + "encoding/base64" + "encoding/json" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" +) + +// LayoutJSON marshals a codec.Layout using the standard library encoder. +func LayoutJSON(layout codec.Layout) ([]byte, error) { + b, err := json.Marshal(layout) + if err != nil { + return nil, errors.Errorf("marshal layout: %w", err) + } + return b, nil +} + +// LayoutB64 returns base64(JSON(layout)) bytes using encoding/json for deterministic output. +func LayoutB64(layout codec.Layout) ([]byte, error) { + raw, err := LayoutJSON(layout) + if err != nil { + return nil, err + } + out := make([]byte, base64.StdEncoding.EncodedLen(len(raw))) + base64.StdEncoding.Encode(out, raw) + return out, nil +} diff --git a/pkg/cascadekit/signatures.go b/pkg/cascadekit/signatures.go new file mode 100644 index 00000000..b8a02da9 --- /dev/null +++ b/pkg/cascadekit/signatures.go @@ -0,0 +1,85 @@ +package cascadekit + +import ( + "encoding/base64" + "encoding/json" + "fmt" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" +) + +// Signer is a function that signs the provided message and returns the raw signature bytes. 
+type Signer func(msg []byte) ([]byte, error) + +// SignLayoutB64 validates single-block layout, marshals to JSON, base64-encodes it, +// and signs the base64 payload, returning both the layout base64 and signature base64. +func SignLayoutB64(layout codec.Layout, signer Signer) (layoutB64 string, layoutSigB64 string, err error) { + if len(layout.Blocks) != 1 { + return "", "", errors.New("layout must contain exactly one block") + } + + me, err := json.Marshal(layout) + if err != nil { + return "", "", errors.Errorf("marshal layout: %w", err) + } + layoutB64 = base64.StdEncoding.EncodeToString(me) + + sig, err := signer([]byte(layoutB64)) + if err != nil { + return "", "", errors.Errorf("sign layout: %w", err) + } + layoutSigB64 = base64.StdEncoding.EncodeToString(sig) + return layoutB64, layoutSigB64, nil +} + +// SignIndexB64 marshals the index to JSON, base64-encodes it, and signs the +// base64 payload, returning both the index base64 and creator-signature base64. +func SignIndexB64(idx IndexFile, signer Signer) (indexB64 string, creatorSigB64 string, err error) { + raw, err := json.Marshal(idx) + if err != nil { + return "", "", errors.Errorf("marshal index file: %w", err) + } + indexB64 = base64.StdEncoding.EncodeToString(raw) + + sig, err := signer([]byte(indexB64)) + if err != nil { + return "", "", errors.Errorf("sign index: %w", err) + } + creatorSigB64 = base64.StdEncoding.EncodeToString(sig) + return indexB64, creatorSigB64, nil +} + +// CreateSignatures produces the index signature format and index IDs: +// +// Base64(index_json).Base64(creator_signature) +// +// It validates the layout has exactly one block. 
+func CreateSignatures(layout codec.Layout, signer Signer, ic, max uint32) (indexSignatureFormat string, indexIDs []string, err error) { + layoutB64, layoutSigB64, err := SignLayoutB64(layout, signer) + if err != nil { + return "", nil, err + } + + // Generate layout IDs (not returned; used to populate the index file) + layoutSignatureFormat := layoutB64 + "." + layoutSigB64 + layoutIDs, err := GenerateLayoutIDs(layoutSignatureFormat, ic, max) + if err != nil { + return "", nil, err + } + + // Build and sign the index file + idx := BuildIndex(layoutIDs, layoutSigB64) + indexB64, creatorSigB64, err := SignIndexB64(idx, signer) + if err != nil { + return "", nil, err + } + indexSignatureFormat = fmt.Sprintf("%s.%s", indexB64, creatorSigB64) + + // Generate the index IDs (these are the RQIDs sent to chain) + indexIDs, err = GenerateIndexIDs(indexSignatureFormat, ic, max) + if err != nil { + return "", nil, err + } + return indexSignatureFormat, indexIDs, nil +} diff --git a/pkg/cascadekit/verify.go b/pkg/cascadekit/verify.go new file mode 100644 index 00000000..8fd90beb --- /dev/null +++ b/pkg/cascadekit/verify.go @@ -0,0 +1,60 @@ +package cascadekit + +import ( + "encoding/base64" + "fmt" + + actionkeeper "github.com/LumeraProtocol/lumera/x/action/v1/keeper" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" +) + +// Verifier is a function that verifies the signature over data using the signer's on-chain pubkey. +// It should return nil if signature is valid; otherwise an error. +type Verifier func(data []byte, signature []byte) error + +// VerifyStringRawOrADR36 verifies a signature over a message string in two passes: +// 1. raw: verify([]byte(message), sigRS) +// 2. ADR-36: build amino-JSON sign bytes with data = base64(message) and verify +// +// The signature is provided as base64 (DER or 64-byte r||s), and coerced to 64-byte r||s. 
+func VerifyStringRawOrADR36(message string, sigB64 string, signer string, verify Verifier) error { + sigRaw, err := base64.StdEncoding.DecodeString(sigB64) + if err != nil { + return fmt.Errorf("invalid base64 signature: %w", err) + } + sigRS, err := actionkeeper.CoerceToRS64(sigRaw) + if err != nil { + return fmt.Errorf("coerce signature: %w", err) + } + if err := verify([]byte(message), sigRS); err == nil { + return nil + } + dataB64 := base64.StdEncoding.EncodeToString([]byte(message)) + doc, err := actionkeeper.MakeADR36AminoSignBytes(signer, dataB64) + if err != nil { + return fmt.Errorf("build adr36 doc: %w", err) + } + if err := verify(doc, sigRS); err == nil { + return nil + } + return fmt.Errorf("signature verification failed") +} + +// VerifyIndex verifies the creator's signature over indexB64 (string), using the given verifier. +func VerifyIndex(indexB64 string, sigB64 string, signer string, verify Verifier) error { + return VerifyStringRawOrADR36(indexB64, sigB64, signer, verify) +} + +// VerifyLayout verifies the layout signature over base64(JSON(layout)) bytes. +func VerifyLayout(layoutB64 []byte, sigB64 string, signer string, verify Verifier) error { + return VerifyStringRawOrADR36(string(layoutB64), sigB64, signer, verify) +} + +// VerifySingleBlock ensures the RaptorQ layout contains exactly one block. +func VerifySingleBlock(layout codec.Layout) error { + if len(layout.Blocks) != 1 { + return errors.New("layout must contain exactly one block") + } + return nil +} diff --git a/pkg/codec/codec.go b/pkg/codec/codec.go index 39029569..433994e8 100644 --- a/pkg/codec/codec.go +++ b/pkg/codec/codec.go @@ -1,14 +1,13 @@ -//go:generate mockgen -destination=codec_mock.go -package=codec -source=codec.go - package codec import ( "context" ) -// EncodeResponse represents the response of the encode request. +// EncodeResponse represents the response of the encode request. +// Layout contains the single-block layout produced by the encoder. 
type EncodeResponse struct { - Metadata Layout + Layout Layout SymbolsDir string } @@ -19,7 +18,7 @@ type Layout struct { // Block is the schema for each entry in the “blocks” array. type Block struct { BlockID int `json:"block_id"` - EncoderParameters []int `json:"encoder_parameters"` + EncoderParameters []uint8 `json:"encoder_parameters"` OriginalOffset int64 `json:"original_offset"` Size int64 `json:"size"` Symbols []string `json:"symbols"` @@ -32,10 +31,24 @@ type EncodeRequest struct { Path string DataSize int } +type CreateMetadataRequest struct { + Path string +} + +// CreateMetadataResponse returns the Layout. +type CreateMetadataResponse struct { + Layout Layout +} // RaptorQ contains methods for request services from RaptorQ service. type Codec interface { - // Encode a file - Encode(ctx context.Context, req EncodeRequest) (EncodeResponse, error) - Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) + // Encode a file + Encode(ctx context.Context, req EncodeRequest) (EncodeResponse, error) + Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) + // without generating RaptorQ symbols. 
+ CreateMetadata(ctx context.Context, req CreateMetadataRequest) (CreateMetadataResponse, error) + // Streaming decode helpers for writing symbols directly to disk prior to decode + PrepareDecode(ctx context.Context, actionID string, layout Layout) (blockPaths []string, + Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *Workspace, err error) + DecodeFromPrepared(ctx context.Context, ws *Workspace, layout Layout) (DecodeResponse, error) } diff --git a/pkg/codec/codec_default_test.go b/pkg/codec/codec_default_test.go index 537a8d7d..79b97bd1 100644 --- a/pkg/codec/codec_default_test.go +++ b/pkg/codec/codec_default_test.go @@ -34,7 +34,7 @@ func TestEncode_ToDirA(t *testing.T) { t.Logf("encoded to: %s", resp.SymbolsDir) // Log theoretical minimum percentage of symbols needed per block - for _, b := range resp.Metadata.Blocks { + for _, b := range resp.Layout.Blocks { s := int64(rqSymbolSize) if s <= 0 { s = 65535 @@ -120,3 +120,36 @@ func itoa(i int) string { } return string(b[n:]) } + +// TestCreateMetadata_SaveToFile generates layout metadata only and writes it to a file. +func TestCreateMetadata_SaveToFile(t *testing.T) { + if InputPath == "" { + t.Skip("set InputPath constant to a file path to run this test") + } + + ctx := context.TODO() + c := NewRaptorQCodec(BaseDir) + + // Create metadata using the codec and write it next to the input file. 
+ resp, err := c.CreateMetadata(ctx, CreateMetadataRequest{Path: InputPath}) + if err != nil { + t.Fatalf("create metadata: %v", err) + } + data, err := json.MarshalIndent(resp.Layout, "", " ") + if err != nil { + t.Fatalf("marshal metadata: %v", err) + } + outPath := InputPath + ".layout.json" + if err := os.WriteFile(outPath, data, 0o644); err != nil { + t.Fatalf("write output: %v", err) + } + + fi, err := os.Stat(outPath) + if err != nil { + t.Fatalf("stat output: %v", err) + } + if fi.Size() == 0 { + t.Fatalf("output file is empty: %s", outPath) + } + t.Logf("metadata saved to: %s (%d bytes)", outPath, fi.Size()) +} diff --git a/pkg/codec/codec_mock.go b/pkg/codec/codec_mock.go deleted file mode 100644 index 09484cee..00000000 --- a/pkg/codec/codec_mock.go +++ /dev/null @@ -1,65 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: codec.go - -// Package codec is a generated GoMock package. -package codec - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockCodec is a mock of Codec interface. -type MockCodec struct { - ctrl *gomock.Controller - recorder *MockCodecMockRecorder -} - -// MockCodecMockRecorder is the mock recorder for MockCodec. -type MockCodecMockRecorder struct { - mock *MockCodec -} - -// NewMockCodec creates a new mock instance. -func NewMockCodec(ctrl *gomock.Controller) *MockCodec { - mock := &MockCodec{ctrl: ctrl} - mock.recorder = &MockCodecMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCodec) EXPECT() *MockCodecMockRecorder { - return m.recorder -} - -// Decode mocks base method. -func (m *MockCodec) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Decode", ctx, req) - ret0, _ := ret[0].(DecodeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Decode indicates an expected call of Decode. 
-func (mr *MockCodecMockRecorder) Decode(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decode", reflect.TypeOf((*MockCodec)(nil).Decode), ctx, req) -} - -// Encode mocks base method. -func (m *MockCodec) Encode(ctx context.Context, req EncodeRequest) (EncodeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Encode", ctx, req) - ret0, _ := ret[0].(EncodeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Encode indicates an expected call of Encode. -func (mr *MockCodecMockRecorder) Encode(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Encode", reflect.TypeOf((*MockCodec)(nil).Encode), ctx, req) -} diff --git a/pkg/codec/decode.go b/pkg/codec/decode.go index bd3b0231..251f92c4 100644 --- a/pkg/codec/decode.go +++ b/pkg/codec/decode.go @@ -49,6 +49,7 @@ func (rq *raptorQ) PrepareDecode( logtrace.FieldModule: "rq", logtrace.FieldActionID: actionID, } + logtrace.Info(ctx, "rq: prepare-decode start", fields) // Create root symbols dir for this action symbolsDir := filepath.Join(rq.symbolsBaseDir, actionID) @@ -145,10 +146,7 @@ func (rq *raptorQ) PrepareDecode( return os.RemoveAll(symbolsDir) } - logtrace.Info(ctx, "prepare decode workspace created", logtrace.Fields{ - "symbols_dir": symbolsDir, - "blocks": len(blockDirs), - }) + logtrace.Info(ctx, "rq: prepare-decode ok", logtrace.Fields{"symbols_dir": symbolsDir, "blocks": len(blockDirs)}) return blockDirs, Write, Cleanup, ws, nil } @@ -164,7 +162,7 @@ func (rq *raptorQ) DecodeFromPrepared( logtrace.FieldModule: "rq", logtrace.FieldActionID: ws.ActionID, } - logtrace.Info(ctx, "RaptorQ decode (prepared) requested", fields) + logtrace.Info(ctx, "rq: decode-from-prepared start", fields) processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) if err != nil { @@ -173,9 +171,39 @@ func (rq *raptorQ) 
DecodeFromPrepared( } defer processor.Free() - // Write layout.json (idempotent) + // Write layout.json (idempotent). Important: encoder_parameters must be a JSON array, not base64 string. + // Go's encoding/json marshals []byte (aka []uint8) as base64 strings, which rq-go rejects. + // Use a wire struct that maps encoder_parameters to []int to produce a numeric array. + type blockOnDisk struct { + BlockID int `json:"block_id"` + EncoderParameters []int `json:"encoder_parameters"` + OriginalOffset int64 `json:"original_offset"` + Size int64 `json:"size"` + Symbols []string `json:"symbols"` + Hash string `json:"hash"` + } + type layoutOnDisk struct { + Blocks []blockOnDisk `json:"blocks"` + } + var lod layoutOnDisk + lod.Blocks = make([]blockOnDisk, len(layout.Blocks)) + for i, b := range layout.Blocks { + // convert []uint8 (aka []byte) to []int so JSON encodes as numeric array + ep := make([]int, len(b.EncoderParameters)) + for j := range b.EncoderParameters { + ep[j] = int(b.EncoderParameters[j]) + } + lod.Blocks[i] = blockOnDisk{ + BlockID: b.BlockID, + EncoderParameters: ep, + OriginalOffset: b.OriginalOffset, + Size: b.Size, + Symbols: b.Symbols, + Hash: b.Hash, + } + } layoutPath := filepath.Join(ws.SymbolsDir, "layout.json") - layoutBytes, err := json.Marshal(layout) + layoutBytes, err := json.Marshal(lod) if err != nil { fields[logtrace.FieldError] = err.Error() return DecodeResponse{}, fmt.Errorf("marshal layout: %w", err) @@ -184,7 +212,7 @@ func (rq *raptorQ) DecodeFromPrepared( fields[logtrace.FieldError] = err.Error() return DecodeResponse{}, fmt.Errorf("write layout file: %w", err) } - logtrace.Info(ctx, "layout.json written (prepared)", fields) + logtrace.Info(ctx, "rq: layout written", fields) // Decode to output (idempotent-safe: overwrite on success) outputPath := filepath.Join(ws.SymbolsDir, "output") @@ -194,64 +222,63 @@ func (rq *raptorQ) DecodeFromPrepared( return DecodeResponse{}, fmt.Errorf("raptorq decode: %w", err) } - logtrace.Info(ctx, 
"RaptorQ decoding completed successfully (prepared)", logtrace.Fields{ - "output_path": outputPath, - }) + logtrace.Info(ctx, "rq: decode-from-prepared ok", logtrace.Fields{"output_path": outputPath}) return DecodeResponse{FilePath: outputPath, DecodeTmpDir: ws.SymbolsDir}, nil } func (rq *raptorQ) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) { - fields := logtrace.Fields{ - logtrace.FieldMethod: "Decode", - logtrace.FieldModule: "rq", - logtrace.FieldActionID: req.ActionID, - } - logtrace.Info(ctx, "RaptorQ decode request received", fields) + fields := logtrace.Fields{ + logtrace.FieldMethod: "Decode", + logtrace.FieldModule: "rq", + logtrace.FieldActionID: req.ActionID, + } + logtrace.Info(ctx, "rq: decode request", fields) - // 1) Validate layout (the check) - if len(req.Layout.Blocks) == 0 { - fields[logtrace.FieldError] = "empty layout" - return DecodeResponse{}, fmt.Errorf("invalid layout: no blocks present") - } - for _, blk := range req.Layout.Blocks { - if len(blk.Symbols) == 0 { - fields[logtrace.FieldError] = fmt.Sprintf("block_%d has no symbols", blk.BlockID) - return DecodeResponse{}, fmt.Errorf("invalid layout: block %d has no symbols", blk.BlockID) - } - } + // 1) Validate layout (the check) + if len(req.Layout.Blocks) == 0 { + fields[logtrace.FieldError] = "empty layout" + return DecodeResponse{}, fmt.Errorf("invalid layout: no blocks present") + } + for _, blk := range req.Layout.Blocks { + if len(blk.Symbols) == 0 { + fields[logtrace.FieldError] = fmt.Sprintf("block_%d has no symbols", blk.BlockID) + return DecodeResponse{}, fmt.Errorf("invalid layout: block %d has no symbols", blk.BlockID) + } + } - // 2) Prepare workspace (functionality) - _, Write, Cleanup, ws, err := rq.PrepareDecode(ctx, req.ActionID, req.Layout) - if err != nil { - fields[logtrace.FieldError] = err.Error() - return DecodeResponse{}, fmt.Errorf("prepare decode workspace: %w", err) - } + // 2) Prepare workspace (functionality) + _, Write, Cleanup, ws, 
err := rq.PrepareDecode(ctx, req.ActionID, req.Layout) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return DecodeResponse{}, fmt.Errorf("prepare decode workspace: %w", err) + } - // Ensure workspace cleanup on failure. On success, caller cleans up via returned path. - success := false - defer func() { - if !success && Cleanup != nil { - _ = Cleanup() - } - }() + // Ensure workspace cleanup on failure. On success, caller cleans up via returned path. + success := false + defer func() { + if !success && Cleanup != nil { + _ = Cleanup() + } + }() - // 3) Persist provided in-memory symbols via Write (functionality) - if len(req.Symbols) > 0 { - for id, data := range req.Symbols { - if _, werr := Write(-1, id, data); werr != nil { - fields[logtrace.FieldError] = werr.Error() - return DecodeResponse{}, werr - } - } - logtrace.Info(ctx, "symbols persisted via Write()", fields) - } + // 3) Persist provided in-memory symbols via Write (functionality) + if len(req.Symbols) > 0 { + for id, data := range req.Symbols { + if _, werr := Write(-1, id, data); werr != nil { + fields[logtrace.FieldError] = werr.Error() + return DecodeResponse{}, werr + } + } + logtrace.Info(ctx, "rq: symbols persisted", logtrace.Fields{"count": len(req.Symbols)}) + } - // 4) Decode using the prepared workspace (functionality) - resp, derr := rq.DecodeFromPrepared(ctx, ws, req.Layout) - if derr != nil { - fields[logtrace.FieldError] = derr.Error() - return DecodeResponse{}, derr - } - success = true - return resp, nil + // 4) Decode using the prepared workspace (functionality) + resp, derr := rq.DecodeFromPrepared(ctx, ws, req.Layout) + if derr != nil { + fields[logtrace.FieldError] = derr.Error() + return DecodeResponse{}, derr + } + success = true + logtrace.Info(ctx, "rq: decode ok", fields) + return resp, nil } diff --git a/pkg/codec/raptorq.go b/pkg/codec/raptorq.go index 4564bc1b..487f92d8 100644 --- a/pkg/codec/raptorq.go +++ b/pkg/codec/raptorq.go @@ -15,7 +15,7 @@ const ( 
rqSymbolSize uint16 = 65535 rqRedundancyFactor uint8 = 6 // Limit RaptorQ processor memory usage to ~2 GiB - rqMaxMemoryMB uint64 = 2 * 1024 // MB + rqMaxMemoryMB uint64 = 8 * 1024 // MB // Concurrency tuned for 2 GiB limit and typical 8+ core CPUs rqConcurrency uint64 = 1 // Target single-block output for up to 1 GiB files with padding headroom (~1.25 GiB) @@ -43,12 +43,13 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons "data-size": req.DataSize, } + logtrace.Info(ctx, "rq: encode start", fields) processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) if err != nil { return EncodeResponse{}, fmt.Errorf("create RaptorQ processor: %w", err) } defer processor.Free() - logtrace.Info(ctx, "RaptorQ processor created", fields) + logtrace.Debug(ctx, "RaptorQ processor created", fields) /* ---------- 1. run the encoder ---------- */ // Deterministic: force single block @@ -57,24 +58,19 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons symbolsDir := filepath.Join(rq.symbolsBaseDir, req.TaskID) if err := os.MkdirAll(symbolsDir, 0o755); err != nil { fields[logtrace.FieldError] = err.Error() - os.Remove(req.Path) return EncodeResponse{}, fmt.Errorf("mkdir %s: %w", symbolsDir, err) } - logtrace.Info(ctx, "RaptorQ processor encoding", fields) + logtrace.Debug(ctx, "RaptorQ processor encoding", fields) resp, err := processor.EncodeFile(req.Path, symbolsDir, blockSize) if err != nil { fields[logtrace.FieldError] = err.Error() - os.Remove(req.Path) return EncodeResponse{}, fmt.Errorf("raptorq encode: %w", err) } - /* we no longer need the temp file */ - // _ = os.Remove(tmpPath) - /* ---------- 2. 
read the layout JSON ---------- */ layoutData, err := os.ReadFile(resp.LayoutFilePath) - logtrace.Info(ctx, "RaptorQ processor layout file", logtrace.Fields{ + logtrace.Debug(ctx, "RaptorQ processor layout file", logtrace.Fields{ "layout-file": resp.LayoutFilePath}) if err != nil { fields[logtrace.FieldError] = err.Error() @@ -82,15 +78,77 @@ func (rq *raptorQ) Encode(ctx context.Context, req EncodeRequest) (EncodeRespons } var encodeResp EncodeResponse - if err := json.Unmarshal(layoutData, &encodeResp.Metadata); err != nil { + if err := json.Unmarshal(layoutData, &encodeResp.Layout); err != nil { return EncodeResponse{}, fmt.Errorf("unmarshal layout: %w", err) } encodeResp.SymbolsDir = symbolsDir // Enforce single-block output; abort if multiple blocks are produced - if n := len(encodeResp.Metadata.Blocks); n != 1 { + if n := len(encodeResp.Layout.Blocks); n != 1 { return EncodeResponse{}, fmt.Errorf("raptorq encode produced %d blocks; single-block layout is required", n) } - + logtrace.Info(ctx, "rq: encode ok", logtrace.Fields{"symbols_dir": encodeResp.SymbolsDir}) return encodeResp, nil } + +// CreateMetadata builds only the layout metadata for the given file without generating symbols. 
+func (rq *raptorQ) CreateMetadata(ctx context.Context, req CreateMetadataRequest) (CreateMetadataResponse, error) { + // Populate fields; include data-size by stat-ing the file to preserve existing log fields + fields := logtrace.Fields{ + logtrace.FieldMethod: "CreateMetadata", + logtrace.FieldModule: "rq", + "path": req.Path, + } + if fi, err := os.Stat(req.Path); err == nil { + fields["data-size"] = int(fi.Size()) + } + + logtrace.Info(ctx, "rq: create-metadata start", fields) + processor, err := raptorq.NewRaptorQProcessor(rqSymbolSize, rqRedundancyFactor, rqMaxMemoryMB, rqConcurrency) + if err != nil { + return CreateMetadataResponse{}, fmt.Errorf("create RaptorQ processor: %w", err) + } + defer processor.Free() + logtrace.Debug(ctx, "RaptorQ processor created", fields) + + // Deterministic: force single block + blockSize := rqBlockSize + + // Prepare a temporary path for the generated layout file + base := rq.symbolsBaseDir + if base == "" { + base = os.TempDir() + } + tmpDir, err := os.MkdirTemp(base, "rq_meta_*") + if err != nil { + fields[logtrace.FieldError] = err.Error() + return CreateMetadataResponse{}, fmt.Errorf("mkdir temp dir: %w", err) + } + defer os.RemoveAll(tmpDir) + layoutPath := filepath.Join(tmpDir, "layout.json") + + // Use rq-go's metadata-only creation; no symbols are produced here. 
+ resp, err := processor.CreateMetadata(req.Path, layoutPath, blockSize) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return CreateMetadataResponse{}, fmt.Errorf("raptorq create metadata: %w", err) + } + + layoutData, err := os.ReadFile(resp.LayoutFilePath) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return CreateMetadataResponse{}, fmt.Errorf("read layout %s: %w", resp.LayoutFilePath, err) + } + + var layout Layout + if err := json.Unmarshal(layoutData, &layout); err != nil { + return CreateMetadataResponse{}, fmt.Errorf("unmarshal layout: %w", err) + } + + // Enforce single-block output; abort if multiple blocks are produced + if n := len(layout.Blocks); n != 1 { + return CreateMetadataResponse{}, fmt.Errorf("raptorq metadata produced %d blocks; single-block layout is required", n) + } + logtrace.Info(ctx, "rq: create-metadata ok", logtrace.Fields{"blocks": len(layout.Blocks)}) + return CreateMetadataResponse{Layout: layout}, nil +} diff --git a/pkg/common/blocktracker/block_tracker.go b/pkg/common/blocktracker/block_tracker.go deleted file mode 100644 index 00f8c512..00000000 --- a/pkg/common/blocktracker/block_tracker.go +++ /dev/null @@ -1,121 +0,0 @@ -package blocktracker - -import ( - "context" - "sync" - "time" - - "github.com/LumeraProtocol/supernode/v2/pkg/errors" -) - -const ( - defaultRetries = 3 - defaultDelayDurationBetweenRetries = 5 * time.Second - defaultRPCConnectTimeout = 15 * time.Second - // Update duration in case last update was success - defaultSuccessUpdateDuration = 10 * time.Second - // Update duration in case last update was failed - prevent too much call to Lumera - defaultFailedUpdateDuration = 5 * time.Second - defaultNextBlockTimeout = 30 * time.Minute -) - -// LumeraClient defines interface functions BlockCntTracker expects from Lumera -type LumeraClient interface { - // GetBlockCount returns block height of blockchain - GetBlockCount(ctx context.Context) (int32, error) -} - -// BlockCntTracker 
defines a block tracker - that will keep current block height -type BlockCntTracker struct { - mtx sync.Mutex - LumeraClient LumeraClient - curBlockCnt int32 - lastSuccess time.Time - lastRetried time.Time - lastErr error - delayBetweenRetries time.Duration - retries int -} - -// New returns an instance of BlockCntTracker -func New(LumeraClient LumeraClient) *BlockCntTracker { - return &BlockCntTracker{ - LumeraClient: LumeraClient, - curBlockCnt: 0, - delayBetweenRetries: defaultDelayDurationBetweenRetries, - retries: defaultRetries, - } -} - -func (tracker *BlockCntTracker) refreshBlockCount(retries int) { - tracker.lastRetried = time.Now().UTC() - for i := 0; i < retries; i = i + 1 { - ctx, cancel := context.WithTimeout(context.Background(), defaultRPCConnectTimeout) - blockCnt, err := tracker.LumeraClient.GetBlockCount(ctx) - if err == nil { - tracker.curBlockCnt = blockCnt - tracker.lastSuccess = time.Now().UTC() - cancel() - tracker.lastErr = nil - return - } - cancel() - - tracker.lastErr = err - // delay between retries - time.Sleep(tracker.delayBetweenRetries) - } - -} - -// GetBlockCount return current block count -// it will get from cache if last refresh is small than defaultSuccessUpdateDuration -// or will refresh it by call from Lumera daemon to get the latest one if defaultSuccessUpdateDuration expired -func (tracker *BlockCntTracker) GetBlockCount() (int32, error) { - tracker.mtx.Lock() - defer tracker.mtx.Unlock() - - shouldRefresh := false - - if tracker.lastSuccess.After(tracker.lastRetried) { - if time.Now().UTC().After(tracker.lastSuccess.Add(defaultSuccessUpdateDuration)) { - shouldRefresh = true - } - } else { - // prevent update too much - if time.Now().UTC().After(tracker.lastRetried.Add(defaultFailedUpdateDuration)) { - shouldRefresh = true - } - } - - if shouldRefresh { - tracker.refreshBlockCount(tracker.retries) - } - - if tracker.curBlockCnt == 0 { - return 0, errors.Errorf("failed to get blockcount: %w", tracker.lastErr) - } - - 
return tracker.curBlockCnt, nil -} - -// WaitTillNextBlock will wait until next block height is greater than blockCnt -func (tracker *BlockCntTracker) WaitTillNextBlock(ctx context.Context, blockCnt int32) error { - for { - select { - case <-ctx.Done(): - return errors.Errorf("context done: %w", ctx.Err()) - case <-time.After(defaultNextBlockTimeout): - return errors.Errorf("timeout waiting for next block") - case <-time.After(defaultSuccessUpdateDuration): - curBlockCnt, err := tracker.GetBlockCount() - if err != nil { - return errors.Errorf("failed to get blockcount: %w", err) - } - - if curBlockCnt > blockCnt { - return nil - } - } - } -} diff --git a/pkg/common/blocktracker/block_tracker_test.go b/pkg/common/blocktracker/block_tracker_test.go deleted file mode 100644 index b070a4b7..00000000 --- a/pkg/common/blocktracker/block_tracker_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package blocktracker - -import ( - "context" - "errors" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -type fakePastelClient struct { - retBlockCnt int32 - retErr error -} - -func (fake *fakePastelClient) GetBlockCount(_ context.Context) (int32, error) { - return fake.retBlockCnt, fake.retErr -} - -func TestGetCountFirstTime(t *testing.T) { - tests := []struct { - name string - pastelClient *fakePastelClient - expectErr bool - }{ - { - name: "success", - pastelClient: &fakePastelClient{ - retBlockCnt: 10, - retErr: nil, - }, - expectErr: false, - }, - { - name: "fail", - pastelClient: &fakePastelClient{ - retBlockCnt: 0, - retErr: errors.New("error"), - }, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tracker := New(tt.pastelClient) - tracker.retries = 1 - blkCnt, err := tracker.GetBlockCount() - assert.Equal(t, tt.pastelClient.retBlockCnt, blkCnt) - if tt.expectErr { - assert.True(t, strings.Contains(err.Error(), tt.pastelClient.retErr.Error())) - } else { - assert.Nil(t, err) - } - }) - } -} - -func 
TestGetBlockCountNoRefresh(t *testing.T) { - pastelClient := &fakePastelClient{ - retBlockCnt: 10, - retErr: errors.New("error"), - } - - expectedBlk := int32(1) - tracker := New(pastelClient) - tracker.retries = 1 - tracker.curBlockCnt = expectedBlk - tracker.lastRetried = time.Now().UTC() - tracker.lastSuccess = time.Now().UTC() - - blkCnt, err := tracker.GetBlockCount() - assert.Equal(t, expectedBlk, blkCnt) - - assert.Nil(t, err) -} - -func TestGetBlockCountRefresh(t *testing.T) { - expectedBlk := int32(10) - pastelClient := &fakePastelClient{ - retBlockCnt: expectedBlk, - retErr: nil, - } - - tracker := New(pastelClient) - tracker.retries = 1 - tracker.curBlockCnt = 1 - tracker.lastRetried = time.Now().UTC().Add(-defaultSuccessUpdateDuration) - tracker.lastSuccess = time.Now().UTC().Add(-defaultSuccessUpdateDuration) - - blkCnt, err := tracker.GetBlockCount() - assert.Equal(t, expectedBlk, blkCnt) - - assert.Nil(t, err) -} diff --git a/pkg/common/task/action.go b/pkg/common/task/action.go deleted file mode 100644 index 227ebe35..00000000 --- a/pkg/common/task/action.go +++ /dev/null @@ -1,20 +0,0 @@ -package task - -import "context" - -// ActionFn represents a function that is run inside a goroutine. -type ActionFn func(ctx context.Context) error - -// Action represents the action of the task. -type Action struct { - fn ActionFn - doneCh chan struct{} -} - -// NewAction returns a new Action instance. 
-func NewAction(fn ActionFn) *Action { - return &Action{ - fn: fn, - doneCh: make(chan struct{}), - } -} diff --git a/pkg/common/task/state/state.go b/pkg/common/task/state/state.go deleted file mode 100644 index 05179a85..00000000 --- a/pkg/common/task/state/state.go +++ /dev/null @@ -1,174 +0,0 @@ -//go:generate mockery --name=State - -package state - -import ( - "context" - "sync" - "time" - - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" - "github.com/LumeraProtocol/supernode/v2/pkg/types" -) - -// State represents a state of the task. -type State interface { - // Status returns the current status. - Status() *Status - - // SetStatusNotifyFunc sets a function to be called after the state is updated. - SetStatusNotifyFunc(fn func(status *Status)) - - // RequiredStatus returns an error if the current status doen't match the given one. - RequiredStatus(subStatus SubStatus) error - - // StatusHistory returns all history from the very beginning. - StatusHistory() []*Status - - // UpdateStatus updates the status of the state by creating a new status with the given `status`. - UpdateStatus(subStatus SubStatus) - - // SubscribeStatus returns a new subscription of the state. 
- SubscribeStatus() func() <-chan *Status - - //SetStateLog set the wallet node task status log to the state status log - SetStateLog(statusLog types.Fields) - - //InitialiseHistoryDB sets the connection to historyDB - InitialiseHistoryDB(store queries.LocalStoreInterface) -} - -type state struct { - status *Status - history []*Status - - notifyFn func(status *Status) - sync.RWMutex - subsCh []chan *Status - taskID string - statusLog types.Fields - historyDBStore queries.LocalStoreInterface -} - -// Status implements State.Status() -func (state *state) Status() *Status { - return state.status -} - -// SetStatusNotifyFunc implements State.SetStatusNotifyFunc() -func (state *state) SetStatusNotifyFunc(fn func(status *Status)) { - state.notifyFn = fn -} - -// RequiredStatus implements State.RequiredStatus() -func (state *state) RequiredStatus(subStatus SubStatus) error { - if state.status.Is(subStatus) { - return nil - } - return errors.Errorf("required status %q, current %q", subStatus, state.status) -} - -// StatusHistory implements State.StatusHistory() -func (state *state) StatusHistory() []*Status { - state.RLock() - defer state.RUnlock() - - return append(state.history, state.status) -} - -// UpdateStatus implements State.UpdateStatus() -func (state *state) UpdateStatus(subStatus SubStatus) { - state.Lock() - defer state.Unlock() - - status := NewStatus(subStatus) - state.history = append(state.history, state.status) - state.status = status - - history := types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: state.taskID, Status: status.String()} - if state.statusLog.IsValid() { - history.Details = types.NewDetails(status.String(), state.statusLog) - } - - if state.historyDBStore != nil { - if _, err := state.historyDBStore.InsertTaskHistory(history); err != nil { - logtrace.Error(context.Background(), "unable to store task status", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } else { - store, err := queries.OpenHistoryDB() - if err != nil { - 
logtrace.Error(context.Background(), "error opening history db", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - - if store != nil { - defer store.CloseHistoryDB(context.Background()) - if _, err := store.InsertTaskHistory(history); err != nil { - logtrace.Error(context.Background(), "unable to store task status", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } - } - - if state.notifyFn != nil { - state.notifyFn(status) - } - - for _, subCh := range state.subsCh { - subCh := subCh - go func() { - subCh <- status - }() - } -} - -// SubscribeStatus implements State.SubscribeStatus() -func (state *state) SubscribeStatus() func() <-chan *Status { - state.RLock() - defer state.RUnlock() - - subCh := make(chan *Status) - state.subsCh = append(state.subsCh, subCh) - - for _, status := range append(state.history, state.status) { - status := status - go func() { - subCh <- status - }() - } - - sub := func() <-chan *Status { - return subCh - } - return sub -} - -func (state *state) SetStateLog(statusLog types.Fields) { - state.statusLog = statusLog -} - -func (state *state) InitialiseHistoryDB(storeInterface queries.LocalStoreInterface) { - state.historyDBStore = storeInterface -} - -// New returns a new state instance. 
-func New(subStatus SubStatus, taskID string) State { - store, err := queries.OpenHistoryDB() - if err != nil { - logtrace.Error(context.Background(), "error opening history db", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - - if store != nil { - defer store.CloseHistoryDB(context.Background()) - - if _, err := store.InsertTaskHistory(types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: taskID, - Status: subStatus.String()}); err != nil { - logtrace.Error(context.Background(), "unable to store task status", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } - - return &state{ - status: NewStatus(subStatus), - taskID: taskID, - } -} diff --git a/pkg/common/task/state/status.go b/pkg/common/task/state/status.go deleted file mode 100644 index b1b00da6..00000000 --- a/pkg/common/task/state/status.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:generate mockery --name=SubStatus - -package state - -import ( - "fmt" - "time" -) - -// SubStatus represents a sub-status that contains a description of the status. -type SubStatus interface { - fmt.Stringer - IsFinal() bool - IsFailure() bool -} - -// Status represents a state of the task. -type Status struct { - CreatedAt time.Time - SubStatus -} - -// Is returns true if the current `Status` matches to the given `statuses`. -func (status *Status) Is(subStatus SubStatus) bool { - return status.SubStatus == subStatus -} - -// NewStatus returns a new Status instance. 
-func NewStatus(subStatus SubStatus) *Status { - return &Status{ - CreatedAt: time.Now().UTC(), - SubStatus: subStatus, - } -} diff --git a/pkg/common/task/task.go b/pkg/common/task/task.go deleted file mode 100644 index e4bb062a..00000000 --- a/pkg/common/task/task.go +++ /dev/null @@ -1,143 +0,0 @@ -//go:generate mockery --name=Task - -package task - -import ( - "context" - "sync" - - "github.com/LumeraProtocol/supernode/v2/pkg/common/task/state" - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/random" -) - -// Task represent a worker task. -type Task interface { - state.State - - // ID returns id of the task. - ID() string - - // Run starts the task. - Run(ctx context.Context) error - - // Cancel tells a task to abandon its work. - // Cancel may be called by multiple goroutines simultaneously. - // After the first call, subsequent calls to a Cancel do nothing. - Cancel() - - // Done returns a channel when the task is canceled. - Done() <-chan struct{} - - // RunAction waits for new actions, starts handling each of them in a new goroutine. - RunAction(ctx context.Context) error - - // NewAction creates a new action and passes for the execution. - // It is used when it is necessary to run an action in the context of `Tasks` rather than the one who was called. 
- NewAction(fn ActionFn) <-chan struct{} - - // CloseActionCh closes action ch - CloseActionCh() -} - -type task struct { - state.State - - id string - - actionCh chan *Action - - doneMu sync.Mutex - doneCh chan struct{} - closeOnce sync.Once -} - -// ID implements Task.ID -func (task *task) ID() string { - return task.id -} - -// Run implements Task.Run -func (task *task) Run(_ context.Context) error { - return errors.New("task default run func not implemented") -} - -// Cancel implements Task.Cancel -func (task *task) Cancel() { - task.doneMu.Lock() - defer task.doneMu.Unlock() - - select { - case <-task.Done(): - logtrace.Debug(context.Background(), "task cancelled", logtrace.Fields{"task_id": task.ID()}) - return - default: - close(task.doneCh) - } -} - -// Done implements Task.Done -func (task *task) Done() <-chan struct{} { - return task.doneCh -} - -// RunAction implements Task.RunAction -func (task *task) RunAction(ctx context.Context) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - group, ctx := errgroup.WithContext(ctx) - for { - select { - case <-ctx.Done(): - logtrace.Info(ctx, "context done", logtrace.Fields{"task_id": task.ID()}) - case <-task.Done(): - logtrace.Info(ctx, "task done", logtrace.Fields{"task_id": task.ID()}) - cancel() - case action, ok := <-task.actionCh: - if !ok { - logtrace.Info(ctx, "action channel closed", logtrace.Fields{"task_id": task.ID()}) - return group.Wait() - } - - currAction := action - group.Go(func() error { - defer close(currAction.doneCh) - - return currAction.fn(ctx) - }) - continue - } - break - } - - return group.Wait() -} - -// CloseActionCh safely closes the action channel -func (task *task) CloseActionCh() { - task.closeOnce.Do(func() { - close(task.actionCh) - }) -} - -// NewAction implements Task.NewAction -func (task *task) NewAction(fn ActionFn) <-chan struct{} { - act := NewAction(fn) - task.actionCh <- act - return act.doneCh -} - -// New returns a new task instance. 
-func New(status state.SubStatus) Task { - taskID, _ := random.String(8, random.Base62Chars) - - return &task{ - State: state.New(status, taskID), - id: taskID, - doneCh: make(chan struct{}), - actionCh: make(chan *Action), - } -} diff --git a/pkg/common/task/ticket.go b/pkg/common/task/ticket.go deleted file mode 100644 index 561b8f0b..00000000 --- a/pkg/common/task/ticket.go +++ /dev/null @@ -1,13 +0,0 @@ -package task - -type CascadeTicket struct { - Creator string `json:"creator"` - CreatorSignature []byte `json:"creator_signature"` - DataHash string `json:"data_hash"` - ActionID string `json:"action_id"` - BlockHeight int64 `json:"block_height"` - BlockHash []byte `json:"block_hash"` - RQIDsIC uint32 `json:"rqids_ic"` - RQIDsMax int32 `json:"rqids_max"` - RQIDs []string `json:"rq_ids"` -} diff --git a/pkg/common/task/worker.go b/pkg/common/task/worker.go deleted file mode 100644 index 280b5fb8..00000000 --- a/pkg/common/task/worker.go +++ /dev/null @@ -1,144 +0,0 @@ -package task - -import ( - "context" - "sync" - "time" - - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" -) - -// Worker represents a pool of the task. -type Worker struct { - sync.Mutex - - tasks []Task - taskCh chan Task -} - -// Tasks returns all tasks. -func (worker *Worker) Tasks() []Task { - worker.Lock() - defer worker.Unlock() - - // return a shallow copy to avoid data races - copied := make([]Task, len(worker.tasks)) - copy(copied, worker.tasks) - return copied -} - -// Task returns the task by the given id. -func (worker *Worker) Task(taskID string) Task { - worker.Lock() - defer worker.Unlock() - - for _, task := range worker.tasks { - if task.ID() == taskID { - return task - } - } - return nil -} - -// AddTask adds the new task. 
-func (worker *Worker) AddTask(task Task) { - worker.Lock() - defer worker.Unlock() - - worker.tasks = append(worker.tasks, task) - worker.taskCh <- task - - // Proactively remove the task once it's done to prevent lingering entries - go func(t Task) { - <-t.Done() - // remove promptly when the task signals completion/cancelation - worker.RemoveTask(t) - }(task) -} - -// RemoveTask removes the task. -func (worker *Worker) RemoveTask(subTask Task) { - worker.Lock() - defer worker.Unlock() - - for i, task := range worker.tasks { - if task == subTask { - worker.tasks = append(worker.tasks[:i], worker.tasks[i+1:]...) - return - } - } -} - -// Run waits for new tasks, starts handling each of them in a new goroutine. -func (worker *Worker) Run(ctx context.Context) error { - group, _ := errgroup.WithContext(ctx) // Create an error group but ignore the derived context - // Background sweeper to prune finalized tasks that might linger - // even if the task's Run wasn't executed to completion. - sweeperCtx, sweeperCancel := context.WithCancel(ctx) - defer sweeperCancel() - go worker.cleanupLoop(sweeperCtx) - for { - select { - case <-ctx.Done(): - logtrace.Warn(ctx, "Worker run stopping", logtrace.Fields{logtrace.FieldError: ctx.Err().Error()}) - return group.Wait() - case t := <-worker.taskCh: // Rename here - currentTask := t // Capture the loop variable - group.Go(func() error { - defer func() { - if r := recover(); r != nil { - logtrace.Error(ctx, "Recovered from panic in common task's worker run", logtrace.Fields{"task": currentTask.ID(), "error": r}) - } - - logtrace.Info(ctx, "Task Removed", logtrace.Fields{"task": currentTask.ID()}) - // Remove the task from the worker's task list - worker.RemoveTask(currentTask) - }() - - return currentTask.Run(ctx) // Use the captured variable - }) - } - } -} - -// NewWorker returns a new Worker instance. 
-func NewWorker() *Worker { - w := &Worker{taskCh: make(chan Task)} - return w -} - -// cleanupLoop periodically removes tasks that are in a final state for a grace period -func (worker *Worker) cleanupLoop(ctx context.Context) { - const ( - cleanupInterval = 30 * time.Second - finalTaskTTL = 2 * time.Minute - ) - - ticker := time.NewTicker(cleanupInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - now := time.Now() - worker.Lock() - // iterate and compact in-place - kept := worker.tasks[:0] - for _, t := range worker.tasks { - st := t.Status() - if st != nil && st.SubStatus != nil && st.SubStatus.IsFinal() { - if now.Sub(st.CreatedAt) >= finalTaskTTL { - // drop this finalized task - continue - } - } - kept = append(kept, t) - } - worker.tasks = kept - worker.Unlock() - } - } -} diff --git a/pkg/common/task/worker_test.go b/pkg/common/task/worker_test.go deleted file mode 100644 index 4c5f21ac..00000000 --- a/pkg/common/task/worker_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package task - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestWorkerTasks(t *testing.T) { - t.Parallel() - - type fields struct { - tasks []Task - } - tests := []struct { - name string - fields fields - want []Task - }{ - { - name: "retrieve tasks", - fields: fields{ - tasks: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - want: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - tasks: tt.fields.tasks, - } - assert.Equal(t, tt.want, worker.Tasks()) - }) - } -} - -func TestWorkerTask(t *testing.T) { - t.Parallel() - - type fields struct { - tasks []Task - } - type args struct { - taskID string - } - tests := []struct { - name string - fields fields - args args - want Task - }{ - { - name: "get task with id 1", - fields: fields{ - tasks: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - args: args{"2"}, - want: 
&task{id: "2"}, - }, - { - name: "get not exist task", - fields: fields{ - tasks: []Task{&task{id: "1"}, &task{id: "2"}}, - }, - args: args{"3"}, - want: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - tasks: tt.fields.tasks, - } - assert.Equal(t, tt.want, worker.Task(tt.args.taskID)) - }) - } -} - -func TestWorkerAddTask(t *testing.T) { - t.Parallel() - - type args struct { - task Task - } - tests := []struct { - name string - args args - want []Task - }{ - { - name: "add task", - args: args{&task{id: "1"}}, - want: []Task{&task{id: "1"}}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - taskCh: make(chan Task), - } - - go func() { - worker.AddTask(tt.args.task) - }() - - <-worker.taskCh - tasks := worker.tasks - assert.Equal(t, tt.want, tasks) - - }) - } -} - -func TestWorkerRemoveTask(t *testing.T) { - t.Parallel() - - type args struct { - subTask Task - } - tests := []struct { - name string - args args - want []Task - }{ - { - name: "removed task", - args: args{&task{id: "1"}}, - want: []Task{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - worker := &Worker{ - tasks: []Task{tt.args.subTask}, - } - - worker.RemoveTask(tt.args.subTask) - assert.Equal(t, tt.want, worker.tasks) - }) - } -} diff --git a/pkg/crypto/hash.go b/pkg/crypto/hash.go deleted file mode 100644 index f45fa4ad..00000000 --- a/pkg/crypto/hash.go +++ /dev/null @@ -1,40 +0,0 @@ -package crypto - -import ( - "fmt" - "io" - "lukechampine.com/blake3" - "os" -) - -const defaultHashBufferSize = 1024 * 1024 // 1 MB - -func HashFileIncrementally(filePath string, bufferSize int) ([]byte, error) { - f, err := os.Open(filePath) - if err != nil { - return nil, fmt.Errorf("open decoded file: %w", err) - } - defer f.Close() - - if bufferSize == 0 { - bufferSize = defaultHashBufferSize - } - - hasher := blake3.New(32, nil) - buf := make([]byte, bufferSize) // 4MB buffer 
to balance memory vs I/O - - for { - n, readErr := f.Read(buf) - if n > 0 { - hasher.Write(buf[:n]) - } - if readErr == io.EOF { - break - } - if readErr != nil { - return nil, fmt.Errorf("streaming file read failed: %w", readErr) - } - } - - return hasher.Sum(nil), nil -} diff --git a/pkg/crypto/hash_test.go b/pkg/crypto/hash_test.go deleted file mode 100644 index 7814a772..00000000 --- a/pkg/crypto/hash_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package crypto - -import ( - "encoding/hex" - "os" - "path/filepath" - "testing" - - "lukechampine.com/blake3" -) - -func TestHashFileIncrementally(t *testing.T) { - expectedBlake3 := func(data []byte) string { - h := blake3.New(32, nil) - h.Write(data) - return hex.EncodeToString(h.Sum(nil)) - } - - testData := []byte("hello world") - emptyData := []byte("") - largeData := make([]byte, 5*1024*1024) - - // Temp dir for test files - tmpDir := t.TempDir() - - // Create helper function for file creation - createTempFile := func(name string, content []byte) string { - filePath := filepath.Join(tmpDir, name) - if err := os.WriteFile(filePath, content, 0644); err != nil { - t.Fatalf("failed to create temp file: %v", err) - } - return filePath - } - - // Create test files - smallFile := createTempFile("small.txt", testData) - emptyFile := createTempFile("empty.txt", emptyData) - largeFile := createTempFile("large.bin", largeData) - - tests := []struct { - name string - filePath string - bufferSize int - wantHash string - wantErr bool - }{ - { - name: "small file", - filePath: smallFile, - bufferSize: 4 * 1024, // 4KB buffer - wantHash: expectedBlake3(testData), - wantErr: false, - }, - { - name: "empty file", - filePath: emptyFile, - bufferSize: 1024, // small buffer - wantHash: expectedBlake3(emptyData), - wantErr: false, - }, - { - name: "large file", - filePath: largeFile, - bufferSize: 1024 * 1024, // 1MB buffer - wantHash: expectedBlake3(largeData), - wantErr: false, - }, - { - name: "file does not exist", - filePath: 
filepath.Join(tmpDir, "doesnotexist.txt"), - bufferSize: 4096, - wantHash: "", - wantErr: true, - }, - { - name: "zero buffer size (should use default)", - filePath: smallFile, - bufferSize: 0, - wantHash: expectedBlake3(testData), - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotHash, err := HashFileIncrementally(tt.filePath, tt.bufferSize) - - if (err != nil) != tt.wantErr { - t.Fatalf("expected error=%v, got err=%v", tt.wantErr, err) - } - - if !tt.wantErr && hex.EncodeToString(gotHash) != tt.wantHash { - t.Errorf("hash mismatch!\n got: %s\n want: %s", gotHash, tt.wantHash) - } - }) - } -} diff --git a/pkg/dd/client.go b/pkg/dd/client.go deleted file mode 100644 index f7b10c80..00000000 --- a/pkg/dd/client.go +++ /dev/null @@ -1,46 +0,0 @@ -package dd - -import ( - "context" - "time" - - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/random" - "google.golang.org/grpc" - "google.golang.org/grpc/encoding/gzip" -) - -const ( - defaultConnectTimeout = 60 * time.Second -) - -type client struct{} - -// Connect implements node.Client.Connect() -func (cl *client) Connect(ctx context.Context, address string) (Connection, error) { - // Limits the dial timeout, prevent got stuck too long - dialCtx, cancel := context.WithTimeout(ctx, defaultConnectTimeout) - defer cancel() - - id, _ := random.String(8, random.Base62Chars) - - grpcConn, err := grpc.DialContext(dialCtx, address, - //lint:ignore SA1019 we want to ignore this for now - grpc.WithInsecure(), - grpc.WithBlock(), - grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name), grpc.MaxCallRecvMsgSize(35000000)), - ) - if err != nil { - return nil, errors.Errorf("fail to dial: %w", err).WithField("address", address) - } - - logtrace.Debug(ctx, "Connected to address with max recv size 35 MB", logtrace.Fields{logtrace.FieldModule: "dd", "address": address}) - - conn 
:= newClientConn(id, grpcConn) - go func() { - //<-conn.Done() // FIXME: to be implemented by new gRPC package - logtrace.Debug(ctx, "Disconnected", logtrace.Fields{logtrace.FieldModule: "dd", "target": grpcConn.Target()}) - }() - return conn, nil -} diff --git a/pkg/dd/config.go b/pkg/dd/config.go deleted file mode 100644 index c0ca0607..00000000 --- a/pkg/dd/config.go +++ /dev/null @@ -1,50 +0,0 @@ -package dd - -import ( - "fmt" - "path/filepath" -) - -const ( - errValidationStr = "ddserver client validation failed - missing val" -) - -// Config contains settings of the dd-server -type Config struct { - // Host the queries IPv4 or IPv6 address - Host string `mapstructure:"host" json:"host,omitempty"` - - // Port the queries port to listen for connections on - Port int `mapstructure:"port" json:"port,omitempty"` - - // DDFilesDir - the location of temporary folder to transfer image data to ddserver - DDFilesDir string `mapstructure:"dd-temp-file-dir" json:"dd-temp-file-dir,omitempty"` -} - -// NewConfig returns a new Config instance. -func NewConfig() *Config { - return &Config{} -} - -// SetWorkDir update working dir -func (config *Config) SetWorkDir(workDir string) { - if !filepath.IsAbs(config.DDFilesDir) { - config.DDFilesDir = filepath.Join(workDir, config.DDFilesDir) - } -} - -// Validate raptorq configs -func (config *Config) Validate() error { - if config.Host == "" { - return fmt.Errorf("%s: %s", errValidationStr, "host") - } - if config.Port == 0 { - return fmt.Errorf("%s: %s", errValidationStr, "port") - } - - if config.DDFilesDir == "" { - return fmt.Errorf("%s: %s", errValidationStr, "dd-temp-file-dir") - } - - return nil -} diff --git a/pkg/dd/connection.go b/pkg/dd/connection.go deleted file mode 100644 index 34f3b20e..00000000 --- a/pkg/dd/connection.go +++ /dev/null @@ -1,23 +0,0 @@ -package dd - -import ( - "google.golang.org/grpc" -) - -// clientConn represents grpc client conneciton. 
-type clientConn struct { - *grpc.ClientConn - - id string -} - -func (conn *clientConn) DDService(config *Config) DDService { - return newDDServerClient(conn, config) -} - -func newClientConn(id string, conn *grpc.ClientConn) *clientConn { - return &clientConn{ - ClientConn: conn, - id: id, - } -} diff --git a/pkg/dd/dd_mock.go b/pkg/dd/dd_mock.go deleted file mode 100644 index 224831c6..00000000 --- a/pkg/dd/dd_mock.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: interfaces.go -// -// Generated by this command: -// -// mockgen -destination=dd_mock.go -package=dd -source=interfaces.go -// - -// Package dd is a generated GoMock package. -package dd - -import ( - context "context" - reflect "reflect" - - gomock "go.uber.org/mock/gomock" -) - -// MockClientInterface is a mock of ClientInterface interface. -type MockClientInterface struct { - ctrl *gomock.Controller - recorder *MockClientInterfaceMockRecorder - isgomock struct{} -} - -// MockClientInterfaceMockRecorder is the mock recorder for MockClientInterface. -type MockClientInterfaceMockRecorder struct { - mock *MockClientInterface -} - -// NewMockClientInterface creates a new mock instance. -func NewMockClientInterface(ctrl *gomock.Controller) *MockClientInterface { - mock := &MockClientInterface{ctrl: ctrl} - mock.recorder = &MockClientInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockClientInterface) EXPECT() *MockClientInterfaceMockRecorder { - return m.recorder -} - -// Connect mocks base method. -func (m *MockClientInterface) Connect(ctx context.Context, address string) (Connection, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Connect", ctx, address) - ret0, _ := ret[0].(Connection) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Connect indicates an expected call of Connect. 
-func (mr *MockClientInterfaceMockRecorder) Connect(ctx, address any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockClientInterface)(nil).Connect), ctx, address) -} - -// MockConnection is a mock of Connection interface. -type MockConnection struct { - ctrl *gomock.Controller - recorder *MockConnectionMockRecorder - isgomock struct{} -} - -// MockConnectionMockRecorder is the mock recorder for MockConnection. -type MockConnectionMockRecorder struct { - mock *MockConnection -} - -// NewMockConnection creates a new mock instance. -func NewMockConnection(ctrl *gomock.Controller) *MockConnection { - mock := &MockConnection{ctrl: ctrl} - mock.recorder = &MockConnectionMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockConnection) EXPECT() *MockConnectionMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockConnection) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockConnectionMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockConnection)(nil).Close)) -} - -// DDService mocks base method. -func (m *MockConnection) DDService(config *Config) DDService { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DDService", config) - ret0, _ := ret[0].(DDService) - return ret0 -} - -// DDService indicates an expected call of DDService. -func (mr *MockConnectionMockRecorder) DDService(config any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DDService", reflect.TypeOf((*MockConnection)(nil).DDService), config) -} - -// MockDDService is a mock of DDService interface. 
-type MockDDService struct { - ctrl *gomock.Controller - recorder *MockDDServiceMockRecorder - isgomock struct{} -} - -// MockDDServiceMockRecorder is the mock recorder for MockDDService. -type MockDDServiceMockRecorder struct { - mock *MockDDService -} - -// NewMockDDService creates a new mock instance. -func NewMockDDService(ctrl *gomock.Controller) *MockDDService { - mock := &MockDDService{ctrl: ctrl} - mock.recorder = &MockDDServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDDService) EXPECT() *MockDDServiceMockRecorder { - return m.recorder -} - -// GetStatus mocks base method. -func (m *MockDDService) GetStatus(ctx context.Context, req GetStatusRequest) (GetStatusResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStatus", ctx, req) - ret0, _ := ret[0].(GetStatusResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetStatus indicates an expected call of GetStatus. -func (mr *MockDDServiceMockRecorder) GetStatus(ctx, req any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatus", reflect.TypeOf((*MockDDService)(nil).GetStatus), ctx, req) -} - -// ImageRarenessScore mocks base method. -func (m *MockDDService) ImageRarenessScore(ctx context.Context, req RarenessScoreRequest) (ImageRarenessScoreResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImageRarenessScore", ctx, req) - ret0, _ := ret[0].(ImageRarenessScoreResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ImageRarenessScore indicates an expected call of ImageRarenessScore. 
-func (mr *MockDDServiceMockRecorder) ImageRarenessScore(ctx, req any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageRarenessScore", reflect.TypeOf((*MockDDService)(nil).ImageRarenessScore), ctx, req) -} diff --git a/pkg/dd/dd_server_client.go b/pkg/dd/dd_server_client.go deleted file mode 100644 index 5f927805..00000000 --- a/pkg/dd/dd_server_client.go +++ /dev/null @@ -1,24 +0,0 @@ -package dd - -import ( - dd "github.com/LumeraProtocol/supernode/v2/gen/dupedetection" -) - -type ddServerClientImpl struct { - config *Config - conn *clientConn - ddService dd.DupeDetectionServerClient -} - -// NewDDServerClient returns a new dd-server-client instance. -func newDDServerClient(conn *clientConn, c *Config) DDService { - return &ddServerClientImpl{ - config: c, - conn: conn, - ddService: dd.NewDupeDetectionServerClient(conn), - } -} - -func (c *ddServerClientImpl) Close() { - c.conn.Close() -} diff --git a/pkg/dd/image_rareness.go b/pkg/dd/image_rareness.go deleted file mode 100644 index d021da1b..00000000 --- a/pkg/dd/image_rareness.go +++ /dev/null @@ -1,108 +0,0 @@ -package dd - -import ( - "context" - "fmt" - - ddService "github.com/LumeraProtocol/supernode/v2/gen/dupedetection" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/net" -) - -type RarenessScoreRequest struct { - Filepath string -} - -type ImageRarenessScoreResponse struct { - PastelBlockHashWhenRequestSubmitted string - PastelBlockHeightWhenRequestSubmitted string - UtcTimestampWhenRequestSubmitted string - PastelIdOfSubmitter string - PastelIdOfRegisteringSupernode_1 string - PastelIdOfRegisteringSupernode_2 string - PastelIdOfRegisteringSupernode_3 string - IsPastelOpenapiRequest bool - ImageFilePath string - DupeDetectionSystemVersion string - IsLikelyDupe bool - IsRareOnInternet bool - OverallRarenessScore float32 - PctOfTop_10MostSimilarWithDupeProbAbove_25Pct float32 - 
PctOfTop_10MostSimilarWithDupeProbAbove_33Pct float32 - PctOfTop_10MostSimilarWithDupeProbAbove_50Pct float32 - RarenessScoresTableJsonCompressedB64 string - InternetRareness *ddService.InternetRareness - OpenNsfwScore float32 - AlternativeNsfwScores *ddService.AltNsfwScores - ImageFingerprintOfCandidateImageFile []float64 - CollectionNameString string - HashOfCandidateImageFile string - OpenApiGroupIdString string - GroupRarenessScore float32 - CandidateImageThumbnailWebpAsBase64String string - DoesNotImpactTheFollowingCollectionStrings string - IsInvalidSenseRequest bool - InvalidSenseRequestReason string - SimilarityScoreToFirstEntryInCollection float32 - CpProbability float32 - ChildProbability float32 - ImageFingerprintSetChecksum string -} - -// ImageRarenessScore gets the image rareness score -func (c *ddServerClientImpl) ImageRarenessScore(ctx context.Context, req RarenessScoreRequest) (ImageRarenessScoreResponse, error) { - ctx = net.AddCorrelationID(ctx) - fields := logtrace.Fields{ - logtrace.FieldMethod: "ImageRarenessScore", - logtrace.FieldRequest: req, - } - logtrace.Info(ctx, "getting image rareness score", fields) - - res, err := c.ddService.ImageRarenessScore(ctx, &ddService.RarenessScoreRequest{ImageFilepath: req.Filepath}) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to get image rareness score", fields) - return ImageRarenessScoreResponse{}, fmt.Errorf("dd image rareness score error: %w", err) - } - - logtrace.Info(ctx, "successfully got image rareness score", fields) - return toImageRarenessScoreResponse(res), nil -} - -func toImageRarenessScoreResponse(reply *ddService.ImageRarenessScoreReply) ImageRarenessScoreResponse { - return ImageRarenessScoreResponse{ - PastelBlockHashWhenRequestSubmitted: reply.PastelBlockHashWhenRequestSubmitted, - PastelBlockHeightWhenRequestSubmitted: reply.PastelBlockHeightWhenRequestSubmitted, - UtcTimestampWhenRequestSubmitted: 
reply.UtcTimestampWhenRequestSubmitted, - PastelIdOfSubmitter: reply.PastelIdOfSubmitter, - PastelIdOfRegisteringSupernode_1: reply.PastelIdOfRegisteringSupernode_1, - PastelIdOfRegisteringSupernode_2: reply.PastelIdOfRegisteringSupernode_2, - PastelIdOfRegisteringSupernode_3: reply.PastelIdOfRegisteringSupernode_3, - IsPastelOpenapiRequest: reply.IsPastelOpenapiRequest, - ImageFilePath: reply.ImageFilePath, - DupeDetectionSystemVersion: reply.DupeDetectionSystemVersion, - IsLikelyDupe: reply.IsLikelyDupe, - IsRareOnInternet: reply.IsRareOnInternet, - OverallRarenessScore: reply.OverallRarenessScore, - PctOfTop_10MostSimilarWithDupeProbAbove_25Pct: reply.PctOfTop_10MostSimilarWithDupeProbAbove_25Pct, - PctOfTop_10MostSimilarWithDupeProbAbove_33Pct: reply.PctOfTop_10MostSimilarWithDupeProbAbove_33Pct, - PctOfTop_10MostSimilarWithDupeProbAbove_50Pct: reply.PctOfTop_10MostSimilarWithDupeProbAbove_50Pct, - RarenessScoresTableJsonCompressedB64: reply.RarenessScoresTableJsonCompressedB64, - InternetRareness: reply.InternetRareness, - OpenNsfwScore: reply.OpenNsfwScore, - AlternativeNsfwScores: reply.AlternativeNsfwScores, - ImageFingerprintOfCandidateImageFile: reply.ImageFingerprintOfCandidateImageFile, - CollectionNameString: reply.CollectionNameString, - HashOfCandidateImageFile: reply.HashOfCandidateImageFile, - OpenApiGroupIdString: reply.OpenApiGroupIdString, - GroupRarenessScore: reply.GroupRarenessScore, - CandidateImageThumbnailWebpAsBase64String: reply.CandidateImageThumbnailWebpAsBase64String, - DoesNotImpactTheFollowingCollectionStrings: reply.DoesNotImpactTheFollowingCollectionStrings, - IsInvalidSenseRequest: reply.IsInvalidSenseRequest, - InvalidSenseRequestReason: reply.InvalidSenseRequestReason, - SimilarityScoreToFirstEntryInCollection: reply.SimilarityScoreToFirstEntryInCollection, - CpProbability: reply.CpProbability, - ChildProbability: reply.ChildProbability, - ImageFingerprintSetChecksum: reply.ImageFingerprintSetChecksum, - } -} diff --git 
a/pkg/dd/interfaces.go b/pkg/dd/interfaces.go deleted file mode 100644 index 45b196d3..00000000 --- a/pkg/dd/interfaces.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:generate mockgen -destination=dd_mock.go -package=dd -source=interfaces.go - -package dd - -import "context" - -// ClientInterface represents a base connection interface. -type ClientInterface interface { - // Connect connects to the server at the given address. - Connect(ctx context.Context, address string) (Connection, error) -} - -// Connection represents a client connection -type Connection interface { - // Close closes connection. - Close() error - - // DDService returns a new dd-service stream. - DDService(config *Config) DDService - - // FIXME: - // Done returns a channel that's closed when connection is shutdown. - //Done() <-chan struct{} -} - -// DDService contains methods for request services from dd-service. -type DDService interface { - ImageRarenessScore(ctx context.Context, req RarenessScoreRequest) (ImageRarenessScoreResponse, error) - GetStatus(ctx context.Context, req GetStatusRequest) (GetStatusResponse, error) -} diff --git a/pkg/dd/status.go b/pkg/dd/status.go deleted file mode 100644 index fc7f4d30..00000000 --- a/pkg/dd/status.go +++ /dev/null @@ -1,44 +0,0 @@ -package dd - -import ( - "context" - "fmt" - - ddService "github.com/LumeraProtocol/supernode/v2/gen/dupedetection" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/net" -) - -type GetStatusRequest struct { -} - -type GetStatusResponse struct { - Version string - TaskCount *ddService.TaskCount - TaskMetrics *ddService.TaskMetrics -} - -// GetStatus retrieves the status. 
-func (c *ddServerClientImpl) GetStatus(ctx context.Context, req GetStatusRequest) (GetStatusResponse, error) { - ctx = net.AddCorrelationID(ctx) - - fields := logtrace.Fields{ - logtrace.FieldMethod: "GetStatus", - logtrace.FieldRequest: req, - } - logtrace.Info(ctx, "getting status", fields) - - res, err := c.ddService.GetStatus(ctx, &ddService.GetStatusRequest{}) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to get status", fields) - return GetStatusResponse{}, fmt.Errorf("dd get status error: %w", err) - } - - logtrace.Info(ctx, "successfully got status", fields) - return GetStatusResponse{ - Version: res.GetVersion(), - TaskCount: res.GetTaskCount(), - TaskMetrics: res.GetTaskMetrics(), - }, nil -} diff --git a/sn-manager/internal/github/client.go b/pkg/github/client.go similarity index 96% rename from sn-manager/internal/github/client.go rename to pkg/github/client.go index 70e99d6a..721a02e1 100644 --- a/sn-manager/internal/github/client.go +++ b/pkg/github/client.go @@ -6,6 +6,7 @@ import ( "io" "log" "net/http" + "strings" "time" ) @@ -127,7 +128,7 @@ func (c *Client) ListReleases() ([]*Release, error) { func (c *Client) GetLatestStableRelease() (*Release, error) { // Try the latest release endpoint first (single API call) release, err := c.GetLatestRelease() - if err == nil && !release.Draft && !release.Prerelease { + if err == nil && !release.Draft && !release.Prerelease && !strings.Contains(release.TagName, "-") { return release, nil } @@ -139,7 +140,7 @@ func (c *Client) GetLatestStableRelease() (*Release, error) { // Filter for stable releases (not draft, not prerelease) for _, release := range releases { - if !release.Draft && !release.Prerelease { + if !release.Draft && !release.Prerelease && !strings.Contains(release.TagName, "-") { return release, nil } } diff --git a/pkg/logtrace/datadog.go b/pkg/logtrace/datadog.go new file mode 100644 index 00000000..5c739d12 --- /dev/null +++ 
b/pkg/logtrace/datadog.go @@ -0,0 +1,208 @@ +package logtrace + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "net/http" + "os" + "strings" + "sync" + "time" + + "go.uber.org/zap/zapcore" +) + +// Minimal Datadog Logs Forwarder (hard-coded config) kept separate for cleanliness. + +type ddCfg struct { + APIKey string + Site string // e.g. "datadoghq.com", "datadoghq.eu" + Service string // e.g. used as Datadog 'service'; we will set to node IP + Host string // optional; defaults to machine hostname +} + +var ( + ddOnce sync.Once + ddConfig ddCfg + ddClient = &http.Client{Timeout: 5 * time.Second} + ddQueue chan map[string]any + // Optional build-time injection via -ldflags + // -ldflags "-X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDAPIKey=... -X github.com/LumeraProtocol/supernode/v2/pkg/logtrace.DDSite=us5.datadoghq.com" + DDAPIKey string + DDSite string +) + +// SetupDatadog initializes the Datadog forwarding once. +func SetupDatadog(service string) { + ddOnce.Do(func() { + initDatadog(service) + }) +} + +// ForwardDatadog enqueues a log line for forwarding (non-blocking). 
+func ForwardDatadog(level zapcore.Level, ctx context.Context, msg string, fields Fields) { + ddForward(level, ctx, msg, fields) +} + +// SetDatadogService allows setting the Datadog service (e.g., to the node IP) +func SetDatadogService(service string) { + if s := strings.TrimSpace(service); s != "" { + ddConfig.Service = s + } +} + +// SetDatadogHost sets the Datadog host field (use the supernode identity) +func SetDatadogHost(host string) { + if h := strings.TrimSpace(host); h != "" { + ddConfig.Host = h + } +} + +func initDatadog(service string) { + // Base defaults (site default chosen based on earlier validation) + ddConfig = ddCfg{Site: "us5.datadoghq.com", Service: service, Host: ""} + + // Resolve from env and build flags + apiKey := strings.TrimSpace(os.Getenv("DD_API_KEY")) + if apiKey == "" { + apiKey = strings.TrimSpace(DDAPIKey) + } + + site := strings.TrimSpace(os.Getenv("DD_SITE")) + if site == "" { + site = strings.TrimSpace(DDSite) + if site == "" { + site = ddConfig.Site + } + } + + ddConfig.APIKey = apiKey + ddConfig.Site = site + + // Only enable forwarding when a real key is present + if ddConfig.APIKey == "" { + return + } + + ddQueue = make(chan map[string]any, 256) + go ddLoop() +} + +// ddForward enqueues a single log entry for Datadog intake. 
+func ddForward(level zapcore.Level, ctx context.Context, msg string, fields Fields) { + if ddQueue == nil { + return + } + + // Map zap level to Datadog status + status := "info" + switch level { + case zapcore.DebugLevel: + status = "debug" + case zapcore.InfoLevel: + status = "info" + case zapcore.WarnLevel: + status = "warn" + case zapcore.ErrorLevel: + status = "error" + case zapcore.FatalLevel: + status = "critical" + } + + // Build a compact attributes map + attrs := map[string]any{} + for k, v := range fields { + attrs[k] = v + } + // Attach correlation ID if present + if cid := extractCorrelationID(ctx); cid != "unknown" { + attrs["correlation_id"] = cid + } + // Attach origin/phase if present (first_pass | worker | download) + if o := OriginFromContext(ctx); o != "" { + attrs["origin"] = o + } + + entry := map[string]any{ + "message": msg, + "status": status, + "service": ddConfig.Service, + "host": ddConfig.Host, + "attributes": attrs, // avoid collisions with top-level fields + } + + select { + case ddQueue <- entry: + default: + // drop if queue is full to avoid blocking critical paths + } +} + +// ddLoop batches log entries and sends to Datadog intake. +func ddLoop() { + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + + batch := make([]map[string]any, 0, 32) + flush := func() { + if len(batch) == 0 { + return + } + // Marshal batch + buf := &bytes.Buffer{} + if err := json.NewEncoder(buf).Encode(batch); err != nil { + batch = batch[:0] + return + } + _ = ddPost(buf.Bytes()) + batch = batch[:0] + } + + for { + select { + case e, ok := <-ddQueue: + if !ok { + flush() + return + } + batch = append(batch, e) + if len(batch) >= 32 { + flush() + } + case <-ticker.C: + flush() + } + } +} + +func ddPost(payload []byte) error { + url := "https://http-intake.logs." 
+ strings.TrimSpace(ddConfig.Site) + "/api/v2/logs" + + // gzip the JSON payload + var gzBuf bytes.Buffer + gw := gzip.NewWriter(&gzBuf) + if _, err := gw.Write(payload); err == nil { + _ = gw.Close() + } else { + _ = gw.Close() + gzBuf = *bytes.NewBuffer(payload) + } + + req, err := http.NewRequest(http.MethodPost, url, &gzBuf) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + req.Header.Set("Content-Encoding", "gzip") + req.Header.Set("DD-API-KEY", ddConfig.APIKey) + + resp, err := ddClient.Do(req) + if err != nil { + return err + } + _ = resp.Body.Close() + return nil +} diff --git a/pkg/logtrace/fields.go b/pkg/logtrace/fields.go index 8554137b..40e4e5f1 100644 --- a/pkg/logtrace/fields.go +++ b/pkg/logtrace/fields.go @@ -5,6 +5,8 @@ type Fields map[string]interface{} const ( FieldCorrelationID = "correlation_id" + FieldOrigin = "origin" + FieldRole = "role" FieldMethod = "method" FieldModule = "module" FieldError = "error" diff --git a/pkg/logtrace/log.go b/pkg/logtrace/log.go index 02b8f36e..6e27b020 100644 --- a/pkg/logtrace/log.go +++ b/pkg/logtrace/log.go @@ -16,7 +16,13 @@ type ContextKey string // CorrelationIDKey is the key for storing correlation ID in context const CorrelationIDKey ContextKey = "correlation_id" -var logger *zap.Logger +// OriginKey marks which phase produced the log (first_pass | worker | download) +const OriginKey ContextKey = "origin" + +var ( + logger *zap.Logger + minLevel zapcore.Level = zapcore.InfoLevel // effective minimum log level +) // Setup initializes the logger for readable output in all modes. func Setup(serviceName string) { @@ -34,7 +40,11 @@ func Setup(serviceName string) { config.DisableStacktrace = true // Always respect the LOG_LEVEL environment variable. 
- config.Level = zap.NewAtomicLevelAt(getLogLevel()) + lvl := getLogLevel() + config.Level = zap.NewAtomicLevelAt(lvl) + // Persist the effective minimum so non-core sinks (e.g., Datadog) can + // filter entries consistently with the console logger. + minLevel = lvl // Build the logger from the customized config. if tracingEnabled { @@ -45,6 +55,9 @@ func Setup(serviceName string) { if err != nil { panic(err) } + + // Initialize Datadog forwarding (minimal integration in separate file) + SetupDatadog(serviceName) } // getLogLevel returns the log level from environment variable LOG_LEVEL @@ -76,6 +89,27 @@ func CtxWithCorrelationID(ctx context.Context, correlationID string) context.Con return context.WithValue(ctx, CorrelationIDKey, correlationID) } +// CorrelationIDFromContext returns the correlation ID from context or "unknown". +func CorrelationIDFromContext(ctx context.Context) string { + return extractCorrelationID(ctx) +} + +// CtxWithOrigin stores a phase/origin tag in context +func CtxWithOrigin(ctx context.Context, origin string) context.Context { + if origin == "" { + return ctx + } + return context.WithValue(ctx, OriginKey, origin) +} + +// OriginFromContext returns the origin tag from context or "" +func OriginFromContext(ctx context.Context) string { + if v, ok := ctx.Value(OriginKey).(string); ok { + return v + } + return "" +} + // extractCorrelationID retrieves the correlation ID from context func extractCorrelationID(ctx context.Context) string { if correlationID, ok := ctx.Value(CorrelationIDKey).(string); ok { @@ -90,12 +124,10 @@ func logWithLevel(level zapcore.Level, ctx context.Context, message string, fiel Setup("unknown-service") // Fallback if Setup wasn't called } - // Always enrich logs with the correlation ID. 
- // allFields := make(Fields, len(fields)+1) - // for k, v := range fields { - // allFields[k] = v - // } - // allFields[FieldCorrelationID] = extractCorrelationID(ctx) + // Drop early if below the configured level (keeps Datadog in sync) + if !logger.Core().Enabled(level) { + return + } // Convert the map to a slice of zap.Field zapFields := make([]zap.Field, 0, len(fields)) @@ -116,18 +148,19 @@ func logWithLevel(level zapcore.Level, ctx context.Context, message string, fiel } } - // Log with the structured fields. - switch level { - case zapcore.DebugLevel: - logger.Debug(message, zapFields...) - case zapcore.InfoLevel: - logger.Info(message, zapFields...) - case zapcore.WarnLevel: - logger.Warn(message, zapFields...) - case zapcore.ErrorLevel: - logger.Error(message, zapFields...) - case zapcore.FatalLevel: - logger.Fatal(message, zapFields...) + // Log with the structured fields using a level check/write + if ce := logger.Check(level, message); ce != nil { + ce.Write(zapFields...) + } else { + // Should not happen due to early Enabled check, but guard anyway + return + } + + // Forward to Datadog (non-blocking, best-effort) only if level is enabled + // for the current configuration. This prevents forwarding debug entries + // when the logger is configured for info and above. 
+ if level >= minLevel { + ForwardDatadog(level, ctx, message, fields) } } diff --git a/pkg/lumera/client.go b/pkg/lumera/client.go index bac35d68..2e25877c 100644 --- a/pkg/lumera/client.go +++ b/pkg/lumera/client.go @@ -2,10 +2,12 @@ package lumera import ( "context" + "fmt" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" @@ -16,6 +18,7 @@ type lumeraClient struct { authMod auth.Module actionMod action.Module actionMsgMod action_msg.Module + bankMod bank.Module supernodeMod supernode.Module txMod tx.Module nodeMod node.Module @@ -53,12 +56,30 @@ func newClient(ctx context.Context, cfg *Config) (Client, error) { return nil, err } + bankModule, err := bank.NewModule(conn.GetConn()) + if err != nil { + conn.Close() + return nil, err + } + nodeModule, err := node.NewModule(conn.GetConn(), cfg.keyring) if err != nil { conn.Close() return nil, err } + // Preflight: verify configured ChainID matches node's reported network + if nodeInfo, nerr := nodeModule.GetNodeInfo(ctx); nerr != nil { + conn.Close() + return nil, fmt.Errorf("failed to get node info for chain verification: %w", nerr) + } else if nodeInfo != nil && nodeInfo.DefaultNodeInfo != nil { + // Cosmos SDK exposes chain-id in DefaultNodeInfo.Network + if reported := nodeInfo.DefaultNodeInfo.Network; reported != "" && reported != cfg.ChainID { + conn.Close() + return nil, fmt.Errorf("chain ID mismatch: configured=%s node=%s", cfg.ChainID, reported) + } + } + actionMsgModule, err := action_msg.NewModule( conn.GetConn(), authModule, // For account info @@ -77,6 +98,7 @@ func newClient(ctx 
context.Context, cfg *Config) (Client, error) { authMod: authModule, actionMod: actionModule, actionMsgMod: actionMsgModule, + bankMod: bankModule, supernodeMod: supernodeModule, txMod: txModule, nodeMod: nodeModule, @@ -96,6 +118,10 @@ func (c *lumeraClient) ActionMsg() action_msg.Module { return c.actionMsgMod } +func (c *lumeraClient) Bank() bank.Module { + return c.bankMod +} + func (c *lumeraClient) SuperNode() supernode.Module { return c.supernodeMod } diff --git a/pkg/lumera/connection.go b/pkg/lumera/connection.go index ab28702c..8abdc0f5 100644 --- a/pkg/lumera/connection.go +++ b/pkg/lumera/connection.go @@ -14,8 +14,6 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" - "os" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" ) @@ -127,14 +125,11 @@ func newGRPCConnection(ctx context.Context, rawAddr string) (Connection, error) if firstCand.useTLS { scheme = "tls" } - logtrace.Info(ctx, "gRPC connection established", logtrace.Fields{ + logtrace.Debug(ctx, "gRPC connection established", logtrace.Fields{ "target": firstCand.target, "scheme": scheme, }) - // Start a monitor to terminate the app if connection is lost - go monitorConnection(ctx, firstConn) - return &grpcConnection{conn: firstConn}, nil } @@ -275,35 +270,6 @@ func createGRPCConnection(ctx context.Context, hostPort string, creds credential } } -// monitorConnection watches the connection state and exits the process if the -// connection transitions to Shutdown or remains in TransientFailure beyond a grace period. 
-func monitorConnection(ctx context.Context, conn *grpc.ClientConn) { - for { - state := conn.GetState() - switch state { - case connectivity.Shutdown: - logtrace.Error(ctx, "gRPC connection shutdown", logtrace.Fields{"action": "exit"}) - os.Exit(1) - case connectivity.TransientFailure: - // Allow some time to recover to Ready - gctx, cancel := context.WithTimeout(ctx, reconnectionGracePeriod) - for conn.GetState() == connectivity.TransientFailure { - if !conn.WaitForStateChange(gctx, connectivity.TransientFailure) { - cancel() - logtrace.Error(ctx, "gRPC connection lost (transient failure)", logtrace.Fields{"grace": reconnectionGracePeriod.String(), "action": "exit"}) - os.Exit(1) - } - } - cancel() - default: - // Idle/Connecting/Ready: just wait for state change - if !conn.WaitForStateChange(ctx, state) { - return - } - } - } -} - // Close closes the gRPC connection. func (c *grpcConnection) Close() error { if c.conn != nil { diff --git a/pkg/lumera/interface.go b/pkg/lumera/interface.go index eba47684..2fb25c13 100644 --- a/pkg/lumera/interface.go +++ b/pkg/lumera/interface.go @@ -7,6 +7,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" @@ -18,6 +19,7 @@ type Client interface { Action() action.Module ActionMsg() action_msg.Module SuperNode() supernode.Module + Bank() bank.Module Tx() tx.Module Node() node.Module diff --git a/pkg/lumera/lumera_mock.go b/pkg/lumera/lumera_mock.go index 25d30789..e19ddfdb 100644 --- a/pkg/lumera/lumera_mock.go +++ b/pkg/lumera/lumera_mock.go @@ -15,6 +15,7 @@ import ( action 
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" action_msg "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" auth "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" + bank "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" node "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" supernode "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" tx "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" @@ -87,6 +88,20 @@ func (mr *MockClientMockRecorder) Auth() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Auth", reflect.TypeOf((*MockClient)(nil).Auth)) } +// Bank mocks base method. +func (m *MockClient) Bank() bank.Module { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Bank") + ret0, _ := ret[0].(bank.Module) + return ret0 +} + +// Bank indicates an expected call of Bank. +func (mr *MockClientMockRecorder) Bank() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bank", reflect.TypeOf((*MockClient)(nil).Bank)) +} + // Close mocks base method. 
func (m *MockClient) Close() error { m.ctrl.T.Helper() diff --git a/pkg/lumera/modules/action_msg/helpers.go b/pkg/lumera/modules/action_msg/helpers.go index 6de5fb9f..a1fb7e71 100644 --- a/pkg/lumera/modules/action_msg/helpers.go +++ b/pkg/lumera/modules/action_msg/helpers.go @@ -1,58 +1,57 @@ package action_msg import ( - "fmt" - "strconv" - "time" + "encoding/json" + "fmt" + "strconv" + "time" - actionapi "github.com/LumeraProtocol/lumera/api/lumera/action" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera/util" - "google.golang.org/protobuf/encoding/protojson" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/util" ) func validateRequestActionParams(actionType, metadata, price, expirationTime string) error { - if actionType == "" { - return fmt.Errorf("action type cannot be empty") - } - if metadata == "" { - return fmt.Errorf("metadata cannot be empty") - } - if price == "" { - return fmt.Errorf("price cannot be empty") - } - // Validate price: must be integer coin in ulume (e.g., "1000ulume") - if err := util.ValidateUlumeIntCoin(price); err != nil { - return fmt.Errorf("invalid price: %w", err) - } - if expirationTime == "" { - return fmt.Errorf("expiration time cannot be empty") - } - // Validate expiration is a future unix timestamp - exp, err := strconv.ParseInt(expirationTime, 10, 64) - if err != nil { - return fmt.Errorf("invalid expirationTime: %w", err) - } - // Allow small clock skew; require strictly in the future - if exp <= time.Now().Add(30*time.Second).Unix() { - return fmt.Errorf("expiration time must be in the future") - } - return nil + if actionType == "" { + return fmt.Errorf("action type cannot be empty") + } + if metadata == "" { + return fmt.Errorf("metadata cannot be empty") + } + if price == "" { + return fmt.Errorf("price cannot be empty") + } + // Validate price: must be integer coin in ulume 
(e.g., "1000ulume") + if err := util.ValidateUlumeIntCoin(price); err != nil { + return fmt.Errorf("invalid price: %w", err) + } + if expirationTime == "" { + return fmt.Errorf("expiration time cannot be empty") + } + // Validate expiration is a future unix timestamp + exp, err := strconv.ParseInt(expirationTime, 10, 64) + if err != nil { + return fmt.Errorf("invalid expirationTime: %w", err) + } + // Allow small clock skew; require strictly in the future + if exp <= time.Now().Add(30*time.Second).Unix() { + return fmt.Errorf("expiration time must be in the future") + } + return nil } func validateFinalizeActionParams(actionId string, rqIdsIds []string) error { - if actionId == "" { - return fmt.Errorf("action ID cannot be empty") - } - if len(rqIdsIds) == 0 { - return fmt.Errorf("rq_ids_ids cannot be empty for cascade action") - } - for i, s := range rqIdsIds { - if s == "" { - return fmt.Errorf("rq_ids_ids[%d] cannot be empty", i) - } - } - return nil + if actionId == "" { + return fmt.Errorf("action ID cannot be empty") + } + if len(rqIdsIds) == 0 { + return fmt.Errorf("rq_ids_ids cannot be empty for cascade action") + } + for i, s := range rqIdsIds { + if s == "" { + return fmt.Errorf("rq_ids_ids[%d] cannot be empty", i) + } + } + return nil } func createRequestActionMessage(creator, actionType, metadata, price, expirationTime string) *actiontypes.MsgRequestAction { @@ -66,11 +65,11 @@ func createRequestActionMessage(creator, actionType, metadata, price, expiration } func createFinalizeActionMessage(creator, actionId string, rqIdsIds []string) (*actiontypes.MsgFinalizeAction, error) { - cascadeMeta := actionapi.CascadeMetadata{ + cascadeMeta := actiontypes.CascadeMetadata{ RqIdsIds: rqIdsIds, } - metadataBytes, err := protojson.Marshal(&cascadeMeta) + metadataBytes, err := json.Marshal(&cascadeMeta) if err != nil { return nil, fmt.Errorf("failed to marshal cascade metadata: %w", err) } diff --git a/pkg/lumera/modules/auth/impl.go 
b/pkg/lumera/modules/auth/impl.go index 3597d7a9..4304e2dd 100644 --- a/pkg/lumera/modules/auth/impl.go +++ b/pkg/lumera/modules/auth/impl.go @@ -45,8 +45,7 @@ func (m *module) Verify(ctx context.Context, accAddress string, data, signature if err != nil { return fmt.Errorf("invalid address: %w", err) } - - logtrace.Info(ctx, "Verifying signature", logtrace.Fields{"address": addr.String()}) + logtrace.Info(ctx, "auth: verify signature start", logtrace.Fields{"address": addr.String()}) // Use Account RPC instead of AccountInfo to get the full account with public key accResp, err := m.client.Account(ctx, &authtypes.QueryAccountRequest{ @@ -66,10 +65,10 @@ func (m *module) Verify(ctx context.Context, accAddress string, data, signature if pubKey == nil { return fmt.Errorf("public key is nil") } - logtrace.Info(ctx, "Public key retrieved", logtrace.Fields{"pubKey": pubKey.String()}) + logtrace.Info(ctx, "auth: public key loaded", logtrace.Fields{"address": addr.String()}) if !pubKey.VerifySignature(data, signature) { return fmt.Errorf("invalid signature") } - + logtrace.Info(ctx, "auth: verify signature ok", logtrace.Fields{"address": addr.String()}) return nil } diff --git a/pkg/lumera/modules/bank/impl.go b/pkg/lumera/modules/bank/impl.go new file mode 100644 index 00000000..157eb97f --- /dev/null +++ b/pkg/lumera/modules/bank/impl.go @@ -0,0 +1,30 @@ +package bank + +import ( + "context" + "fmt" + + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "google.golang.org/grpc" +) + +type module struct { + client banktypes.QueryClient +} + +func newModule(conn *grpc.ClientConn) (Module, error) { + if conn == nil { + return nil, fmt.Errorf("connection cannot be nil") + } + return &module{client: banktypes.NewQueryClient(conn)}, nil +} + +func (m *module) Balance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) { + if address == "" { + return nil, fmt.Errorf("address cannot be empty") + } + if denom == "" { + return nil, 
fmt.Errorf("denom cannot be empty") + } + return m.client.Balance(ctx, &banktypes.QueryBalanceRequest{Address: address, Denom: denom}) +} diff --git a/pkg/lumera/modules/bank/interface.go b/pkg/lumera/modules/bank/interface.go new file mode 100644 index 00000000..b88093cf --- /dev/null +++ b/pkg/lumera/modules/bank/interface.go @@ -0,0 +1,18 @@ +//go:generate mockgen -destination=bank_mock.go -package=bank -source=interface.go +package bank + +import ( + "context" + + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "google.golang.org/grpc" +) + +// Module provides access to Cosmos SDK bank queries. +type Module interface { + // Balance returns the balance for a specific denom at an address. + Balance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) +} + +// NewModule constructs a bank Module backed by the given gRPC connection. +func NewModule(conn *grpc.ClientConn) (Module, error) { return newModule(conn) } diff --git a/pkg/lumera/modules/supernode/impl.go b/pkg/lumera/modules/supernode/impl.go index d0b633a8..93e2d7e0 100644 --- a/pkg/lumera/modules/supernode/impl.go +++ b/pkg/lumera/modules/supernode/impl.go @@ -26,11 +26,12 @@ func newModule(conn *grpc.ClientConn) (Module, error) { }, nil } -// GetTopSuperNodesForBlock gets the top supernodes for a specific block height -func (m *module) GetTopSuperNodesForBlock(ctx context.Context, blockHeight uint64) (*types.QueryGetTopSuperNodesForBlockResponse, error) { - resp, err := m.client.GetTopSuperNodesForBlock(ctx, &types.QueryGetTopSuperNodesForBlockRequest{ - BlockHeight: int32(blockHeight), - }) +// GetTopSuperNodesForBlock gets the top supernodes for a specific request +func (m *module) GetTopSuperNodesForBlock(ctx context.Context, req *types.QueryGetTopSuperNodesForBlockRequest) (*types.QueryGetTopSuperNodesForBlockResponse, error) { + if req == nil { + return nil, fmt.Errorf("request cannot be nil") + } + resp, err := m.client.GetTopSuperNodesForBlock(ctx, 
req) if err != nil { return nil, fmt.Errorf("failed to get top supernodes: %w", err) } diff --git a/pkg/lumera/modules/supernode/interface.go b/pkg/lumera/modules/supernode/interface.go index 0b3ed7d8..2dc70a29 100644 --- a/pkg/lumera/modules/supernode/interface.go +++ b/pkg/lumera/modules/supernode/interface.go @@ -19,7 +19,7 @@ type SuperNodeInfo struct { // Module defines the interface for interacting with the supernode module type Module interface { - GetTopSuperNodesForBlock(ctx context.Context, blockHeight uint64) (*types.QueryGetTopSuperNodesForBlockResponse, error) + GetTopSuperNodesForBlock(ctx context.Context, req *types.QueryGetTopSuperNodesForBlockRequest) (*types.QueryGetTopSuperNodesForBlockResponse, error) GetSuperNode(ctx context.Context, address string) (*types.QueryGetSuperNodeResponse, error) GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*types.SuperNode, error) GetSupernodeWithLatestAddress(ctx context.Context, address string) (*SuperNodeInfo, error) diff --git a/pkg/lumera/modules/supernode/supernode_mock.go b/pkg/lumera/modules/supernode/supernode_mock.go index a4247cba..9a0899de 100644 --- a/pkg/lumera/modules/supernode/supernode_mock.go +++ b/pkg/lumera/modules/supernode/supernode_mock.go @@ -102,16 +102,16 @@ func (mr *MockModuleMockRecorder) GetSupernodeWithLatestAddress(ctx, address any } // GetTopSuperNodesForBlock mocks base method. 
-func (m *MockModule) GetTopSuperNodesForBlock(ctx context.Context, blockHeight uint64) (*types.QueryGetTopSuperNodesForBlockResponse, error) { +func (m *MockModule) GetTopSuperNodesForBlock(ctx context.Context, req *types.QueryGetTopSuperNodesForBlockRequest) (*types.QueryGetTopSuperNodesForBlockResponse, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTopSuperNodesForBlock", ctx, blockHeight) + ret := m.ctrl.Call(m, "GetTopSuperNodesForBlock", ctx, req) ret0, _ := ret[0].(*types.QueryGetTopSuperNodesForBlockResponse) ret1, _ := ret[1].(error) return ret0, ret1 } // GetTopSuperNodesForBlock indicates an expected call of GetTopSuperNodesForBlock. -func (mr *MockModuleMockRecorder) GetTopSuperNodesForBlock(ctx, blockHeight any) *gomock.Call { +func (mr *MockModuleMockRecorder) GetTopSuperNodesForBlock(ctx, req any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopSuperNodesForBlock", reflect.TypeOf((*MockModule)(nil).GetTopSuperNodesForBlock), ctx, blockHeight) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopSuperNodesForBlock", reflect.TypeOf((*MockModule)(nil).GetTopSuperNodesForBlock), ctx, req) } diff --git a/pkg/lumera/modules/tx/impl.go b/pkg/lumera/modules/tx/impl.go index d342601b..6ac625ca 100644 --- a/pkg/lumera/modules/tx/impl.go +++ b/pkg/lumera/modules/tx/impl.go @@ -5,7 +5,6 @@ import ( "fmt" "math" "strconv" - "time" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" lumeracodec "github.com/LumeraProtocol/supernode/v2/pkg/lumera/codec" @@ -48,6 +47,18 @@ func newModule(conn *grpc.ClientConn) (Module, error) { // SimulateTransaction simulates a transaction with given messages and returns gas used func (m *module) SimulateTransaction(ctx context.Context, msgs []types.Msg, accountInfo *authtypes.BaseAccount, config *TxConfig) (*sdktx.SimulateResponse, error) { + if config == nil { + return nil, fmt.Errorf("tx config cannot be nil") + } + if accountInfo == nil { + return nil, 
fmt.Errorf("account info cannot be nil") + } + if config.Keyring == nil { + return nil, fmt.Errorf("keyring cannot be nil") + } + if config.KeyName == "" { + return nil, fmt.Errorf("key name cannot be empty") + } // Create encoding config and client context encCfg := lumeracodec.GetEncodingConfig() clientCtx := client.Context{}. @@ -103,12 +114,24 @@ func (m *module) SimulateTransaction(ctx context.Context, msgs []types.Msg, acco return nil, fmt.Errorf("simulation error: %w", err) } - logtrace.Info(ctx, fmt.Sprintf("simulation complete | gasUsed=%d", simRes.GasInfo.GasUsed), nil) + logtrace.Debug(ctx, fmt.Sprintf("simulation complete | gasUsed=%d", simRes.GasInfo.GasUsed), nil) return simRes, nil } // BuildAndSignTransaction builds and signs a transaction with the given parameters func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, accountInfo *authtypes.BaseAccount, gasLimit uint64, fee string, config *TxConfig) ([]byte, error) { + if config == nil { + return nil, fmt.Errorf("tx config cannot be nil") + } + if accountInfo == nil { + return nil, fmt.Errorf("account info cannot be nil") + } + if config.Keyring == nil { + return nil, fmt.Errorf("keyring cannot be nil") + } + if config.KeyName == "" { + return nil, fmt.Errorf("key name cannot be empty") + } // Create encoding config encCfg := lumeracodec.GetEncodingConfig() @@ -116,10 +139,9 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, clientCtx := client.Context{}. WithCodec(encCfg.Codec). WithTxConfig(encCfg.TxConfig). - WithKeyring(config.Keyring). - WithBroadcastMode("sync") + WithKeyring(config.Keyring) - // Create transaction factory + // Create transaction factory factory := tx.Factory{}. WithTxConfig(clientCtx.TxConfig). WithKeybase(config.Keyring). @@ -127,7 +149,6 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, WithSequence(accountInfo.Sequence). WithChainID(config.ChainID). WithGas(gasLimit). 
- WithGasAdjustment(config.GasAdjustment). WithSignMode(signingtypes.SignMode_SIGN_MODE_DIRECT). WithFees(fee) @@ -143,7 +164,7 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, return nil, fmt.Errorf("failed to sign transaction: %w", err) } - logtrace.Info(ctx, "transaction signed successfully", nil) + logtrace.Debug(ctx, "transaction signed successfully", nil) // Encode signed transaction txBytes, err := clientCtx.TxConfig.TxEncoder()(txBuilder.GetTx()) @@ -157,10 +178,7 @@ func (m *module) BuildAndSignTransaction(ctx context.Context, msgs []types.Msg, // BroadcastTransaction broadcasts a signed transaction and returns the result func (m *module) BroadcastTransaction(ctx context.Context, txBytes []byte) (*sdktx.BroadcastTxResponse, error) { // Broadcast transaction - req := &sdktx.BroadcastTxRequest{ - TxBytes: txBytes, - Mode: sdktx.BroadcastMode_BROADCAST_MODE_SYNC, - } + req := &sdktx.BroadcastTxRequest{TxBytes: txBytes, Mode: sdktx.BroadcastMode_BROADCAST_MODE_SYNC} resp, err := m.client.BroadcastTx(ctx, req) @@ -273,7 +291,7 @@ func (m *module) ProcessTransaction(ctx context.Context, msgs []types.Msg, accou // Step 3: Calculate fee based on adjusted gas fee := m.CalculateFee(gasToUse, config) - logtrace.Info(ctx, fmt.Sprintf("using simulated gas and calculated fee | simulatedGas=%d adjustedGas=%d fee=%s", simulatedGasUsed, gasToUse, fee), nil) + logtrace.Debug(ctx, fmt.Sprintf("using simulated gas and calculated fee | simulatedGas=%d gasToUse=%d fee=%s", simulatedGasUsed, gasToUse, fee), nil) // Step 4: Build and sign transaction txBytes, err := m.BuildAndSignTransaction(ctx, msgs, accountInfo, gasToUse, fee, config) @@ -281,38 +299,12 @@ func (m *module) ProcessTransaction(ctx context.Context, msgs []types.Msg, accou return nil, fmt.Errorf("failed to build and sign transaction: %w", err) } - // Step 5: Broadcast transaction + // Step 5: Broadcast transaction (SYNC mode) result, err := m.BroadcastTransaction(ctx, txBytes) if 
err != nil { return result, fmt.Errorf("failed to broadcast transaction: %w", err) } - if result != nil && result.TxResponse != nil && result.TxResponse.Code == 0 && len(result.TxResponse.Events) == 0 { - logtrace.Info(ctx, "Transaction broadcast successful, waiting for inclusion to get events...", nil) - - // Retry 5 times with 1 second intervals - var txResp *sdktx.GetTxResponse - for i := 0; i < 5; i++ { - time.Sleep(1 * time.Second) - - txResp, err = m.GetTransaction(ctx, result.TxResponse.TxHash) - if err == nil && txResp != nil && txResp.TxResponse != nil { - // Successfully got the transaction with events - logtrace.Info(ctx, fmt.Sprintf("Retrieved transaction with %d events", len(txResp.TxResponse.Events)), nil) - result.TxResponse = txResp.TxResponse - break - } - - if err != nil { - logtrace.Warn(ctx, fmt.Sprintf("Attempt %d: failed to query transaction: %v", i+1, err), nil) - } - } - } - - if len(result.TxResponse.Events) == 0 { - logtrace.Error(ctx, "Failed to retrieve transaction events after 5 attempts", nil) - } - return result, nil } diff --git a/pkg/lumera/util/coin.go b/pkg/lumera/util/coin.go index 561f5560..6632c1fe 100644 --- a/pkg/lumera/util/coin.go +++ b/pkg/lumera/util/coin.go @@ -1,35 +1,34 @@ package util import ( - "fmt" - "strings" + "fmt" + "strings" ) // ValidateUlumeIntCoin checks that the input is a positive integer amount // with the 'ulume' denom, e.g., "1000ulume". It keeps validation simple // without pulling in SDK dependencies. 
func ValidateUlumeIntCoin(s string) error { - const denom = "ulume" - if !strings.HasSuffix(s, denom) { - return fmt.Errorf("denom must be '%s'", denom) - } - num := s[:len(s)-len(denom)] - if num == "" { - return fmt.Errorf("amount is required before denom") - } - // must be all digits, no leading +/-, no decimals - var val uint64 - for i := 0; i < len(num); i++ { - c := num[i] - if c < '0' || c > '9' { - return fmt.Errorf("amount must be an integer number") - } - // simple overflow-safe accumulation for uint64 - val = val*10 + uint64(c-'0') - } - if val == 0 { - return fmt.Errorf("amount must be greater than zero") - } - return nil + const denom = "ulume" + if !strings.HasSuffix(s, denom) { + return fmt.Errorf("denom must be '%s'", denom) + } + num := s[:len(s)-len(denom)] + if num == "" { + return fmt.Errorf("amount is required before denom") + } + // must be all digits, no leading +/-, no decimals + var val uint64 + for i := 0; i < len(num); i++ { + c := num[i] + if c < '0' || c > '9' { + return fmt.Errorf("amount must be an integer number") + } + // simple overflow-safe accumulation for uint64 + val = val*10 + uint64(c-'0') + } + if val == 0 { + return fmt.Errorf("amount must be greater than zero") + } + return nil } - diff --git a/pkg/net/grpc/server/server.go b/pkg/net/grpc/server/server.go index 64dfe0f2..ae1a3524 100644 --- a/pkg/net/grpc/server/server.go +++ b/pkg/net/grpc/server/server.go @@ -203,7 +203,7 @@ func (s *Server) createListener(ctx context.Context, address string) (net.Listen if err != nil { return nil, errors.Errorf("failed to create listener: %w", err).WithField("address", address) } - logtrace.Info(ctx, "gRPC server listening", logtrace.Fields{"address": address}) + logtrace.Debug(ctx, "gRPC server listening", logtrace.Fields{"address": address}) return lis, nil } @@ -256,7 +256,7 @@ func (s *Server) Serve(ctx context.Context, address string, opts *ServerOptions) // Wait for context cancellation or error select { case <-ctx.Done(): - 
logtrace.Info(ctx, "Shutting down gRPC server", logtrace.Fields{"address": address}) + logtrace.Debug(ctx, "Shutting down gRPC server", logtrace.Fields{"address": address}) return s.Stop(opts.GracefulShutdownTime) case err := <-serveErr: return err diff --git a/pkg/net/interceptor.go b/pkg/net/interceptor.go index f29d88a1..b33aadcf 100644 --- a/pkg/net/interceptor.go +++ b/pkg/net/interceptor.go @@ -34,7 +34,7 @@ func UnaryServerInterceptor() grpc.UnaryServerInterceptor { logtrace.FieldMethod: info.FullMethod, logtrace.FieldCorrelationID: correlationID, } - logtrace.Info(ctx, "received gRPC request", fields) + logtrace.Debug(ctx, "received gRPC request", fields) resp, err := handler(ctx, req) @@ -42,7 +42,7 @@ func UnaryServerInterceptor() grpc.UnaryServerInterceptor { fields[logtrace.FieldError] = err.Error() logtrace.Error(ctx, "gRPC request failed", fields) } else { - logtrace.Info(ctx, "gRPC request processed successfully", fields) + logtrace.Debug(ctx, "gRPC request processed successfully", fields) } return resp, err diff --git a/pkg/p2pmetrics/metrics.go b/pkg/p2pmetrics/metrics.go deleted file mode 100644 index 165f0eaa..00000000 --- a/pkg/p2pmetrics/metrics.go +++ /dev/null @@ -1,397 +0,0 @@ -package p2pmetrics - -import ( - "context" - "sync" -) - -// Call represents a single per-node RPC outcome (store or retrieve). -type Call struct { - IP string `json:"ip"` - Address string `json:"address"` - Keys int `json:"keys"` - Success bool `json:"success"` - Error string `json:"error,omitempty"` - DurationMS int64 `json:"duration_ms"` - Noop bool `json:"noop,omitempty"` -} - -// -------- Lightweight hooks ------------------------- - -var ( - storeMu sync.RWMutex - storeHook = make(map[string]func(Call)) - - retrieveMu sync.RWMutex - retrieveHook = make(map[string]func(Call)) - - foundLocalMu sync.RWMutex - foundLocalCb = make(map[string]func(int)) -) - -// RegisterStoreHook registers a callback to receive store RPC calls for a task. 
-func RegisterStoreHook(taskID string, fn func(Call)) { - storeMu.Lock() - defer storeMu.Unlock() - if fn == nil { - delete(storeHook, taskID) - return - } - storeHook[taskID] = fn -} - -// UnregisterStoreHook removes the registered store callback for a task. -func UnregisterStoreHook(taskID string) { RegisterStoreHook(taskID, nil) } - -// RecordStore invokes the registered store callback for the given task, if any. -func RecordStore(taskID string, c Call) { - storeMu.RLock() - fn := storeHook[taskID] - storeMu.RUnlock() - if fn != nil { - fn(c) - } -} - -// RegisterRetrieveHook registers a callback to receive retrieve RPC calls. -func RegisterRetrieveHook(taskID string, fn func(Call)) { - retrieveMu.Lock() - defer retrieveMu.Unlock() - if fn == nil { - delete(retrieveHook, taskID) - return - } - retrieveHook[taskID] = fn -} - -// UnregisterRetrieveHook removes the registered retrieve callback for a task. -func UnregisterRetrieveHook(taskID string) { RegisterRetrieveHook(taskID, nil) } - -// RecordRetrieve invokes the registered retrieve callback for the given task. -func RecordRetrieve(taskID string, c Call) { - retrieveMu.RLock() - fn := retrieveHook[taskID] - retrieveMu.RUnlock() - if fn != nil { - fn(c) - } -} - -// RegisterFoundLocalHook registers a callback to receive found-local counts. -func RegisterFoundLocalHook(taskID string, fn func(int)) { - foundLocalMu.Lock() - defer foundLocalMu.Unlock() - if fn == nil { - delete(foundLocalCb, taskID) - return - } - foundLocalCb[taskID] = fn -} - -// UnregisterFoundLocalHook removes the registered found-local callback. -func UnregisterFoundLocalHook(taskID string) { RegisterFoundLocalHook(taskID, nil) } - -// ReportFoundLocal invokes the registered found-local callback for the task. 
-func ReportFoundLocal(taskID string, count int) { - foundLocalMu.RLock() - fn := foundLocalCb[taskID] - foundLocalMu.RUnlock() - if fn != nil { - fn(count) - } -} - -// -------- Minimal in-process collectors for events -------------------------- - -// Store session -type storeSession struct { - CallsByIP map[string][]Call - SymbolsFirstPass int - SymbolsTotal int - IDFilesCount int - DurationMS int64 -} - -var storeSessions = struct{ m map[string]*storeSession }{m: map[string]*storeSession{}} - -// RegisterStoreBridge hooks store callbacks into the store session collector. -func StartStoreCapture(taskID string) { - RegisterStoreHook(taskID, func(c Call) { - s := storeSessions.m[taskID] - if s == nil { - s = &storeSession{CallsByIP: map[string][]Call{}} - storeSessions.m[taskID] = s - } - key := c.IP - if key == "" { - key = c.Address - } - s.CallsByIP[key] = append(s.CallsByIP[key], c) - }) -} - -func StopStoreCapture(taskID string) { UnregisterStoreHook(taskID) } - -// SetStoreSummary sets store summary fields for the first pass and totals. -// -// - symbolsFirstPass: number of symbols sent during the first pass -// - symbolsTotal: total symbols available in the directory -// - idFilesCount: number of ID/metadata files included in the first combined batch -// - durationMS: elapsed time of the first-pass store phase -func SetStoreSummary(taskID string, symbolsFirstPass, symbolsTotal, idFilesCount int, durationMS int64) { - if taskID == "" { - return - } - s := storeSessions.m[taskID] - if s == nil { - s = &storeSession{CallsByIP: map[string][]Call{}} - storeSessions.m[taskID] = s - } - s.SymbolsFirstPass = symbolsFirstPass - s.SymbolsTotal = symbolsTotal - s.IDFilesCount = idFilesCount - s.DurationMS = durationMS -} - -// BuildStoreEventPayloadFromCollector builds the store event payload (minimal). 
-func BuildStoreEventPayloadFromCollector(taskID string) map[string]any { - s := storeSessions.m[taskID] - if s == nil { - return map[string]any{ - "store": map[string]any{ - "duration_ms": int64(0), - "symbols_first_pass": 0, - "symbols_total": 0, - "id_files_count": 0, - "success_rate_pct": float64(0), - "calls_by_ip": map[string][]Call{}, - }, - } - } - // Compute per-call success rate across first-pass store RPC attempts - totalCalls := 0 - successCalls := 0 - for _, calls := range s.CallsByIP { - for _, c := range calls { - totalCalls++ - if c.Success { - successCalls++ - } - } - } - var successRate float64 - if totalCalls > 0 { - successRate = float64(successCalls) / float64(totalCalls) * 100.0 - } - return map[string]any{ - "store": map[string]any{ - "duration_ms": s.DurationMS, - "symbols_first_pass": s.SymbolsFirstPass, - "symbols_total": s.SymbolsTotal, - "id_files_count": s.IDFilesCount, - "success_rate_pct": successRate, - "calls_by_ip": s.CallsByIP, - }, - } -} - -// Retrieve session -type retrieveSession struct { - mu sync.RWMutex - CallsByIP map[string][]Call - FoundLocal int - FoundNet int - Keys int - Required int - RetrieveMS int64 - DecodeMS int64 -} - -var retrieveSessions = struct{ m map[string]*retrieveSession }{m: map[string]*retrieveSession{}} - -// internal event channel for retrieve metrics (per task) -type retrieveEvent struct { - typ int // 0: per-node call, 1: found-local update - call Call - n int -} - -var retrieveEventChans = struct { - mu sync.Mutex - m map[string]chan retrieveEvent -}{m: map[string]chan retrieveEvent{}} - -// StartRetrieveCapture hooks retrieve callbacks into a buffered channel and a -// single goroutine that serializes updates to avoid concurrent map writes. 
-func StartRetrieveCapture(taskID string) { - // Create or get session upfront - s := retrieveSessions.m[taskID] - if s == nil { - s = &retrieveSession{CallsByIP: map[string][]Call{}} - retrieveSessions.m[taskID] = s - } - - // Per-task buffered channel - ch := make(chan retrieveEvent, 4096) - retrieveEventChans.mu.Lock() - retrieveEventChans.m[taskID] = ch - retrieveEventChans.mu.Unlock() - - // Worker goroutine to serialize writes - go func(taskID string, ch <-chan retrieveEvent) { - for ev := range ch { - sess := retrieveSessions.m[taskID] - if sess == nil { - sess = &retrieveSession{CallsByIP: map[string][]Call{}} - retrieveSessions.m[taskID] = sess - } - switch ev.typ { - case 0: // per-node call - key := ev.call.IP - if key == "" { - key = ev.call.Address - } - sess.mu.Lock() - if sess.CallsByIP == nil { - sess.CallsByIP = map[string][]Call{} - } - sess.CallsByIP[key] = append(sess.CallsByIP[key], ev.call) - sess.mu.Unlock() - case 1: // found-local update - sess.FoundLocal = ev.n - } - } - }(taskID, ch) - - // Register hooks that enqueue events (non-blocking) - RegisterRetrieveHook(taskID, func(c Call) { - retrieveEventChans.mu.Lock() - ch, ok := retrieveEventChans.m[taskID] - retrieveEventChans.mu.Unlock() - if ok { - select { - case ch <- retrieveEvent{typ: 0, call: c}: - default: // drop if buffer is full - } - } - }) - RegisterFoundLocalHook(taskID, func(n int) { - retrieveEventChans.mu.Lock() - ch, ok := retrieveEventChans.m[taskID] - retrieveEventChans.mu.Unlock() - if ok { - select { - case ch <- retrieveEvent{typ: 1, n: n}: - default: - } - } - }) -} - -func StopRetrieveCapture(taskID string) { - UnregisterRetrieveHook(taskID) - UnregisterFoundLocalHook(taskID) - retrieveEventChans.mu.Lock() - if ch, ok := retrieveEventChans.m[taskID]; ok { - delete(retrieveEventChans.m, taskID) - close(ch) - } - retrieveEventChans.mu.Unlock() -} - -// SetRetrieveBatchSummary sets counts for a retrieval attempt. 
-func SetRetrieveBatchSummary(taskID string, keys, required, foundLocal, foundNet int, retrieveMS int64) { - if taskID == "" { - return - } - s := retrieveSessions.m[taskID] - if s == nil { - s = &retrieveSession{CallsByIP: map[string][]Call{}} - retrieveSessions.m[taskID] = s - } - s.Keys = keys - s.Required = required - s.FoundLocal = foundLocal - s.FoundNet = foundNet - s.RetrieveMS = retrieveMS -} - -// SetRetrieveSummary sets timing info for retrieve/decode phases. -func SetRetrieveSummary(taskID string, retrieveMS, decodeMS int64) { - if taskID == "" { - return - } - s := retrieveSessions.m[taskID] - if s == nil { - s = &retrieveSession{CallsByIP: map[string][]Call{}} - retrieveSessions.m[taskID] = s - } - s.RetrieveMS = retrieveMS - s.DecodeMS = decodeMS -} - -// BuildDownloadEventPayloadFromCollector builds the download section payload. -func BuildDownloadEventPayloadFromCollector(taskID string) map[string]any { - s := retrieveSessions.m[taskID] - if s == nil { - return map[string]any{ - "retrieve": map[string]any{ - "keys": 0, - "required": 0, - "found_local": 0, - "found_net": 0, - "retrieve_ms": int64(0), - "decode_ms": int64(0), - "calls_by_ip": map[string][]Call{}, - }, - } - } - // Create a snapshot copy of CallsByIP to avoid concurrent map access - s.mu.RLock() - callsCopy := make(map[string][]Call, len(s.CallsByIP)) - for k, v := range s.CallsByIP { - vv := make([]Call, len(v)) - copy(vv, v) - callsCopy[k] = vv - } - s.mu.RUnlock() - - return map[string]any{ - "retrieve": map[string]any{ - "keys": s.Keys, - "required": s.Required, - "found_local": s.FoundLocal, - "found_net": s.FoundNet, - "retrieve_ms": s.RetrieveMS, - "decode_ms": s.DecodeMS, - "calls_by_ip": callsCopy, - }, - } -} - -// -------- Context helpers (dedicated to metrics tagging) -------------------- - -type ctxKey string - -var taskIDKey ctxKey = "p2pmetrics-task-id" - -// WithTaskID returns a child context with the metrics task ID set. 
-func WithTaskID(ctx context.Context, taskID string) context.Context { - if ctx == nil { - return context.Background() - } - return context.WithValue(ctx, taskIDKey, taskID) -} - -// TaskIDFromContext extracts the metrics task ID from context (or ""). -func TaskIDFromContext(ctx context.Context) string { - if ctx == nil { - return "" - } - if v := ctx.Value(taskIDKey); v != nil { - if s, ok := v.(string); ok { - return s - } - } - return "" -} diff --git a/pkg/raptorq/helper.go b/pkg/raptorq/helper.go deleted file mode 100644 index ea36b1ab..00000000 --- a/pkg/raptorq/helper.go +++ /dev/null @@ -1,49 +0,0 @@ -package raptorq - -import ( - "bytes" - "context" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/cosmos/btcutil/base58" - "strconv" -) - -const ( - InputEncodeFileName = "input.data" - SeparatorByte byte = 46 // separator in dd_and_fingerprints.signature i.e. '.' -) - -// GetIDFiles generates ID Files for dd_and_fingerprints files and rq_id files -// file is b64 encoded file appended with signatures and compressed, ic is the initial counter -// and max is the number of ids to generate -func GetIDFiles(ctx context.Context, file []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { - idFiles := make([][]byte, 0, max) - ids = make([]string, 0, max) - var buffer bytes.Buffer - - for i := uint32(0); i < max; i++ { - buffer.Reset() - counter := ic + i - - buffer.Write(file) - buffer.WriteByte(SeparatorByte) - buffer.WriteString(strconv.Itoa(int(counter))) // Using the string representation to maintain backward compatibility - - compressedData, err := utils.HighCompress(ctx, buffer.Bytes()) // Ensure you're using the same compression level - if err != nil { - return ids, idFiles, errors.Errorf("compress identifiers file: %w", err) - } - - idFiles = append(idFiles, compressedData) - - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - return ids, idFiles, 
errors.Errorf("sha3-256-hash error getting an id file: %w", err) - } - - ids = append(ids, base58.Encode(hash)) - } - - return ids, idFiles, nil -} diff --git a/pkg/storage/queries/health_check.go b/pkg/storage/queries/health_check.go index e76799da..96802dd8 100644 --- a/pkg/storage/queries/health_check.go +++ b/pkg/storage/queries/health_check.go @@ -98,10 +98,10 @@ func (s *SQLiteStore) GetHCSummaryStats(from time.Time) (hcMetrics metrics.HCMet if err != nil { return hcMetrics, err } - logtrace.Info(context.Background(), "observer evaluations retrieved", logtrace.Fields{"observer_evaluations": len(hcObserversEvaluations), "from": from}) + logtrace.Debug(context.Background(), "observer evaluations retrieved", logtrace.Fields{"observer_evaluations": len(hcObserversEvaluations), "from": from}) observerEvaluationMetrics := processHCObserverEvaluations(hcObserversEvaluations) - logtrace.Info(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{"observer_evaluation_metrics": len(observerEvaluationMetrics), "from": from}) + logtrace.Debug(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{"observer_evaluation_metrics": len(observerEvaluationMetrics), "from": from}) for _, obMetrics := range observerEvaluationMetrics { if obMetrics.ChallengesVerified >= 3 { @@ -154,7 +154,7 @@ func (s *SQLiteStore) GetMetricsDataByHealthCheckChallengeID(ctx context.Context if err != nil { return healthCheckChallengeMessages, err } - logtrace.Info(ctx, "health-check-challenge metrics row count", logtrace.Fields{"rows": len(hcMetrics), "challenge_id": challengeID}) + logtrace.Debug(ctx, "health-check-challenge metrics row count", logtrace.Fields{"rows": len(hcMetrics), "challenge_id": challengeID}) for _, hcMetric := range hcMetrics { msg := types.HealthCheckMessageData{} diff --git a/pkg/storage/queries/self_healing.go b/pkg/storage/queries/self_healing.go index 47145a0b..61e7c63c 100644 --- a/pkg/storage/queries/self_healing.go +++ 
b/pkg/storage/queries/self_healing.go @@ -257,7 +257,7 @@ func (s *SQLiteStore) GetSHExecutionMetrics(ctx context.Context, from time.Time) if err != nil { return m, err } - logtrace.Info(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) + logtrace.Debug(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) challenges := make(map[string]SHChallengeMetric) for _, row := range rows { @@ -361,11 +361,11 @@ func (s *SQLiteStore) GetSHExecutionMetrics(ctx context.Context, from time.Time) } } - logtrace.Info(ctx, "self-healing execution metrics challenges count", logtrace.Fields{"challenges": len(challenges)}) + logtrace.Debug(ctx, "self-healing execution metrics challenges count", logtrace.Fields{"challenges": len(challenges)}) for _, challenge := range challenges { - logtrace.Info(ctx, "self-healing challenge metric", logtrace.Fields{ + logtrace.Debug(ctx, "self-healing challenge metric", logtrace.Fields{ "challenge-id": challenge.ChallengeID, "is-accepted": challenge.IsAccepted, "is-verified": challenge.IsVerified, @@ -475,7 +475,7 @@ func (s *SQLiteStore) GetLastNSHChallenges(ctx context.Context, n int) (types.Se if err != nil { return challenges, err } - logtrace.Info(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) + logtrace.Debug(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) challengesInserted := 0 for _, row := range rows { @@ -507,7 +507,7 @@ func (s *SQLiteStore) GetSHChallengeReport(ctx context.Context, challengeID stri if err != nil { return challenges, err } - logtrace.Info(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) + logtrace.Debug(ctx, "self-healing execution metrics row count", logtrace.Fields{"rows": len(rows)}) for _, row := range rows { if row.ChallengeID == challengeID { diff --git a/pkg/storage/queries/storage_challenge.go b/pkg/storage/queries/storage_challenge.go index 
574e7f4f..164ed2be 100644 --- a/pkg/storage/queries/storage_challenge.go +++ b/pkg/storage/queries/storage_challenge.go @@ -97,7 +97,7 @@ func (s *SQLiteStore) GetMetricsDataByStorageChallengeID(ctx context.Context, ch return storageChallengeMessages, err } // log.WithContext(ctx).WithField("rows", len(scMetrics)).Info("storage-challenge metrics row count") - logtrace.Info(ctx, "storage-challenge metrics row count", logtrace.Fields{ + logtrace.Debug(ctx, "storage-challenge metrics row count", logtrace.Fields{ "rows": len(scMetrics), }) @@ -210,13 +210,13 @@ func (s *SQLiteStore) GetSCSummaryStats(from time.Time) (scMetrics metrics.SCMet return scMetrics, err } // log.WithField("observer_evaluations", len(observersEvaluations)).Info("observer evaluations retrieved") - logtrace.Info(context.Background(), "observer evaluations retrieved", logtrace.Fields{ + logtrace.Debug(context.Background(), "observer evaluations retrieved", logtrace.Fields{ "observer_evaluations": len(observersEvaluations), }) observerEvaluationMetrics := processObserverEvaluations(observersEvaluations) // log.WithField("observer_evaluation_metrics", len(observerEvaluationMetrics)).Info("observer evaluation metrics retrieved") - logtrace.Info(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{ + logtrace.Debug(context.Background(), "observer evaluation metrics retrieved", logtrace.Fields{ "observer_evaluation_metrics": len(observerEvaluationMetrics), }) diff --git a/pkg/storage/queries/task_history.go b/pkg/storage/queries/task_history.go index 73a55ef8..29539a49 100644 --- a/pkg/storage/queries/task_history.go +++ b/pkg/storage/queries/task_history.go @@ -59,7 +59,7 @@ func (s *SQLiteStore) QueryTaskHistory(taskID string) (history []types.TaskHisto err = json.Unmarshal([]byte(details), &i.Details) if err != nil { - logtrace.Info(context.Background(), "Detals", logtrace.Fields{"details": details}) + logtrace.Debug(context.Background(), "Detals", 
logtrace.Fields{"details": details}) logtrace.Error(context.Background(), fmt.Sprintf("cannot unmarshal task history details: %s", details), logtrace.Fields{"error": err}) i.Details = nil } diff --git a/pkg/task/handle.go b/pkg/task/handle.go new file mode 100644 index 00000000..74f6e406 --- /dev/null +++ b/pkg/task/handle.go @@ -0,0 +1,66 @@ +package task + +import ( + "context" + "sync" + "time" + + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" +) + +// Handle manages a running task with an optional watchdog. +// It ensures Start and End are paired, logs start/end, and auto-ends on timeout. +type Handle struct { + tr Tracker + service string + id string + stop chan struct{} + once sync.Once +} + +// Start starts tracking a task and returns a Handle that will ensure the +// task is ended. A watchdog is started to auto-end the task after timeout +// to avoid indefinitely stuck running tasks in status reporting. +func StartWith(tr Tracker, ctx context.Context, service, id string, timeout time.Duration) *Handle { + if tr == nil || service == "" || id == "" { + return &Handle{} + } + tr.Start(service, id) + logtrace.Info(ctx, "task: started", logtrace.Fields{"service": service, "task_id": id}) + + g := &Handle{tr: tr, service: service, id: id, stop: make(chan struct{})} + if timeout > 0 { + go func() { + select { + case <-time.After(timeout): + g.endWith(ctx, true) + case <-g.stop: + } + }() + } + return g +} + +// End stops tracking the task. Safe to call multiple times. +func (g *Handle) End(ctx context.Context) { + g.endWith(ctx, false) +} + +// EndWith ends the guard and logs accordingly. If expired is true, +// it emits a warning and ends the task to avoid stuck status. 
+func (g *Handle) endWith(ctx context.Context, expired bool) { + if g == nil || g.service == "" || g.id == "" { + return + } + g.once.Do(func() { + close(g.stop) + if g.tr != nil { + g.tr.End(g.service, g.id) + } + if expired { + logtrace.Warn(ctx, "task: watchdog expired", logtrace.Fields{"service": g.service, "task_id": g.id}) + } else { + logtrace.Info(ctx, "task: ended", logtrace.Fields{"service": g.service, "task_id": g.id}) + } + }) +} diff --git a/pkg/task/task.go b/pkg/task/task.go new file mode 100644 index 00000000..8d0c0052 --- /dev/null +++ b/pkg/task/task.go @@ -0,0 +1,79 @@ +// Package task provides a lean, concurrency-safe, in-memory tracker for +// live tasks running inside a service. It is designed to be generic and +// reusable across multiple features (e.g., cascade upload/download) and +// only tracks tasks while the enclosing RPC/handler is alive. No +// persistence, progress reporting, or background processing is included. +package task + +import "sync" + +// Tracker defines a minimal interface for tracking live tasks per service. +// Implementations must be concurrency-safe. All methods are non-blocking +// and best-effort; invalid inputs are ignored. +type Tracker interface { + Start(service, taskID string) + End(service, taskID string) + Snapshot() map[string][]string +} + +// InMemoryTracker is a lean, concurrency-safe tracker of live tasks. +// It stores only in-memory state for the lifetime of the process and +// returns copies when asked for a snapshot to ensure isolation. +type InMemoryTracker struct { + mu sync.RWMutex + // service -> set(taskID) + data map[string]map[string]struct{} +} + +// New creates and returns a new in-memory tracker. +func New() *InMemoryTracker { + return &InMemoryTracker{data: make(map[string]map[string]struct{})} +} + +// Start marks a task as running under a given service. Empty arguments +// are ignored. Calling Start with the same (service, taskID) pair is idempotent. 
+func (t *InMemoryTracker) Start(service, taskID string) { + if service == "" || taskID == "" { + return + } + t.mu.Lock() + m, ok := t.data[service] + if !ok { + m = make(map[string]struct{}) + t.data[service] = m + } + m[taskID] = struct{}{} + t.mu.Unlock() +} + +// End removes a running task under a given service. Empty arguments +// are ignored. Removing a non-existent (service, taskID) pair is a no-op. +func (t *InMemoryTracker) End(service, taskID string) { + if service == "" || taskID == "" { + return + } + t.mu.Lock() + if m, ok := t.data[service]; ok { + delete(m, taskID) + if len(m) == 0 { + delete(t.data, service) + } + } + t.mu.Unlock() +} + +// Snapshot returns a copy of the current running tasks per service. +// The returned map and slices are independent of internal state. +func (t *InMemoryTracker) Snapshot() map[string][]string { + out := make(map[string][]string) + t.mu.RLock() + for svc, m := range t.data { + ids := make([]string, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + out[svc] = ids + } + t.mu.RUnlock() + return out +} diff --git a/pkg/task/task_test.go b/pkg/task/task_test.go new file mode 100644 index 00000000..1550bc37 --- /dev/null +++ b/pkg/task/task_test.go @@ -0,0 +1,157 @@ +package task + +import ( + "context" + "sync" + "testing" + "time" +) + +func TestStartEndSnapshot(t *testing.T) { + tr := New() + + // Initially empty + if snap := tr.Snapshot(); len(snap) != 0 { + t.Fatalf("expected empty snapshot, got %#v", snap) + } + + // Start two tasks under same service + tr.Start("svc", "id1") + tr.Start("svc", "id2") + + snap := tr.Snapshot() + ids, ok := snap["svc"] + if !ok { + t.Fatalf("expected service 'svc' in snapshot") + } + if len(ids) != 2 { + t.Fatalf("expected 2 ids, got %d (%v)", len(ids), ids) + } + + // End one task + tr.End("svc", "id1") + snap = tr.Snapshot() + ids = snap["svc"] + if len(ids) != 1 { + t.Fatalf("expected 1 id, got %d (%v)", len(ids), ids) + } + if ids[0] != "id2" && ids[0] != "id1" { // 
order not guaranteed; check that id2 remains by set membership + // Build a small set for clarity + m := map[string]struct{}{} + for _, v := range ids { + m[v] = struct{}{} + } + if _, ok := m["id2"]; !ok { + t.Fatalf("expected id2 to remain, got %v", ids) + } + } + + // End last task + tr.End("svc", "id2") + snap = tr.Snapshot() + if _, ok := snap["svc"]; ok { + t.Fatalf("expected service removed after last task ended, got %v", snap) + } +} + +func TestInvalidInputsAndIsolation(t *testing.T) { + tr := New() + + // Invalid inputs should be ignored + tr.Start("", "id") + tr.Start("svc", "") + tr.End("", "id") + tr.End("svc", "") + if snap := tr.Snapshot(); len(snap) != 0 { + t.Fatalf("expected empty snapshot for invalid inputs, got %#v", snap) + } + + // Snapshot must be a copy + tr.Start("svc", "id") + snap := tr.Snapshot() + // mutate snapshot map and slice + delete(snap, "svc") + snap2 := tr.Snapshot() + if _, ok := snap2["svc"]; !ok { + t.Fatalf("mutating snapshot should not affect tracker state") + } +} + +// TestConcurrentAccessNoPanic ensures that concurrent Start/End/Snapshot +// operations do not panic due to unsafe map access. +func TestConcurrentAccessNoPanic(t *testing.T) { + tr := New() + + // Run a mix of writers and readers concurrently. + var wg sync.WaitGroup + startWriters := 8 + snapReaders := 4 + loops := 1000 + + // Writers: repeatedly start/end tasks across a few services. + for w := 0; w < startWriters; w++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + for i := 0; i < loops; i++ { + svc := "svc" + string('A'+rune(id%3)) // svcA, svcB, svcC + tid := svc + ":t" + fmtInt(i%5) + tr.Start(svc, tid) + if i%2 == 0 { + tr.End(svc, tid) + } + } + }(w) + } + + // Readers: take snapshots concurrently. + for r := 0; r < snapReaders; r++ { + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < loops; i++ { + _ = tr.Snapshot() + } + }() + } + + // If there is any concurrent map access bug, the test runner would panic. 
+ done := make(chan struct{}) + go func() { wg.Wait(); close(done) }() + select { + case <-done: + // ok + case <-time.After(5 * time.Second): + t.Fatal("concurrent access test timed out") + } +} + +// fmtInt provides a tiny int-to-string helper to avoid importing strconv. +func fmtInt(i int) string { return string('0' + rune(i)) } + +func TestHandleIdempotentAndWatchdog(t *testing.T) { + tr := New() + ctx := context.Background() + + // Idempotent End + g := StartWith(tr, ctx, "svc.handle", "id-1", 0) + g.End(ctx) + g.End(ctx) // no panic, no double-end crash + + // Watchdog auto-end: use a small timeout + g2 := StartWith(tr, ctx, "svc.handle", "id-2", 50*time.Millisecond) + _ = g2 // ensure handle stays referenced until timeout path + // Do not call End; let the watchdog fire + time.Sleep(120 * time.Millisecond) + + // After watchdog, the task should not be listed + snap := tr.Snapshot() + if ids, ok := snap["svc.handle"]; ok { + // If still present, ensure id-2 is not in the list + for _, id := range ids { + if id == "id-2" { + t.Fatalf("expected watchdog to remove id-2 from svc.handle; snapshot: %v", ids) + } + } + } +} diff --git a/pkg/testutil/lumera.go b/pkg/testutil/lumera.go index 3f556a97..20596b85 100644 --- a/pkg/testutil/lumera.go +++ b/pkg/testutil/lumera.go @@ -9,15 +9,18 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" + bankmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" + sdkmath "cosmossdk.io/math" cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" "github.com/cosmos/cosmos-sdk/crypto/keyring" sdktypes "github.com/cosmos/cosmos-sdk/types" sdktx 
"github.com/cosmos/cosmos-sdk/types/tx" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" ) // MockLumeraClient implements the lumera.Client interface for testing purposes @@ -25,6 +28,7 @@ type MockLumeraClient struct { authMod *MockAuthModule actionMod *MockActionModule actionMsgMod *MockActionMsgModule + bankMod *MockBankModule supernodeMod *MockSupernodeModule txMod *MockTxModule nodeMod *MockNodeModule @@ -36,6 +40,7 @@ type MockLumeraClient struct { func NewMockLumeraClient(kr keyring.Keyring, addresses []string) (lumera.Client, error) { actionMod := &MockActionModule{} actionMsgMod := &MockActionMsgModule{} + bankMod := &MockBankModule{} supernodeMod := &MockSupernodeModule{addresses: addresses} txMod := &MockTxModule{} nodeMod := &MockNodeModule{} @@ -44,6 +49,7 @@ func NewMockLumeraClient(kr keyring.Keyring, addresses []string) (lumera.Client, authMod: &MockAuthModule{}, actionMod: actionMod, actionMsgMod: actionMsgMod, + bankMod: bankMod, supernodeMod: supernodeMod, txMod: txMod, nodeMod: nodeMod, @@ -67,6 +73,11 @@ func (c *MockLumeraClient) ActionMsg() action_msg.Module { return c.actionMsgMod } +// Bank returns the Bank module client +func (c *MockLumeraClient) Bank() bankmod.Module { + return c.bankMod +} + // SuperNode returns the SuperNode module client func (c *MockLumeraClient) SuperNode() supernode.Module { return c.supernodeMod @@ -87,6 +98,15 @@ func (c *MockLumeraClient) Close() error { return nil } +// MockBankModule implements the bank.Module interface for testing +type MockBankModule struct{} + +// Balance returns a positive balance for any address/denom to pass checks by default +func (m *MockBankModule) Balance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) { + // Return >= 1 LUME in micro units to satisfy threshold checks + return &banktypes.QueryBalanceResponse{Balance: &sdktypes.Coin{Denom: denom, Amount: 
sdkmath.NewInt(1_000_000)}}, nil +} + // MockAuthModule implements the auth.Module interface for testing type MockAuthModule struct{} @@ -124,8 +144,8 @@ type MockActionMsgModule struct{} // RequestAction mocks the behavior of requesting an action. func (m *MockActionMsgModule) RequestAction(ctx context.Context, actionType, metadata, price, expirationTime string) (*sdktx.BroadcastTxResponse, error) { - // Mock implementation returns success with empty result - return &sdktx.BroadcastTxResponse{}, nil + // Mock implementation returns success with empty result + return &sdktx.BroadcastTxResponse{}, nil } // FinalizeCascadeAction implements the required method from action_msg.Module interface @@ -145,7 +165,7 @@ type MockSupernodeModule struct { addresses []string } -func (m *MockSupernodeModule) GetTopSuperNodesForBlock(ctx context.Context, blockHeight uint64) (*supernodeTypes.QueryGetTopSuperNodesForBlockResponse, error) { +func (m *MockSupernodeModule) GetTopSuperNodesForBlock(ctx context.Context, req *supernodeTypes.QueryGetTopSuperNodesForBlockRequest) (*supernodeTypes.QueryGetTopSuperNodesForBlockResponse, error) { return &supernodeTypes.QueryGetTopSuperNodesForBlockResponse{}, nil } diff --git a/pkg/utils/hasher.go b/pkg/utils/hasher.go new file mode 100644 index 00000000..3aad065b --- /dev/null +++ b/pkg/utils/hasher.go @@ -0,0 +1,127 @@ +package utils + +import ( + "encoding/hex" + "io" + "os" + + "lukechampine.com/blake3" +) + +// hashReaderBLAKE3 computes a BLAKE3 hash using an adaptive, +// manual buffered read loop to avoid the *os.File.WriteTo fast-path +// that limits throughput when using io.Copy/io.CopyBuffer. +// +// The buffer size is chosen based on data size: +// +// ≤ 4 MiB → 512 KiB buffer +// 4–32 MiB → 1 MiB buffer +// 32 MiB–2 GiB → 2 MiB buffer +// > 2 GiB → 4 MiB buffer +// +// Buffers are reused from a concurrent-safe pool to reduce allocations. 
+// This approach achieved the following throughput in benchmarks +// on AMD Ryzen 9 5900X (Linux, lukechampine.com/blake3): +// +// Data size | Adaptive | Manual(1MiB) | io.Copy(~32KiB) +// ----------|-------------|--------------|---------------- +// 1 MiB | 1.80 GB/s | 1.26 GB/s | 0.52 GB/s +// 32 MiB | 3.00 GB/s | 3.02 GB/s | 0.50 GB/s +// 256 MiB | 3.79 GB/s | 3.35 GB/s | 0.48 GB/s +// 1 GiB | 3.91 GB/s | 3.27 GB/s | 0.53 GB/s +// +// Compared to io.Copy/io.CopyBuffer, the adaptive manual loop is +// up to ~7× faster on large files, with fewer allocations. +func hashReaderBLAKE3(r io.Reader, chunkSize int64) ([]byte, error) { + chunk := chunkSize + if chunk <= 0 { + chunk = chunkSizeFor(0) // fallback to default chunk size + } + buf := make([]byte, chunk) + + h := blake3.New(32, nil) + for { + n, rerr := r.Read(buf) + if n > 0 { + if _, werr := h.Write(buf[:n]); werr != nil { + return nil, werr + } + } + if rerr == io.EOF { + break + } + if rerr != nil { + return nil, rerr + } + } + return h.Sum(nil), nil +} + +// chunkSizeFor returns the hashing chunk size based on total input size. +func chunkSizeFor(total int64) int64 { + if total <= 0 { + return 512 << 10 // 512 KiB default when total size is unknown + } + switch { + case total <= 4<<20: // ≤ 4 MiB + return 512 << 10 // 512 KiB + case total <= 32<<20: // ≤ 32 MiB + return 1 << 20 // 1 MiB + case total <= 2<<30: // ≤ 2 GiB + return 2 << 20 // 2 MiB + default: // very large files > 2 GiB + return 4 << 20 // 4 MiB cap + } +} + +// Blake3HashFile returns BLAKE3 hash of a file (auto-selects chunk size). +func Blake3HashFile(filePath string) ([]byte, error) { + return Blake3HashFileWithChunkSize(filePath, 0) +} + +// Blake3HashFileWithChunkSize returns the BLAKE3 hash of a file. +// Use chunkSize > 0 to specify chunk size; otherwise auto-selects based on file size. 
+func Blake3HashFileWithChunkSize(filePath string, chunkSize int64) ([]byte, error) { + // If chunkSize > 0, honor caller; otherwise auto-select based on file size. + f, err := os.Open(filePath) + if err != nil { + return nil, err + } + defer f.Close() + + if chunkSize <= 0 { + fi, err := f.Stat() + if err != nil { + return nil, err + } + chunkSize = chunkSizeFor(fi.Size()) + } + return hashReaderBLAKE3(f, chunkSize) +} + +// Blake3Hash returns BLAKE3 hash of msg. +func Blake3Hash(msg []byte) ([]byte, error) { + h := blake3.New(32, nil) + if _, err := h.Write(msg); err != nil { + return nil, err + } + return h.Sum(nil), nil +} + +// GetHashFromBytes generate blake3 hash string from a given byte array +// and return it as a hex-encoded string. If an error occurs during hashing, +// an empty string is returned. +func GetHashFromBytes(msg []byte) string { + sum, err := Blake3Hash(msg) + if err != nil { + return "" + } + + return hex.EncodeToString(sum) +} + +// GetHashFromString returns blake3 hash of a given string +func GetHashFromString(s string) []byte { + sum := blake3.Sum256([]byte(s)) + return sum[:] +} diff --git a/pkg/utils/hasher_test.go b/pkg/utils/hasher_test.go new file mode 100644 index 00000000..e6f8448c --- /dev/null +++ b/pkg/utils/hasher_test.go @@ -0,0 +1,251 @@ +package utils + +import ( + "bytes" + "encoding/hex" + "errors" + "os" + "path/filepath" + "strings" + "testing" + + "lukechampine.com/blake3" +) + +func TestChunkSizeFor(t *testing.T) { + const ( + kib = 1 << 10 + mib = 1 << 20 + gib = 1 << 30 + ) + + cases := []struct { + name string + input int64 + want int64 + }{ + {"unknownOrZero", 0, 512 * kib}, + {"negative", -1, 512 * kib}, + {"under4MiB", 3*mib + 512*kib, 512 * kib}, + {"exact4MiB", 4 * mib, 512 * kib}, + {"justOver4MiB", 4*mib + 1, 1 * mib}, + {"exact32MiB", 32 * mib, 1 * mib}, + {"justOver32MiB", 32*mib + 1, 2 * mib}, + {"exact2GiB", 2 * gib, 2 * mib}, + {"above2GiB", 2*gib + 1, 4 * mib}, + } + + for _, tc := range cases { + tc 
:= tc + t.Run(tc.name, func(t *testing.T) { + if got := chunkSizeFor(tc.input); got != tc.want { + t.Fatalf("chunkSizeFor(%d) = %d, want %d", tc.input, got, tc.want) + } + }) + } +} + +func TestBlake3Hash(t *testing.T) { + t.Parallel() + + msg := []byte(strings.Repeat("blake3 data", 1024)) + want := blake3.Sum256(msg) + + got, err := Blake3Hash(msg) + if err != nil { + t.Fatalf("Blake3Hash returned error: %v", err) + } + if !bytes.Equal(got, want[:]) { + t.Fatalf("hash mismatch for auto chunking") + } + + got, err = Blake3Hash(msg) + if err != nil { + t.Fatalf("Blake3Hash with buf size returned error: %v", err) + } + if !bytes.Equal(got, want[:]) { + t.Fatalf("hash mismatch with explicit chunk size") + } +} + +func TestBlake3HashFileWithChunkSize(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + + blakeHex := func(data []byte) string { + sum := blake3.Sum256(data) + return hex.EncodeToString(sum[:]) + } + + createFile := func(name string, content []byte) string { + path := filepath.Join(dir, name) + if err := os.WriteFile(path, content, 0o600); err != nil { + t.Fatalf("write file: %v", err) + } + return path + } + + smallData := []byte(strings.Repeat("0123456789abcdef", 1<<10)) + emptyData := []byte{} + largeData := make([]byte, 5<<20) // 5 MiB zeroed payload + + smallFile := createFile("small.bin", smallData) + emptyFile := createFile("empty.bin", emptyData) + largeFile := createFile("large.bin", largeData) + + tests := []struct { + name string + path string + chunkSize int64 + wantHex string + wantErr bool + }{ + { + name: "small file", + path: smallFile, + chunkSize: 4 << 10, + wantHex: blakeHex(smallData), + }, + { + name: "empty file", + path: emptyFile, + chunkSize: 1 << 10, + wantHex: blakeHex(emptyData), + }, + { + name: "large file", + path: largeFile, + chunkSize: 1 << 20, + wantHex: blakeHex(largeData), + }, + { + name: "file missing", + path: filepath.Join(dir, "missing.bin"), + chunkSize: 4 << 10, + wantErr: true, + }, + { + name: "zero chunk 
uses default", + path: smallFile, + chunkSize: 0, + wantHex: blakeHex(smallData), + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + got, err := Blake3HashFileWithChunkSize(tc.path, tc.chunkSize) + if (err != nil) != tc.wantErr { + t.Fatalf("error mismatch: wantErr=%v err=%v", tc.wantErr, err) + } + if tc.wantErr { + return + } + + gotHex := hex.EncodeToString(got) + if gotHex != tc.wantHex { + t.Fatalf("hash mismatch: got %s want %s", gotHex, tc.wantHex) + } + }) + } +} + +func TestBlake3HashWrapper(t *testing.T) { + t.Parallel() + + msg := []byte(strings.Repeat("wrapper data", 512)) + want := blake3.Sum256(msg) + + got, err := Blake3Hash(msg) + if err != nil { + t.Fatalf("Blake3Hash returned error: %v", err) + } + if !bytes.Equal(got, want[:]) { + t.Fatalf("Blake3Hash returned unexpected digest") + } +} + +func TestBlake3HashFileWrapper(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + file := filepath.Join(dir, "wrapper.bin") + msg := []byte(strings.Repeat("file data", 1024)) + if err := os.WriteFile(file, msg, 0o600); err != nil { + t.Fatalf("write file: %v", err) + } + + want := blake3.Sum256(msg) + + got, err := Blake3HashFile(file) + if err != nil { + t.Fatalf("Blake3HashFile returned error: %v", err) + } + if !bytes.Equal(got, want[:]) { + t.Fatalf("Blake3HashFile returned unexpected digest") + } +} + +func TestGetHashFromBytes(t *testing.T) { + t.Parallel() + + msg := []byte(strings.Repeat("hash from bytes", 256)) + sum := blake3.Sum256(msg) + want := hex.EncodeToString(sum[:]) + + got := GetHashFromBytes(msg) + if got != want { + t.Fatalf("GetHashFromBytes() = %q, want %q", got, want) + } + if got == "" { + t.Fatalf("GetHashFromBytes returned empty string") + } +} + +func TestGetHashFromString(t *testing.T) { + t.Parallel() + + input := "string payload for hashing" + sum := blake3.Sum256([]byte(input)) + + got := GetHashFromString(input) + if !bytes.Equal(got, sum[:]) { + 
t.Fatalf("GetHashFromString() = %x, want %x", got, sum) + } + if len(got) != len(sum) { + t.Fatalf("unexpected digest length: got %d, want %d", len(got), len(sum)) + } +} + +type errorAfterFirstRead struct { + first bool + err error + data []byte +} + +func (r *errorAfterFirstRead) Read(p []byte) (int, error) { + if !r.first { + r.first = true + n := copy(p, r.data) + return n, nil + } + return 0, r.err +} + +func TestHashReaderBLAKE3ReadError(t *testing.T) { + t.Parallel() + + readErr := errors.New("read boom") + r := &errorAfterFirstRead{ + data: []byte("abc"), + err: readErr, + } + + if _, err := hashReaderBLAKE3(r, 0); !errors.Is(err, readErr) { + t.Fatalf("expected read error to propagate, got %v", err) + } +} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 81291cb0..555f0936 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -7,7 +7,6 @@ import ( "crypto/rand" "encoding/base64" "encoding/binary" - "encoding/hex" "fmt" "io" "log" @@ -22,8 +21,6 @@ import ( "strings" "time" - "lukechampine.com/blake3" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" "golang.org/x/sync/semaphore" @@ -132,46 +129,6 @@ func EqualStrList(a, b []string) error { return nil } -// Blake3Hash returns Blake3 hash of input message -func Blake3Hash(msg []byte) ([]byte, error) { - hasher := blake3.New(32, nil) - if _, err := io.Copy(hasher, bytes.NewReader(msg)); err != nil { - return nil, err - } - return hasher.Sum(nil), nil -} - -// GetHashFromBytes generate blake3 hash string from a given byte array -func GetHashFromBytes(msg []byte) string { - h := blake3.New(32, nil) - if _, err := io.Copy(h, bytes.NewReader(msg)); err != nil { - return "" - } - - return hex.EncodeToString(h.Sum(nil)) -} - -// GetHashFromString returns blake3 hash of a given string -func GetHashFromString(s string) []byte { - sum := blake3.Sum256([]byte(s)) - return sum[:] -} - -func ComputeHashOfFile(filePath string) ([]byte, error) { - file, err := os.Open(filePath) - if err != nil { - 
return nil, err - } - defer file.Close() - - hasher := blake3.New(32, nil) - if _, err := io.Copy(hasher, file); err != nil { - return nil, err - } - - return hasher.Sum(nil), nil -} - // XORBytes returns the XOR of two same-length byte slices. func XORBytes(a, b []byte) ([]byte, error) { if len(a) != len(b) { diff --git a/profile_cascade.sh b/profile_cascade.sh new file mode 100755 index 00000000..9b6fe71a --- /dev/null +++ b/profile_cascade.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +# Cascade Download Heap Profiling Script +# Samples heap every 30 seconds during cascade downloads + +# Configuration - modify these as needed +PROFILE_URL="http://localhost:8002/api/v1/debug/raw/pprof/heap" +INTERVAL=30 +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +PROFILE_DIR="profiles_${TIMESTAMP}" + +# Allow override via command line +if [ "$1" != "" ]; then + PROFILE_URL="$1" +fi + +echo "=== Cascade Heap Profiling ===" +echo "Profile URL: $PROFILE_URL" +echo "Interval: ${INTERVAL}s" +echo "Output Dir: $PROFILE_DIR" +echo + +# Create profile directory +mkdir -p "$PROFILE_DIR" +cd "$PROFILE_DIR" + +# Test connection first +echo "Testing connection to profiling server..." +if ! curl -s --fail "$PROFILE_URL" > /dev/null; then + echo "ERROR: Cannot connect to profiling server at $PROFILE_URL" + echo "Make sure your supernode is running on testnet!" + exit 1 +fi + +echo "✓ Connected to profiling server" +echo + +# Take baseline +echo "Taking baseline heap snapshot..." +curl -s -o "heap_00s.prof" "$PROFILE_URL" +echo "✓ Baseline saved: heap_00s.prof" +echo + +echo "*** NOW START YOUR CASCADE DOWNLOAD ***" +echo "Press ENTER when download has started..." +read + +echo "Starting heap profiling every ${INTERVAL}s..." +echo "Press Ctrl+C to stop" +echo + +# Counter for snapshots +counter=1 + +# Function to handle cleanup on exit +cleanup() { + echo + echo "Profiling stopped. Taking final snapshot..." 
+ final_elapsed=$((counter * INTERVAL)) + curl -s -o "heap_${final_elapsed}s_final.prof" "$PROFILE_URL" + + echo + echo "=== Profiling Complete ===" + echo "Location: $(pwd)" + echo "Files created:" + ls -la *.prof + echo + echo "Analysis commands:" + echo "# Compare baseline to final:" + echo "go tool pprof -http=:8080 -base heap_00s.prof heap_${final_elapsed}s_final.prof" + exit 0 +} + +# Set up signal handler +trap cleanup INT TERM + +# Main profiling loop +while true; do + sleep $INTERVAL + + elapsed=$((counter * INTERVAL)) + minutes=$((elapsed / 60)) + seconds=$((elapsed % 60)) + + timestamp=$(date +%H:%M:%S) + filename="heap_${elapsed}s.prof" + + echo "[$timestamp] Taking snapshot $counter (${minutes}m ${seconds}s elapsed)..." + + if curl -s -o "$filename" "$PROFILE_URL"; then + size=$(ls -lh "$filename" | awk '{print $5}') + echo "✓ Saved: $filename ($size)" + else + echo "✗ Failed to get snapshot $counter" + fi + + ((counter++)) +done \ No newline at end of file diff --git a/proto/dupedetection/dd-server.proto b/proto/dupedetection/dd-server.proto deleted file mode 100644 index 0217aece..00000000 --- a/proto/dupedetection/dd-server.proto +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -syntax = "proto3"; - -option go_package = "github.com/LumeraProtocol/supernode/v2/gen/dupedetection"; - -package dupedetection; - -service DupeDetectionServer { - rpc ImageRarenessScore(RarenessScoreRequest) returns(ImageRarenessScoreReply); - rpc GetStatus(GetStatusRequest) returns(GetStatusResponse); -} - -message RarenessScoreRequest { - string image_filepath = 1; - string pastel_block_hash_when_request_submitted = 2; - string pastel_block_height_when_request_submitted = 3; - string utc_timestamp_when_request_submitted = 4; - string pastel_id_of_submitter = 5; - string pastel_id_of_registering_supernode_1 = 6; - string pastel_id_of_registering_supernode_2 = 7; - string pastel_id_of_registering_supernode_3 = 8; - bool is_pastel_openapi_request = 9; - string open_api_group_id_string = 10; - string collection_name_string = 11; -} - -message ImageRarenessScoreReply { - string pastel_block_hash_when_request_submitted = 1; - string pastel_block_height_when_request_submitted = 2; - string utc_timestamp_when_request_submitted = 3; - string pastel_id_of_submitter = 4; - string pastel_id_of_registering_supernode_1 = 5; - string pastel_id_of_registering_supernode_2 = 6; - string pastel_id_of_registering_supernode_3 = 7; - bool is_pastel_openapi_request = 8; - string image_file_path = 9; - string dupe_detection_system_version = 10; - bool is_likely_dupe = 11; - bool is_rare_on_internet = 12; - float overall_rareness_score = 13; - float pct_of_top_10_most_similar_with_dupe_prob_above_25pct = 14; - float pct_of_top_10_most_similar_with_dupe_prob_above_33pct = 15; - float pct_of_top_10_most_similar_with_dupe_prob_above_50pct = 16; - string rareness_scores_table_json_compressed_b64 = 17; - InternetRareness internet_rareness = 18; - float open_nsfw_score = 19; - AltNsfwScores alternative_nsfw_scores = 20; - repeated double image_fingerprint_of_candidate_image_file = 21; - string collection_name_string = 22; - string hash_of_candidate_image_file = 23; - string 
open_api_group_id_string = 24; - float group_rareness_score = 25; - string candidate_image_thumbnail_webp_as_base64_string = 26; - string does_not_impact_the_following_collection_strings = 27; - bool is_invalid_sense_request = 28; - string invalid_sense_request_reason = 29; - float similarity_score_to_first_entry_in_collection = 30; - float cp_probability = 31; - float child_probability = 32; - string image_fingerprint_set_checksum = 33; -} - - -message InternetRareness { - string rare_on_internet_summary_table_as_json_compressed_b64 = 1; - string rare_on_internet_graph_json_compressed_b64 = 2; - string alternative_rare_on_internet_dict_as_json_compressed_b64 = 3; - uint32 min_number_of_exact_matches_in_page = 4; - string earliest_available_date_of_internet_results = 5; -} - -message AltNsfwScores { - float drawings = 1; - float hentai = 2; - float neutral = 3; - float porn = 4; - float sexy = 5; -} - -message GetStatusRequest {} - -message TaskCount { - int32 max_concurrent = 1; - int32 executing = 2; - int32 waiting_in_queue = 3; - int32 succeeded = 4; - int32 failed = 5; - int32 cancelled = 6; -} - -message TaskMetrics { - float average_task_wait_time_secs = 1; - float max_task_wait_time_secs = 2; - float average_task_execution_time_secs = 3; - int64 average_task_virtual_memory_usage_bytes = 4; - int64 average_task_rss_memory_usage_bytes = 5; - int64 peak_task_rss_memory_usage_bytes = 6; - int64 peak_task_vms_memory_usage_bytes = 7; -} - -message GetStatusResponse { - string version = 1; - TaskCount task_count = 2; - TaskMetrics task_metrics = 3; -} diff --git a/proto/proto.go b/proto/proto.go deleted file mode 100644 index 34045007..00000000 --- a/proto/proto.go +++ /dev/null @@ -1,6 +0,0 @@ -package proto - -const ( - // MetadataKeySessID is unique numeric for every registration process, encompasses for all connections. 
- MetadataKeySessID = "sessID" -) diff --git a/proto/raptorq/raptorq.proto b/proto/raptorq/raptorq.proto deleted file mode 100644 index 07db9baa..00000000 --- a/proto/raptorq/raptorq.proto +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2021-2021 The Pastel Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. -syntax = "proto3"; - -option go_package = "github.com/LumeraProtocol/supernode/v2/gen/raptorq"; - -package raptorq; - -service RaptorQ { - rpc EncodeMetaData(EncodeMetaDataRequest) returns(EncodeMetaDataReply); - rpc Encode(EncodeRequest) returns(EncodeReply); - rpc Decode(DecodeRequest) returns(DecodeReply); -} - -message EncodeMetaDataRequest { - string path = 1; - uint32 files_number = 2; - string block_hash = 3; - string pastel_id = 4; -} - -message EncodeMetaDataReply { - bytes encoder_parameters = 1; - uint32 symbols_count = 2; - string path = 3; -} - -message EncodeRequest { - string path = 1; -} - -message EncodeReply { - bytes encoder_parameters = 1; - uint32 symbols_count = 2; - string path = 3; -} - -message DecodeRequest { - bytes encoder_parameters = 1; - string path = 2; -} - -message DecodeReply { - string path = 1; -} \ No newline at end of file diff --git a/proto/supernode/service.proto b/proto/supernode/service.proto new file mode 100644 index 00000000..d51de355 --- /dev/null +++ b/proto/supernode/service.proto @@ -0,0 +1,114 @@ +syntax = "proto3"; +package supernode; +option go_package = "github.com/LumeraProtocol/supernode/v2/gen/supernode"; + +import "supernode/status.proto"; +import "google/api/annotations.proto"; + +// SupernodeService provides status information for all services +service SupernodeService { + rpc GetStatus(StatusRequest) returns (StatusResponse) { + option (google.api.http) = { + get: "/api/v1/status" + }; + } + + rpc ListServices(ListServicesRequest) returns (ListServicesResponse) { + option (google.api.http) = { + 
get: "/api/v1/services" + }; + } + + // Raw pprof endpoints - return standard pprof output directly + rpc GetRawPprof(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof" + }; + } + + rpc GetRawPprofHeap(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/heap" + }; + } + + rpc GetRawPprofGoroutine(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/goroutine" + }; + } + + rpc GetRawPprofAllocs(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/allocs" + }; + } + + rpc GetRawPprofBlock(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/block" + }; + } + + rpc GetRawPprofMutex(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/mutex" + }; + } + + rpc GetRawPprofThreadcreate(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/threadcreate" + }; + } + + rpc GetRawPprofProfile(RawPprofCpuRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/profile" + }; + } + + rpc GetRawPprofCmdline(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/cmdline" + }; + } + + rpc GetRawPprofSymbol(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/symbol" + }; + } + + rpc GetRawPprofTrace(RawPprofRequest) returns (RawPprofResponse) { + option (google.api.http) = { + get: "/api/v1/debug/raw/pprof/trace" + }; + } +} + +message ListServicesRequest {} + +message ListServicesResponse { + repeated ServiceInfo services = 1; + int32 count = 2; +} + +message ServiceInfo { + string name = 1; + repeated string methods = 2; +} + +// Raw pprof 
request/response messages +message RawPprofRequest { + int32 debug = 1; // Debug level (0 for binary, >0 for text) +} + +message RawPprofCpuRequest { + int32 seconds = 1; // CPU profile duration in seconds (default 30) +} + +message RawPprofResponse { + bytes data = 1; // Raw pprof data exactly as returned by runtime/pprof +} + diff --git a/proto/supernode/supernode.proto b/proto/supernode/status.proto similarity index 71% rename from proto/supernode/supernode.proto rename to proto/supernode/status.proto index 50597e90..d944d614 100644 --- a/proto/supernode/supernode.proto +++ b/proto/supernode/status.proto @@ -2,41 +2,13 @@ syntax = "proto3"; package supernode; option go_package = "github.com/LumeraProtocol/supernode/v2/gen/supernode"; -import "google/api/annotations.proto"; - -// SupernodeService provides status information for all services -service SupernodeService { - rpc GetStatus(StatusRequest) returns (StatusResponse) { - option (google.api.http) = { - get: "/api/v1/status" - }; - } - - rpc ListServices(ListServicesRequest) returns (ListServicesResponse) { - option (google.api.http) = { - get: "/api/v1/services" - }; - } -} - +// StatusRequest controls optional metrics in the status response message StatusRequest { // Optional: include detailed P2P metrics in the response // Maps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true bool include_p2p_metrics = 1; } -message ListServicesRequest {} - -message ListServicesResponse { - repeated ServiceInfo services = 1; - int32 count = 2; -} - -message ServiceInfo { - string name = 1; - repeated string methods = 2; -} - // The StatusResponse represents system status with clear organization message StatusResponse { string version = 1; // Supernode version @@ -46,7 +18,7 @@ message StatusResponse { message Resources { message CPU { double usage_percent = 1; // CPU usage percentage (0-100) - int32 cores = 2; // Number of CPU cores + int32 cores = 2; // Number of CPU cores } message Memory { @@ 
-76,7 +48,7 @@ message StatusResponse { repeated string task_ids = 2; int32 task_count = 3; } - + // Network information message Network { int32 peers_count = 1; // Number of connected peers in P2P network @@ -154,39 +126,7 @@ message StatusResponse { repeated BanEntry ban_list = 4; DatabaseStats database = 5; DiskStatus disk = 6; - - // Last handled BatchStoreData requests (most recent first) - message RecentBatchStoreEntry { - int64 time_unix = 1; - string sender_id = 2; - string sender_ip = 3; - int32 keys = 4; - int64 duration_ms = 5; - bool ok = 6; - string error = 7; - } - - // Last handled BatchGetValues requests (most recent first) - message RecentBatchRetrieveEntry { - int64 time_unix = 1; - string sender_id = 2; - string sender_ip = 3; - int32 requested = 4; - int32 found = 5; - int64 duration_ms = 6; - string error = 7; - } - - repeated RecentBatchStoreEntry recent_batch_store = 7; - repeated RecentBatchRetrieveEntry recent_batch_retrieve = 8; - - // Per-IP buckets: last 10 per sender IP - message RecentBatchStoreList { repeated RecentBatchStoreEntry entries = 1; } - message RecentBatchRetrieveList { repeated RecentBatchRetrieveEntry entries = 1; } - map recent_batch_store_by_ip = 9; - map recent_batch_retrieve_by_ip = 10; } - - P2PMetrics p2p_metrics = 9; + P2PMetrics p2p_metrics = 9; } diff --git a/sdk/README.md b/sdk/README.md index b0aecb20..cea41654 100644 --- a/sdk/README.md +++ b/sdk/README.md @@ -2,8 +2,49 @@ The Lumera Supernode SDK is a comprehensive toolkit for interacting with the Lumera Protocol's supernode network to perform cascade operations +## Cascade End-to-End + +This walks through building Cascade metadata, submitting the on‑chain action, starting Cascade, and downloading the result using the SDK (sdk/action), low‑level helpers (pkg/cascadekit), and the Lumera client (pkg/lumera). 
+ +1) Build metadata (+ price, expiration) +``` +meta, price, expiration, err := client.BuildCascadeMetadataFromFile(ctx, filePath, /*public=*/false) +if err != nil { /* handle */ } +``` +Under the hood: encodes file to a single‑block layout, signs layout/index (creator key), computes blake3(data), picks a random ic (1..100), derives max from chain params, computes price from file size + fee params, and expiration from chain duration (+1h buffer). + +2) Submit RequestAction (via pkg/lumera) +``` +b, _ := json.Marshal(meta) +resp, err := lumeraClient.ActionMsg().RequestAction(ctx, "CASCADE", string(b), price, expiration) +if err != nil { /* handle */ } +// Extract actionID from tx events or query later +``` + +3) Start Cascade +``` +sig, _ := client.GenerateStartCascadeSignatureFromFile(ctx, filePath) +taskID, err := client.StartCascade(ctx, filePath, actionID, sig) +``` + +4) Download Cascade +``` +// Public (meta.Public == true): empty signature +taskID, _ := client.DownloadCascade(ctx, actionID, outDir, "") + +// Private: sign only the actionID with the creator's key (helper shown) +dlSig, _ := client.GenerateDownloadSignature(ctx, actionID, creatorAddr) +taskID, _ = client.DownloadCascade(ctx, actionID, outDir, dlSig) +``` + +Notes +- Public downloads require no signature. +- The SDK derives ic/max/price/expiration internally; you don’t need to fetch params yourself. + ## Table of Contents +- [Cascade End-to-End](#cascade-end-to-end) + - [Configuration](#configuration) - [Client Initialization](#client-initialization) - [Action Client Methods](#action-client-methods) @@ -221,27 +262,20 @@ if err != nil { // taskID can be used to track the download progress ``` +Note: If the action's cascade metadata sets `public: true`, the signature may be left empty to allow anonymous download. 
+ **Parameters:** - `ctx context.Context`: Context for the operation - `actionID string`: ID of the action to download - `outputDir string`: Directory where the downloaded file will be saved -- `signature string`: Base64-encoded signature for download authorization +- `signature string`: Base64-encoded signature for download authorization (leave empty for public cascades) **Signature Creation for Download:** -The download signature is created by combining the action ID with the creator's address, signing it, and base64 encoding the result. +For private cascades, sign only the action ID with the creator's key and base64‑encode the result. ```go -// Create signature data: actionID.creatorAddress -signatureData := fmt.Sprintf("%s.%s", actionID, creatorAddress) - -// Sign the signature data -signedSignature, err := keyring.SignBytes(keyring, keyName, []byte(signatureData)) -if err != nil { - // Handle error -} - -// Base64 encode the signature -signature := base64.StdEncoding.EncodeToString(signedSignature) +sig, err := client.GenerateDownloadSignature(ctx, actionID, creatorAddress) +// Pass `sig` to DownloadCascade ``` **Returns:** @@ -286,7 +320,7 @@ if err != nil { **Returns:** - `error`: Error if the task doesn't exist or deletion fails -### GetSupernodeStatus +### GetSupernodeStatus (Status API) Retrieves the current status and resource information of a specific supernode. 
@@ -303,27 +337,12 @@ if err != nil { - `supernodeAddress string`: Cosmos address of the supernode **Returns:** -- `*supernodeservice.SupernodeStatusresponse`: Status information including CPU usage, memory stats, and active services -- `error`: Error if the supernode is unreachable or query fails - -Include detailed P2P metrics (optional): +- `*supernode.StatusResponse`: Status information including CPU usage, memory stats, active services, and P2P metrics +- `error`: Error if the supernode is unreachable or the query fails -By default, peer info and P2P metrics are not returned to keep calls lightweight. To include them, set an option in the context: - -```go -import snsvc "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" - -// Opt-in via context -ctxWithMetrics := snsvc.WithIncludeP2PMetrics(ctx) -status, err := client.GetSupernodeStatus(ctxWithMetrics, "lumera1abc...") -if err != nil { - // handle error -} - -// Access optional fields when present -fmt.Println("Peers:", status.Network.PeersCount) -fmt.Println("DHT hot path bans:", status.P2PMetrics.DhtMetrics.HotPathBanIncrements) -``` +Notes: +- The SDK always requests P2P metrics to ensure `Network.PeersCount` is populated for eligibility checks. +- Status response is the generated type; no mapping layer in the SDK. 
### SubscribeToEvents diff --git a/sdk/action/client.go b/sdk/action/client.go index fc3c2d7e..81aa806b 100644 --- a/sdk/action/client.go +++ b/sdk/action/client.go @@ -2,16 +2,28 @@ package action import ( "context" + crand "crypto/rand" + "encoding/base64" "fmt" + "math/big" + "os" + "path/filepath" + "strconv" + "time" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" - "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" "github.com/LumeraProtocol/supernode/v2/sdk/config" "github.com/LumeraProtocol/supernode/v2/sdk/event" "github.com/LumeraProtocol/supernode/v2/sdk/log" "github.com/LumeraProtocol/supernode/v2/sdk/net" "github.com/LumeraProtocol/supernode/v2/sdk/task" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + keyringpkg "github.com/LumeraProtocol/supernode/v2/pkg/keyring" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/cosmos/cosmos-sdk/crypto/keyring" ) @@ -26,9 +38,19 @@ type Client interface { GetTask(ctx context.Context, taskID string) (*task.TaskEntry, bool) SubscribeToEvents(ctx context.Context, eventType event.EventType, handler event.Handler) error SubscribeToAllEvents(ctx context.Context, handler event.Handler) error - GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*supernodeservice.SupernodeStatusresponse, error) + GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*pb.StatusResponse, error) // DownloadCascade downloads cascade to outputDir, filename determined by action ID DownloadCascade(ctx context.Context, actionID, outputDir, signature string) (string, error) + // BuildCascadeMetadataFromFile encodes the file to produce a single-block layout, + // generates the cascade signatures, computes the blake3 data hash (base64), + // and returns CascadeMetadata (with signatures) 
along with price and expiration time. + // Internally derives ic (random in [1..100]), max (from chain params), price (GetActionFee), + // and expiration (params duration + 1h buffer). + BuildCascadeMetadataFromFile(ctx context.Context, filePath string, public bool) (actiontypes.CascadeMetadata, string, string, error) + // GenerateStartCascadeSignatureFromFile computes blake3(file) and signs it with the configured key; returns base64 signature. + GenerateStartCascadeSignatureFromFile(ctx context.Context, filePath string) (string, error) + // GenerateDownloadSignature signs the payload "actionID" and returns a base64 signature. + GenerateDownloadSignature(ctx context.Context, actionID, creatorAddr string) (string, error) } // ClientImpl implements the Client interface @@ -151,7 +173,7 @@ func (c *ClientImpl) SubscribeToAllEvents(ctx context.Context, handler event.Han } // GetSupernodeStatus retrieves the status of a specific supernode by its address -func (c *ClientImpl) GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*supernodeservice.SupernodeStatusresponse, error) { +func (c *ClientImpl) GetSupernodeStatus(ctx context.Context, supernodeAddress string) (*pb.StatusResponse, error) { if supernodeAddress == "" { c.logger.Error(ctx, "Empty supernode address provided") return nil, fmt.Errorf("supernode address cannot be empty") @@ -216,3 +238,111 @@ func (c *ClientImpl) DownloadCascade(ctx context.Context, actionID, outputDir, s return taskID, nil } + +// BuildCascadeMetadataFromFile produces Cascade metadata (including signatures) from a local file path. +// It generates only the single-block RaptorQ layout metadata (no symbols), signs it, +// and returns metadata, price and expiration. 
+func (c *ClientImpl) BuildCascadeMetadataFromFile(ctx context.Context, filePath string, public bool) (actiontypes.CascadeMetadata, string, string, error) { + if filePath == "" { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("file path is empty") + } + fi, err := os.Stat(filePath) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("stat file: %w", err) + } + + // Build layout metadata only (no symbols). Supernodes will create symbols. + rq := codec.NewRaptorQCodec("") + metaResp, err := rq.CreateMetadata(ctx, codec.CreateMetadataRequest{Path: filePath}) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("raptorq create metadata: %w", err) + } + layout := metaResp.Layout + + // Derive `max` from chain params, then create signatures and index IDs + paramsResp, err := c.lumeraClient.GetActionParams(ctx) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("get action params: %w", err) + } + // Use MaxRaptorQSymbols as the count for rq_ids generation. 
+ var max uint32 + if paramsResp != nil && paramsResp.Params.MaxRaptorQSymbols > 0 { + max = uint32(paramsResp.Params.MaxRaptorQSymbols) + } else { + // Fallback to a sane default if params missing + max = 50 + } + // Pick a random initial counter in [1,100] + rnd, _ := crand.Int(crand.Reader, big.NewInt(100)) + ic := uint32(rnd.Int64() + 1) // 1..100 + // Create signatures from the layout struct + indexSignatureFormat, _, err := cascadekit.CreateSignaturesWithKeyring(layout, c.keyring, c.config.Account.KeyName, ic, max) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("create signatures: %w", err) + } + + // Compute data hash (blake3) as base64 using a streaming file hash to avoid loading entire file + h, err := utils.Blake3HashFile(filePath) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("hash data: %w", err) + } + dataHashB64 := base64.StdEncoding.EncodeToString(h) + + // Derive file name from path + fileName := filepath.Base(filePath) + + // Build metadata proto + meta := cascadekit.NewCascadeMetadata(dataHashB64, fileName, uint64(ic), indexSignatureFormat, public) + + // Fetch params (already fetched) to get denom and expiration duration + denom := paramsResp.Params.BaseActionFee.Denom + exp := paramsResp.Params.ExpirationDuration + + // Compute data size in KB for fee, rounding up to avoid underpaying + // Keep consistent with supernode verification which uses ceil(bytes/1024) + sizeBytes := fi.Size() + kb := (sizeBytes + 1023) / 1024 // int64 division + feeResp, err := c.lumeraClient.GetActionFee(ctx, strconv.FormatInt(kb, 10)) + if err != nil { + return actiontypes.CascadeMetadata{}, "", "", fmt.Errorf("get action fee: %w", err) + } + price := feeResp.Amount + denom + + // Expiration: now + chain duration + 1h buffer (to avoid off-by-margin rejections) + expirationUnix := time.Now().Add(exp).Add(1 * time.Hour).Unix() + expirationTime := fmt.Sprintf("%d", expirationUnix) + + return meta, price, 
expirationTime, nil +} + +// GenerateStartCascadeSignatureFromFile computes blake3(file) and signs it with the configured key. +// Returns base64-encoded signature suitable for StartCascade. +func (c *ClientImpl) GenerateStartCascadeSignatureFromFile(ctx context.Context, filePath string) (string, error) { + // Compute blake3(file), encode as base64 string, and sign the string bytes + h, err := utils.Blake3HashFile(filePath) + if err != nil { + return "", fmt.Errorf("blake3: %w", err) + } + dataHashB64 := base64.StdEncoding.EncodeToString(h) + sig, err := keyringpkg.SignBytes(c.keyring, c.config.Account.KeyName, []byte(dataHashB64)) + if err != nil { + return "", fmt.Errorf("sign hash string: %w", err) + } + return base64.StdEncoding.EncodeToString(sig), nil +} + +// GenerateDownloadSignature signs the payload "actionID" and returns base64 signature. +func (c *ClientImpl) GenerateDownloadSignature(ctx context.Context, actionID, creatorAddr string) (string, error) { + if actionID == "" { + return "", fmt.Errorf("actionID is empty") + } + if creatorAddr == "" { + return "", fmt.Errorf("creator address is empty") + } + // Sign only the actionID; creatorAddr is provided but not included in payload. 
+ sig, err := keyringpkg.SignBytes(c.keyring, c.config.Account.KeyName, []byte(actionID)) + if err != nil { + return "", fmt.Errorf("sign download payload: %w", err) + } + return base64.StdEncoding.EncodeToString(sig), nil +} diff --git a/sdk/adapters/lumera/adapter.go b/sdk/adapters/lumera/adapter.go index 8fe7a1fb..0317e16f 100644 --- a/sdk/adapters/lumera/adapter.go +++ b/sdk/adapters/lumera/adapter.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "sort" + "time" "github.com/LumeraProtocol/supernode/v2/sdk/log" @@ -13,7 +14,19 @@ import ( lumeraclient "github.com/LumeraProtocol/supernode/v2/pkg/lumera" "github.com/cosmos/cosmos-sdk/crypto/keyring" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + ristretto "github.com/dgraph-io/ristretto/v2" "github.com/golang/protobuf/proto" + "golang.org/x/sync/singleflight" +) + +const ( + // Cache tuning: tiny LFU with TTL to avoid stale long-term entries + cacheNumCounters = 1_000 + cacheMaxCost = 100 + cacheBufferItems = 64 + cacheItemCost = 1 + cacheTTL = time.Hour ) //go:generate mockery --name=Client --output=testutil/mocks --outpkg=mocks --filename=lumera_mock.go @@ -25,6 +38,12 @@ type Client interface { GetSupernodeWithLatestAddress(ctx context.Context, address string) (*SuperNodeInfo, error) DecodeCascadeMetadata(ctx context.Context, action Action) (actiontypes.CascadeMetadata, error) VerifySignature(ctx context.Context, accountAddr string, data []byte, signature []byte) error + // GetBalance returns the bank balance for the given address and denom. + GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) + // GetActionParams returns the action module parameters. + GetActionParams(ctx context.Context) (*actiontypes.QueryParamsResponse, error) + // GetActionFee returns the fee amount for a given data size (in KB) for RequestAction. 
+ GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) } // SuperNodeInfo contains supernode information with latest address @@ -47,6 +66,11 @@ type ConfigParams struct { type Adapter struct { client lumeraclient.Client logger log.Logger + + // Lightweight caches to reduce repeated chain lookups when used as a validator + accountCache *ristretto.Cache[string, *authtypes.QueryAccountInfoResponse] + supernodeCache *ristretto.Cache[string, *sntypes.SuperNode] + sf singleflight.Group } // NewAdapter creates a new Adapter with dependencies explicitly injected @@ -70,31 +94,74 @@ func NewAdapter(ctx context.Context, config ConfigParams, logger log.Logger) (Cl logger.Info(ctx, "Lumera adapter created successfully") + // Initialize small, bounded caches return &Adapter{ - client: client, - logger: logger, + client: client, + logger: logger, + accountCache: newStringCache[*authtypes.QueryAccountInfoResponse](), + supernodeCache: newStringCache[*sntypes.SuperNode](), }, nil } +func newStringCache[T any]() *ristretto.Cache[string, T] { + c, _ := ristretto.NewCache(&ristretto.Config[string, T]{ + NumCounters: cacheNumCounters, + MaxCost: cacheMaxCost, + BufferItems: cacheBufferItems, + }) + return c +} + func (a *Adapter) GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*sntypes.SuperNode, error) { - a.logger.Debug(ctx, "Getting supernode by address", "address", address) - resp, err := a.client.SuperNode().GetSupernodeBySupernodeAddress(ctx, address) + if address == "" { + return nil, fmt.Errorf("address cannot be empty") + } + // Fast path: cache hit + if a.supernodeCache != nil { + if val, ok := a.supernodeCache.Get(address); ok && val != nil { + return val, nil + } + } + + // Deduplicate concurrent lookups for same address + res, err, _ := a.sf.Do("sn:"+address, func() (any, error) { + // Double-check cache inside singleflight + if a.supernodeCache != nil { + if val, ok := a.supernodeCache.Get(address); ok 
&& val != nil { + return val, nil + } + } + + a.logger.Debug(ctx, "Getting supernode by address", "address", address) + resp, err := a.client.SuperNode().GetSupernodeBySupernodeAddress(ctx, address) + if err != nil { + a.logger.Error(ctx, "Failed to get supernode", "address", address, "error", err) + return nil, fmt.Errorf("failed to get supernode: %w", err) + } + if resp == nil { + a.logger.Error(ctx, "Received nil response for supernode", "address", address) + return nil, fmt.Errorf("received nil response for supernode %s", address) + } + if a.supernodeCache != nil { + a.supernodeCache.SetWithTTL(address, resp, cacheItemCost, cacheTTL) + } + return resp, nil + }) if err != nil { - a.logger.Error(ctx, "Failed to get supernode", "address", address, "error", err) - return nil, fmt.Errorf("failed to get supernode: %w", err) + return nil, err } - if resp == nil { - a.logger.Error(ctx, "Received nil response for supernode", "address", address) - return nil, fmt.Errorf("received nil response for supernode %s", address) + sn, _ := res.(*sntypes.SuperNode) + if sn == nil { + return nil, fmt.Errorf("supernode is nil") } - a.logger.Debug(ctx, "Successfully retrieved supernode", "address", address) - return resp, nil + return sn, nil } func (a *Adapter) GetSupernodeWithLatestAddress(ctx context.Context, address string) (*SuperNodeInfo, error) { a.logger.Debug(ctx, "Getting supernode with latest address", "address", address) - resp, err := a.client.SuperNode().GetSupernodeBySupernodeAddress(ctx, address) + // Route through cached method to avoid duplicate chain calls + resp, err := a.GetSupernodeBySupernodeAddress(ctx, address) if err != nil { a.logger.Error(ctx, "Failed to get supernode", "address", address, "error", err) return nil, fmt.Errorf("failed to get supernode: %w", err) @@ -140,19 +207,49 @@ func (a *Adapter) GetSupernodeWithLatestAddress(ctx context.Context, address str } func (a *Adapter) AccountInfoByAddress(ctx context.Context, addr string) 
(*authtypes.QueryAccountInfoResponse, error) { - a.logger.Debug(ctx, "Getting account info by address", "address", addr) - resp, err := a.client.Auth().AccountInfoByAddress(ctx, addr) - if err != nil { - a.logger.Error(ctx, "Failed to get account info", "address", addr, "error", err) - return nil, fmt.Errorf("failed to get account info: %w", err) + if addr == "" { + return nil, fmt.Errorf("address cannot be empty") } - if resp == nil { - a.logger.Error(ctx, "Received nil response for account info", "address", addr) - return nil, fmt.Errorf("received nil response for account info %s", addr) + // Fast path: cache hit + if a.accountCache != nil { + if val, ok := a.accountCache.Get(addr); ok && val != nil { + return val, nil + } } - a.logger.Debug(ctx, "Successfully retrieved account info", "address", addr) - return resp, nil + // Deduplicate concurrent fetches + res, err, _ := a.sf.Do("acct:"+addr, func() (any, error) { + // Double-check cache inside singleflight window + if a.accountCache != nil { + if val, ok := a.accountCache.Get(addr); ok && val != nil { + return val, nil + } + } + + a.logger.Debug(ctx, "Getting account info by address", "address", addr) + resp, err := a.client.Auth().AccountInfoByAddress(ctx, addr) + if err != nil { + a.logger.Error(ctx, "Failed to get account info", "address", addr, "error", err) + return nil, fmt.Errorf("failed to get account info: %w", err) + } + if resp == nil { + a.logger.Error(ctx, "Received nil response for account info", "address", addr) + return nil, fmt.Errorf("received nil response for account info %s", addr) + } + if a.accountCache != nil { + a.accountCache.SetWithTTL(addr, resp, cacheItemCost, cacheTTL) + } + a.logger.Debug(ctx, "Successfully retrieved account info", "address", addr) + return resp, nil + }) + if err != nil { + return nil, err + } + ai, _ := res.(*authtypes.QueryAccountInfoResponse) + if ai == nil { + return nil, fmt.Errorf("account info is nil") + } + return ai, nil } func (a *Adapter) GetAction(ctx 
context.Context, actionID string) (Action, error) { @@ -190,7 +287,10 @@ func (a *Adapter) GetSupernodes(ctx context.Context, height int64) ([]Supernode, } blockHeight = uint64(height) - resp, err := a.client.SuperNode().GetTopSuperNodesForBlock(ctx, blockHeight) + resp, err := a.client.SuperNode().GetTopSuperNodesForBlock(ctx, &sntypes.QueryGetTopSuperNodesForBlockRequest{ + BlockHeight: int32(blockHeight), + //TODO : Update after hotfix on chain + }) if err != nil { a.logger.Error(ctx, "Failed to get supernodes", "height", height, "error", err) return nil, fmt.Errorf("failed to get supernodes: %w", err) @@ -213,6 +313,42 @@ func (a *Adapter) VerifySignature(ctx context.Context, accountAddr string, data, return nil } +// RequestAction intentionally not exposed via this adapter; use pkg/lumera directly if needed. + +// GetActionParams fetches the action module parameters via the underlying lumera client. +func (a *Adapter) GetActionParams(ctx context.Context) (*actiontypes.QueryParamsResponse, error) { + resp, err := a.client.Action().GetParams(ctx) + if err != nil { + return nil, fmt.Errorf("get action params: %w", err) + } + return resp, nil +} + +// GetActionFee fetches the action fee for a given data size (in KB). +func (a *Adapter) GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) { + resp, err := a.client.Action().GetActionFee(ctx, dataSizeKB) + if err != nil { + return nil, fmt.Errorf("get action fee: %w", err) + } + return resp, nil +} + +// GetBalance fetches the balance for a given address and denom via the underlying lumera client. 
+func (a *Adapter) GetBalance(ctx context.Context, address string, denom string) (*banktypes.QueryBalanceResponse, error) { + a.logger.Debug(ctx, "Querying bank balance", "address", address, "denom", denom) + resp, err := a.client.Bank().Balance(ctx, address, denom) + if err != nil { + a.logger.Error(ctx, "Failed to query bank balance", "address", address, "denom", denom, "error", err) + return nil, fmt.Errorf("failed to query bank balance: %w", err) + } + if resp == nil || resp.Balance == nil { + a.logger.Error(ctx, "Nil balance response", "address", address, "denom", denom) + return nil, fmt.Errorf("nil balance response for %s %s", address, denom) + } + a.logger.Debug(ctx, "Successfully fetched bank balance", "amount", resp.Balance.Amount.String(), "denom", resp.Balance.Denom) + return resp, nil +} + // DecodeCascadeMetadata decodes the raw metadata bytes into CascadeMetadata func (a *Adapter) DecodeCascadeMetadata(ctx context.Context, action Action) (actiontypes.CascadeMetadata, error) { if action.ActionType != "ACTION_TYPE_CASCADE" { diff --git a/sdk/adapters/supernodeservice/adapter.go b/sdk/adapters/supernodeservice/adapter.go index f9e9e6da..3195b694 100644 --- a/sdk/adapters/supernodeservice/adapter.go +++ b/sdk/adapters/supernodeservice/adapter.go @@ -2,7 +2,6 @@ package supernodeservice import ( "context" - "encoding/json" "fmt" "io" "os" @@ -226,7 +225,9 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca bytesRead += int64(n) progress := float64(bytesRead) / float64(totalBytes) * 100 - a.logger.Debug(ctx, "Sent data chunk", "chunkIndex", chunkIndex, "chunkSize", n, "progress", fmt.Sprintf("%.1f%%", progress)) + // Print upload progress directly to stdout + fmt.Printf("Upload progress: task_id=%s action_id=%s chunk_index=%d chunk_size=%d progress=%.1f%% bytes=%d/%d\n", + in.TaskId, in.ActionID, chunkIndex, n, progress, bytesRead, totalBytes) chunkIndex++ } @@ -345,30 +346,7 @@ func (a *cascadeAdapter) 
CascadeSupernodeRegister(ctx context.Context, in *Casca event.KeyTaskID: in.TaskId, event.KeyActionID: in.ActionID, } - // For artefacts stored, parse JSON payload with metrics (new minimal shape) - if resp.EventType == cascade.SupernodeEventType_ARTEFACTS_STORED { - var payload map[string]any - if err := json.Unmarshal([]byte(resp.Message), &payload); err == nil { - if store, ok := payload["store"].(map[string]any); ok { - if v, ok := store["duration_ms"].(float64); ok { - edata[event.KeyStoreDurationMS] = int64(v) - } - if v, ok := store["symbols_first_pass"].(float64); ok { - edata[event.KeyStoreSymbolsFirstPass] = int64(v) - } - if v, ok := store["symbols_total"].(float64); ok { - edata[event.KeyStoreSymbolsTotal] = int64(v) - } - if v, ok := store["id_files_count"].(float64); ok { - edata[event.KeyStoreIDFilesCount] = int64(v) - } - if v, ok := store["calls_by_ip"]; ok { - edata[event.KeyStoreCallsByIP] = v - } - } - } - } - in.EventLogger(ctx, toSdkEventWithMessage(resp.EventType, resp.Message), resp.Message, edata) + in.EventLogger(ctx, toSdkEvent(resp.EventType), resp.Message, edata) } // Optionally capture the final response @@ -395,18 +373,18 @@ func (a *cascadeAdapter) CascadeSupernodeRegister(ctx context.Context, in *Casca }, nil } -func (a *cascadeAdapter) GetSupernodeStatus(ctx context.Context) (SupernodeStatusresponse, error) { - // Gate P2P metrics via context option to keep API backward compatible - req := &supernode.StatusRequest{IncludeP2PMetrics: includeP2PMetrics(ctx)} +func (a *cascadeAdapter) GetSupernodeStatus(ctx context.Context) (*supernode.StatusResponse, error) { + // Always include P2P metrics to populate peers count for eligibility checks + req := &supernode.StatusRequest{IncludeP2PMetrics: true} resp, err := a.statusClient.GetStatus(ctx, req) if err != nil { a.logger.Error(ctx, "Failed to get supernode status", "error", err) - return SupernodeStatusresponse{}, fmt.Errorf("failed to get supernode status: %w", err) + return nil, 
fmt.Errorf("failed to get supernode status: %w", err) } a.logger.Debug(ctx, "Supernode status retrieved", "status", resp) - return *toSdkSupernodeStatus(resp), nil + return resp, nil } // CascadeSupernodeDownload downloads a file from a supernode gRPC stream @@ -446,6 +424,7 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( bytesWritten int64 chunkIndex int startedEmitted bool + downloadStart time.Time ) // 3. Receive streamed responses @@ -470,46 +449,11 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( event.KeyEventType: x.Event.EventType, event.KeyMessage: x.Event.Message, } - // Parse detailed metrics for downloaded event if JSON payload provided (new minimal shape) - if x.Event.EventType == cascade.SupernodeEventType_ARTEFACTS_DOWNLOADED { - var payload map[string]any - if err := json.Unmarshal([]byte(x.Event.Message), &payload); err == nil { - if retrieve, ok := payload["retrieve"].(map[string]any); ok { - if v, ok := retrieve["found_local"].(float64); ok { - edata[event.KeyRetrieveFoundLocal] = int64(v) - } - if v, ok := retrieve["retrieve_ms"].(float64); ok { - edata[event.KeyRetrieveMS] = int64(v) - } - if v, ok := retrieve["decode_ms"].(float64); ok { - edata[event.KeyDecodeMS] = int64(v) - } - if v, ok := retrieve["calls_by_ip"]; ok { - edata[event.KeyRetrieveCallsByIP] = v - } - // Optional additional retrieve fields - if v, ok := retrieve["keys"].(float64); ok { - edata[event.KeyRetrieveKeys] = int64(v) - } - if v, ok := retrieve["required"].(float64); ok { - edata[event.KeyRetrieveRequired] = int64(v) - } - if v, ok := retrieve["found_net"].(float64); ok { - edata[event.KeyRetrieveFoundNet] = int64(v) - } - if v, ok := retrieve["target_required_percent"].(float64); ok { - edata[event.KeyTargetRequiredPercent] = v - } - if v, ok := retrieve["target_required_count"].(float64); ok { - edata[event.KeyTargetRequiredCount] = int64(v) - } - if v, ok := retrieve["total_symbols"].(float64); ok { - edata[event.KeyTotalSymbols] = int64(v) - } - } - } - 
} - in.EventLogger(ctx, toSdkEvent(x.Event.EventType), x.Event.Message, edata) + // Avoid blocking Recv loop on event handling; dispatch asynchronously + evtType := toSdkEvent(x.Event.EventType) + go func(ed event.EventData, et event.EventType, msg string) { + in.EventLogger(ctx, et, msg, ed) + }(edata, evtType, x.Event.Message) } // 3b. Actual data chunk @@ -520,7 +464,10 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( } if !startedEmitted { if in.EventLogger != nil { - in.EventLogger(ctx, event.SDKDownloadStarted, "Download started", event.EventData{event.KeyActionID: in.ActionID}) + // mark start to compute throughput at completion + downloadStart = time.Now() + // Emit started asynchronously to avoid blocking + go in.EventLogger(ctx, event.SDKDownloadStarted, "Download started", event.EventData{event.KeyActionID: in.ActionID}) } startedEmitted = true } @@ -532,13 +479,35 @@ func (a *cascadeAdapter) CascadeSupernodeDownload( chunkIndex++ a.logger.Debug(ctx, "received chunk", "chunk_index", chunkIndex, "chunk_size", len(data), "bytes_written", bytesWritten) + + // Print download progress directly to stdout (similar to upload progress) + fmt.Printf("Download progress: action_id=%s chunk_index=%d chunk_size=%d bytes=%d\n", + in.ActionID, chunkIndex, len(data), bytesWritten) } } a.logger.Info(ctx, "download complete", "bytes_written", bytesWritten, "path", in.OutputPath, "action_id", in.ActionID) if in.EventLogger != nil { - in.EventLogger(ctx, event.SDKDownloadCompleted, "Download completed", event.EventData{event.KeyActionID: in.ActionID, event.KeyOutputPath: in.OutputPath}) + // Compute metrics if we marked a start + var elapsed float64 + var throughput float64 + if !downloadStart.IsZero() { + elapsed = time.Since(downloadStart).Seconds() + mb := float64(bytesWritten) / (1024.0 * 1024.0) + if elapsed > 0 { + throughput = mb / elapsed + } + } + // Emit completion asynchronously with metrics + go in.EventLogger(ctx, event.SDKDownloadCompleted, "Download 
completed", event.EventData{ + event.KeyActionID: in.ActionID, + event.KeyOutputPath: in.OutputPath, + event.KeyBytesTotal: bytesWritten, + event.KeyChunks: chunkIndex, + event.KeyElapsedSeconds: elapsed, + event.KeyThroughputMBS: throughput, + }) } return &CascadeSupernodeDownloadResponse{ Success: true, @@ -588,185 +557,3 @@ func toSdkEvent(e cascade.SupernodeEventType) event.EventType { return event.SupernodeUnknown } } - -// toSdkEventWithMessage extends event mapping using message content for finer granularity -func toSdkEventWithMessage(e cascade.SupernodeEventType, msg string) event.EventType { - // Detect finalize simulation pass piggybacked on RQID_VERIFIED - if e == cascade.SupernodeEventType_RQID_VERIFIED && msg == "finalize action simulation passed" { - return event.SupernodeFinalizeSimulated - } - return toSdkEvent(e) -} - -func toSdkSupernodeStatus(resp *supernode.StatusResponse) *SupernodeStatusresponse { - result := &SupernodeStatusresponse{} - result.Version = resp.Version - result.UptimeSeconds = resp.UptimeSeconds - - // Convert Resources data - if resp.Resources != nil { - // Convert CPU data - if resp.Resources.Cpu != nil { - result.Resources.CPU.UsagePercent = resp.Resources.Cpu.UsagePercent - result.Resources.CPU.Cores = resp.Resources.Cpu.Cores - } - - // Convert Memory data - if resp.Resources.Memory != nil { - result.Resources.Memory.TotalGB = resp.Resources.Memory.TotalGb - result.Resources.Memory.UsedGB = resp.Resources.Memory.UsedGb - result.Resources.Memory.AvailableGB = resp.Resources.Memory.AvailableGb - result.Resources.Memory.UsagePercent = resp.Resources.Memory.UsagePercent - } - - // Convert Storage data - result.Resources.Storage = make([]StorageInfo, 0, len(resp.Resources.StorageVolumes)) - for _, storage := range resp.Resources.StorageVolumes { - result.Resources.Storage = append(result.Resources.Storage, StorageInfo{ - Path: storage.Path, - TotalBytes: storage.TotalBytes, - UsedBytes: storage.UsedBytes, - AvailableBytes: 
storage.AvailableBytes, - UsagePercent: storage.UsagePercent, - }) - } - - // Copy hardware summary - result.Resources.HardwareSummary = resp.Resources.HardwareSummary - } - - // Convert RunningTasks data - result.RunningTasks = make([]ServiceTasks, 0, len(resp.RunningTasks)) - for _, service := range resp.RunningTasks { - result.RunningTasks = append(result.RunningTasks, ServiceTasks{ - ServiceName: service.ServiceName, - TaskIDs: service.TaskIds, - TaskCount: service.TaskCount, - }) - } - - // Convert RegisteredServices data - result.RegisteredServices = make([]string, len(resp.RegisteredServices)) - copy(result.RegisteredServices, resp.RegisteredServices) - - // Convert Network data - if resp.Network != nil { - result.Network.PeersCount = resp.Network.PeersCount - result.Network.PeerAddresses = make([]string, len(resp.Network.PeerAddresses)) - copy(result.Network.PeerAddresses, resp.Network.PeerAddresses) - } - - // Copy rank and IP address - result.Rank = resp.Rank - result.IPAddress = resp.IpAddress - - // Map optional P2P metrics - if resp.P2PMetrics != nil { - // DHT metrics - if resp.P2PMetrics.DhtMetrics != nil { - // Store success recent - for _, p := range resp.P2PMetrics.DhtMetrics.StoreSuccessRecent { - result.P2PMetrics.DhtMetrics.StoreSuccessRecent = append(result.P2PMetrics.DhtMetrics.StoreSuccessRecent, struct { - TimeUnix int64 - Requests int32 - Successful int32 - SuccessRate float64 - }{ - TimeUnix: p.TimeUnix, - Requests: p.Requests, - Successful: p.Successful, - SuccessRate: p.SuccessRate, - }) - } - // Batch retrieve recent - for _, p := range resp.P2PMetrics.DhtMetrics.BatchRetrieveRecent { - result.P2PMetrics.DhtMetrics.BatchRetrieveRecent = append(result.P2PMetrics.DhtMetrics.BatchRetrieveRecent, struct { - TimeUnix int64 - Keys int32 - Required int32 - FoundLocal int32 - FoundNetwork int32 - DurationMS int64 - }{ - TimeUnix: p.TimeUnix, - Keys: p.Keys, - Required: p.Required, - FoundLocal: p.FoundLocal, - FoundNetwork: p.FoundNetwork, - 
DurationMS: p.DurationMs, - }) - } - result.P2PMetrics.DhtMetrics.HotPathBannedSkips = resp.P2PMetrics.DhtMetrics.HotPathBannedSkips - result.P2PMetrics.DhtMetrics.HotPathBanIncrements = resp.P2PMetrics.DhtMetrics.HotPathBanIncrements - } - - // Network handle metrics - if resp.P2PMetrics.NetworkHandleMetrics != nil { - if result.P2PMetrics.NetworkHandleMetrics == nil { - result.P2PMetrics.NetworkHandleMetrics = map[string]struct { - Total int64 - Success int64 - Failure int64 - Timeout int64 - }{} - } - for k, v := range resp.P2PMetrics.NetworkHandleMetrics { - result.P2PMetrics.NetworkHandleMetrics[k] = struct { - Total int64 - Success int64 - Failure int64 - Timeout int64 - }{ - Total: v.Total, - Success: v.Success, - Failure: v.Failure, - Timeout: v.Timeout, - } - } - } - - // Conn pool metrics - if resp.P2PMetrics.ConnPoolMetrics != nil { - if result.P2PMetrics.ConnPoolMetrics == nil { - result.P2PMetrics.ConnPoolMetrics = map[string]int64{} - } - for k, v := range resp.P2PMetrics.ConnPoolMetrics { - result.P2PMetrics.ConnPoolMetrics[k] = v - } - } - - // Ban list - for _, b := range resp.P2PMetrics.BanList { - result.P2PMetrics.BanList = append(result.P2PMetrics.BanList, struct { - ID string - IP string - Port uint32 - Count int32 - CreatedAtUnix int64 - AgeSeconds int64 - }{ - ID: b.Id, - IP: b.Ip, - Port: b.Port, - Count: b.Count, - CreatedAtUnix: b.CreatedAtUnix, - AgeSeconds: b.AgeSeconds, - }) - } - - // Database - if resp.P2PMetrics.Database != nil { - result.P2PMetrics.Database.P2PDBSizeMB = resp.P2PMetrics.Database.P2PDbSizeMb - result.P2PMetrics.Database.P2PDBRecordsCount = resp.P2PMetrics.Database.P2PDbRecordsCount - } - - // Disk - if resp.P2PMetrics.Disk != nil { - result.P2PMetrics.Disk.AllMB = resp.P2PMetrics.Disk.AllMb - result.P2PMetrics.Disk.UsedMB = resp.P2PMetrics.Disk.UsedMb - result.P2PMetrics.Disk.FreeMB = resp.P2PMetrics.Disk.FreeMb - } - } - - return result -} diff --git a/sdk/adapters/supernodeservice/options.go 
b/sdk/adapters/supernodeservice/options.go deleted file mode 100644 index 547a28c9..00000000 --- a/sdk/adapters/supernodeservice/options.go +++ /dev/null @@ -1,29 +0,0 @@ -package supernodeservice - -import "context" - -// internal context key to toggle P2P metrics in status requests -type ctxKey string - -const ctxKeyIncludeP2P ctxKey = "include_p2p_metrics" - -// WithIncludeP2PMetrics returns a child context that requests detailed P2P metrics -// (and peer info) in status responses. -func WithIncludeP2PMetrics(ctx context.Context) context.Context { - return context.WithValue(ctx, ctxKeyIncludeP2P, true) -} - -// WithP2PMetrics allows explicitly setting the include flag. -func WithP2PMetrics(ctx context.Context, include bool) context.Context { - return context.WithValue(ctx, ctxKeyIncludeP2P, include) -} - -// includeP2PMetrics reads the flag from context; defaults to false when unset. -func includeP2PMetrics(ctx context.Context) bool { - v := ctx.Value(ctxKeyIncludeP2P) - if b, ok := v.(bool); ok { - return b - } - return false -} - diff --git a/sdk/adapters/supernodeservice/types.go b/sdk/adapters/supernodeservice/types.go index 4dbdd7b6..1ba82c8f 100644 --- a/sdk/adapters/supernodeservice/types.go +++ b/sdk/adapters/supernodeservice/types.go @@ -3,6 +3,7 @@ package supernodeservice import ( "context" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" "google.golang.org/grpc" "github.com/LumeraProtocol/supernode/v2/sdk/event" @@ -28,93 +29,7 @@ type CascadeSupernodeRegisterResponse struct { TxHash string } -// ServiceTasks contains task information for a specific service -type ServiceTasks struct { - ServiceName string - TaskIDs []string - TaskCount int32 -} - -// StorageInfo contains storage metrics for a specific path -type StorageInfo struct { - Path string - TotalBytes uint64 - UsedBytes uint64 - AvailableBytes uint64 - UsagePercent float64 -} - -type SupernodeStatusresponse struct { - Version string // Supernode version - UptimeSeconds uint64 // 
Uptime in seconds - Resources struct { - CPU struct { - UsagePercent float64 - Cores int32 - } - Memory struct { - TotalGB float64 - UsedGB float64 - AvailableGB float64 - UsagePercent float64 - } - Storage []StorageInfo - HardwareSummary string // Formatted hardware summary - } - RunningTasks []ServiceTasks // Services with running tasks - RegisteredServices []string // All available service names - Network struct { - PeersCount int32 // Number of connected peers - PeerAddresses []string // List of peer addresses - } - Rank int32 // Rank in top supernodes list (0 if not in top list) - IPAddress string // Supernode IP address with port - // Optional detailed P2P metrics (present when requested) - P2PMetrics struct { - DhtMetrics struct { - StoreSuccessRecent []struct { - TimeUnix int64 - Requests int32 - Successful int32 - SuccessRate float64 - } - BatchRetrieveRecent []struct { - TimeUnix int64 - Keys int32 - Required int32 - FoundLocal int32 - FoundNetwork int32 - DurationMS int64 - } - HotPathBannedSkips int64 - HotPathBanIncrements int64 - } - NetworkHandleMetrics map[string]struct { - Total int64 - Success int64 - Failure int64 - Timeout int64 - } - ConnPoolMetrics map[string]int64 - BanList []struct { - ID string - IP string - Port uint32 - Count int32 - CreatedAtUnix int64 - AgeSeconds int64 - } - Database struct { - P2PDBSizeMB float64 - P2PDBRecordsCount int64 - } - Disk struct { - AllMB float64 - UsedMB float64 - FreeMB float64 - } - } -} +// Use generated proto types directly for status type CascadeSupernodeDownloadRequest struct { ActionID string TaskID string @@ -132,6 +47,6 @@ type CascadeSupernodeDownloadResponse struct { //go:generate mockery --name=CascadeServiceClient --output=testutil/mocks --outpkg=mocks --filename=cascade_service_mock.go type CascadeServiceClient interface { CascadeSupernodeRegister(ctx context.Context, in *CascadeSupernodeRegisterRequest, opts ...grpc.CallOption) (*CascadeSupernodeRegisterResponse, error) - 
GetSupernodeStatus(ctx context.Context) (SupernodeStatusresponse, error) + GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) CascadeSupernodeDownload(ctx context.Context, in *CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*CascadeSupernodeDownloadResponse, error) } diff --git a/sdk/docs/cascade-timeouts.md b/sdk/docs/cascade-timeouts.md index 716804bc..7568dd28 100644 --- a/sdk/docs/cascade-timeouts.md +++ b/sdk/docs/cascade-timeouts.md @@ -34,8 +34,8 @@ This document explains how timeouts and deadlines are applied across the SDK cas 3) `sdk/task/cascade.go: CascadeTask.Run(ctx)` - Validates file size; fetches healthy supernodes; registers with one. -4) Discovery: `sdk/task/task.go: BaseTask.fetchSupernodes` → `BaseTask.isServing` - - `context.WithTimeout(parent, 10s)` for health probe (create client + `HealthCheck`). +4) Discovery: `sdk/task/task.go: BaseTask.fetchSupernodesWithLoads` (single-pass sanitize + load) + - `context.WithTimeout(parent, 10s)` per node: `HealthCheck` + `GetStatus` (peers, running_tasks) + balance. 5) Registration attempt: `sdk/task/cascade.go: attemptRegistration` - Client connect: uses task context (no deadline); gRPC injects a 30s default at connect if needed. @@ -136,7 +136,7 @@ This approach requires no request‑struct changes and preserves existing call s - `supernode/sdk/action/client.go` — entrypoints, no timeouts added. - `supernode/sdk/task/manager.go` — detaches from caller context; creates and runs tasks. - `supernode/sdk/task/timeouts.go` — `connectionTimeout` for health checks. - - `supernode/sdk/task/task.go` — discovery + health checks using `connectionTimeout`. + - `supernode/sdk/task/task.go` — discovery with single-pass probe (`fetchSupernodesWithLoads`) using `connectionTimeout`. - `supernode/sdk/adapters/supernodeservice/timeouts.go` — upload/processing timeout constants. - `supernode/sdk/adapters/supernodeservice/adapter.go` — upload and progress stream handling (phase timers + events). 
- `supernode/sdk/net/factory.go` — client options tuned for streaming. @@ -170,7 +170,7 @@ This document describes how the SDK applies timeouts and deadlines during cascad 1) `sdk/action/client.go: ClientImpl.StartCascade(ctx, ...)` — forwards `ctx` to the Task Manager. 2) `sdk/task/manager.go: ManagerImpl.CreateCascadeTask(...)` — detaches from caller (`context.WithCancel(context.Background())`). 3) `sdk/task/cascade.go: CascadeTask.Run(ctx)` — validates file size, discovers healthy supernodes, attempts registration. -4) `sdk/task/task.go: BaseTask.fetchSupernodes` → `BaseTask.isServing` — health probe with `connectionTimeout = 10s` per node. +4) `sdk/task/task.go: BaseTask.fetchSupernodesWithLoads` — single-pass probe with `connectionTimeout = 10s` per node (health, status, balance) and load snapshot. 5) `sdk/task/cascade.go: attemptRegistration` — creates client and calls `RegisterCascade` with task context. 6) `sdk/adapters/supernodeservice/adapter.go: CascadeSupernodeRegister` — applies phase timers: - Upload phase: send chunks and metadata; cancel if `cascadeUploadTimeout` elapses. 
diff --git a/sdk/event/keys.go b/sdk/event/keys.go index 9d68b818..4668edd2 100644 --- a/sdk/event/keys.go +++ b/sdk/event/keys.go @@ -7,6 +7,7 @@ const ( // Common data keys KeyError EventDataKey = "error" KeyCount EventDataKey = "count" + KeyTotal EventDataKey = "total" KeySupernode EventDataKey = "supernode" KeySupernodeAddress EventDataKey = "sn-address" KeyIteration EventDataKey = "iteration" @@ -30,26 +31,5 @@ const ( KeyTaskID EventDataKey = "task_id" KeyActionID EventDataKey = "action_id" - // Removed legacy cascade storage metrics keys (meta/sym timings and nodes) - - // Combined store metrics (metadata + symbols) — new minimal only - KeyStoreDurationMS EventDataKey = "store_duration_ms" - // New minimal store metrics - KeyStoreSymbolsFirstPass EventDataKey = "store_symbols_first_pass" - KeyStoreSymbolsTotal EventDataKey = "store_symbols_total" - KeyStoreIDFilesCount EventDataKey = "store_id_files_count" - KeyStoreCallsByIP EventDataKey = "store_calls_by_ip" - - // Download (retrieve) detailed metrics — new minimal only - KeyRetrieveFoundLocal EventDataKey = "retrieve_found_local" - KeyRetrieveMS EventDataKey = "retrieve_ms" - KeyDecodeMS EventDataKey = "decode_ms" - KeyRetrieveCallsByIP EventDataKey = "retrieve_calls_by_ip" - // Additional retrieve summary fields - KeyRetrieveKeys EventDataKey = "retrieve_keys" - KeyRetrieveRequired EventDataKey = "retrieve_required" - KeyRetrieveFoundNet EventDataKey = "retrieve_found_net" - KeyTargetRequiredPercent EventDataKey = "target_required_percent" - KeyTargetRequiredCount EventDataKey = "target_required_count" - KeyTotalSymbols EventDataKey = "total_symbols" + // Removed legacy cascade storage/retrieve metrics keys ) diff --git a/sdk/event/types.go b/sdk/event/types.go index 635b1e2f..10f44856 100644 --- a/sdk/event/types.go +++ b/sdk/event/types.go @@ -14,7 +14,7 @@ type EventType string // These events are used to track the progress of tasks // and to notify subscribers about important changes in the system. 
const ( - SDKTaskStarted EventType = "sdk:started" + SDKTaskStarted EventType = "sdk:started" SDKSupernodesUnavailable EventType = "sdk:supernodes_unavailable" SDKSupernodesFound EventType = "sdk:supernodes_found" SDKRegistrationAttempt EventType = "sdk:registration_attempt" @@ -22,41 +22,41 @@ const ( SDKRegistrationSuccessful EventType = "sdk:registration_successful" SDKTaskTxHashReceived EventType = "sdk:txhash_received" SDKTaskCompleted EventType = "sdk:completed" - SDKTaskFailed EventType = "sdk:failed" - SDKConnectionEstablished EventType = "sdk:connection_established" - // Upload/processing phase events for cascade registration - SDKUploadStarted EventType = "sdk:upload_started" - SDKUploadCompleted EventType = "sdk:upload_completed" - SDKUploadFailed EventType = "sdk:upload_failed" // reason includes timeout - SDKProcessingStarted EventType = "sdk:processing_started" - SDKProcessingFailed EventType = "sdk:processing_failed" - SDKProcessingTimeout EventType = "sdk:processing_timeout" + SDKTaskFailed EventType = "sdk:failed" + SDKConnectionEstablished EventType = "sdk:connection_established" + // Upload/processing phase events for cascade registration + SDKUploadStarted EventType = "sdk:upload_started" + SDKUploadCompleted EventType = "sdk:upload_completed" + SDKUploadFailed EventType = "sdk:upload_failed" // reason includes timeout + SDKProcessingStarted EventType = "sdk:processing_started" + SDKProcessingFailed EventType = "sdk:processing_failed" + SDKProcessingTimeout EventType = "sdk:processing_timeout" - SDKDownloadAttempt EventType = "sdk:download_attempt" - SDKDownloadFailure EventType = "sdk:download_failure" - SDKDownloadStarted EventType = "sdk:download_started" - SDKDownloadCompleted EventType = "sdk:download_completed" + SDKDownloadAttempt EventType = "sdk:download_attempt" + SDKDownloadFailure EventType = "sdk:download_failure" + SDKDownloadStarted EventType = "sdk:download_started" + SDKDownloadCompleted EventType = "sdk:download_completed" ) 
const ( - SupernodeActionRetrieved EventType = "supernode:action_retrieved" - SupernodeActionFeeVerified EventType = "supernode:action_fee_verified" - SupernodeTopCheckPassed EventType = "supernode:top_check_passed" - SupernodeMetadataDecoded EventType = "supernode:metadata_decoded" - SupernodeDataHashVerified EventType = "supernode:data_hash_verified" - SupernodeInputEncoded EventType = "supernode:input_encoded" - SupernodeSignatureVerified EventType = "supernode:signature_verified" - SupernodeRQIDGenerated EventType = "supernode:rqid_generated" - SupernodeRQIDVerified EventType = "supernode:rqid_verified" - SupernodeFinalizeSimulated EventType = "supernode:finalize_simulated" - SupernodeArtefactsStored EventType = "supernode:artefacts_stored" - SupernodeActionFinalized EventType = "supernode:action_finalized" - SupernodeArtefactsDownloaded EventType = "supernode:artefacts_downloaded" - SupernodeNetworkRetrieveStarted EventType = "supernode:network_retrieve_started" - SupernodeDecodeCompleted EventType = "supernode:decode_completed" - SupernodeServeReady EventType = "supernode:serve_ready" - SupernodeUnknown EventType = "supernode:unknown" - SupernodeFinalizeSimulationFailed EventType = "supernode:finalize_simulation_failed" + SupernodeActionRetrieved EventType = "supernode:action_retrieved" + SupernodeActionFeeVerified EventType = "supernode:action_fee_verified" + SupernodeTopCheckPassed EventType = "supernode:top_check_passed" + SupernodeMetadataDecoded EventType = "supernode:metadata_decoded" + SupernodeDataHashVerified EventType = "supernode:data_hash_verified" + SupernodeInputEncoded EventType = "supernode:input_encoded" + SupernodeSignatureVerified EventType = "supernode:signature_verified" + SupernodeRQIDGenerated EventType = "supernode:rqid_generated" + SupernodeRQIDVerified EventType = "supernode:rqid_verified" + SupernodeFinalizeSimulated EventType = "supernode:finalize_simulated" + SupernodeArtefactsStored EventType = "supernode:artefacts_stored" + 
SupernodeActionFinalized EventType = "supernode:action_finalized" + SupernodeArtefactsDownloaded EventType = "supernode:artefacts_downloaded" + SupernodeNetworkRetrieveStarted EventType = "supernode:network_retrieve_started" + SupernodeDecodeCompleted EventType = "supernode:decode_completed" + SupernodeServeReady EventType = "supernode:serve_ready" + SupernodeUnknown EventType = "supernode:unknown" + SupernodeFinalizeSimulationFailed EventType = "supernode:finalize_simulation_failed" ) // EventData is a map of event data attributes using standardized keys diff --git a/sdk/net/client.go b/sdk/net/client.go index dc8950df..96e5d7f5 100644 --- a/sdk/net/client.go +++ b/sdk/net/client.go @@ -3,6 +3,7 @@ package net import ( "context" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" "google.golang.org/grpc" "google.golang.org/grpc/health/grpc_health_v1" @@ -15,7 +16,7 @@ type SupernodeClient interface { // HealthCheck performs a health check on the supernode HealthCheck(ctx context.Context) (*grpc_health_v1.HealthCheckResponse, error) - GetSupernodeStatus(ctx context.Context) (*supernodeservice.SupernodeStatusresponse, error) + GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) // Download downloads the cascade action file Download(ctx context.Context, in *supernodeservice.CascadeSupernodeDownloadRequest, opts ...grpc.CallOption) (*supernodeservice.CascadeSupernodeDownloadResponse, error) diff --git a/sdk/net/factory.go b/sdk/net/factory.go index b9fad9fd..f3486780 100644 --- a/sdk/net/factory.go +++ b/sdk/net/factory.go @@ -39,9 +39,10 @@ func NewClientFactory(ctx context.Context, logger log.Logger, keyring keyring.Ke // Tuned for 1GB max files with 4MB chunks // Reduce in-flight memory by aligning windows and msg sizes to chunk size. 
opts := client.DefaultClientOptions() - opts.MaxRecvMsgSize = 8 * 1024 * 1024 // 8MB: supports 4MB chunks + overhead - opts.MaxSendMsgSize = 8 * 1024 * 1024 // 8MB: supports 4MB chunks + overhead - opts.InitialWindowSize = 4 * 1024 * 1024 // 4MB per-stream window ≈ chunk size + opts.MaxRecvMsgSize = 12 * 1024 * 1024 // 12MB: supports 4MB chunks + overhead + opts.MaxSendMsgSize = 12 * 1024 * 1024 // 12MB: supports 4MB chunks + overhead + // Increase per-stream window to provide headroom for first data chunk + events + opts.InitialWindowSize = 12 * 1024 * 1024 // 12MB per-stream window opts.InitialConnWindowSize = 64 * 1024 * 1024 // 64MB per-connection window return &ClientFactory{ diff --git a/sdk/net/impl.go b/sdk/net/impl.go index ab0f7b28..e597ccbb 100644 --- a/sdk/net/impl.go +++ b/sdk/net/impl.go @@ -3,6 +3,7 @@ package net import ( "context" "fmt" + "sync" "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" ltc "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" @@ -12,6 +13,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" "github.com/LumeraProtocol/supernode/v2/sdk/log" + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" "github.com/cosmos/cosmos-sdk/crypto/keyring" "google.golang.org/grpc" "google.golang.org/grpc/health/grpc_health_v1" @@ -28,13 +30,17 @@ type supernodeClient struct { // Verify interface compliance at compile time var _ SupernodeClient = (*supernodeClient)(nil) +// ensure ALTS protocols are registered once per process +var registerALTSOnce sync.Once + // NewSupernodeClient creates a new supernode client func NewSupernodeClient(ctx context.Context, logger log.Logger, keyring keyring.Keyring, factoryConfig FactoryConfig, targetSupernode lumera.Supernode, lumeraClient lumera.Client, clientOptions *client.ClientOptions, ) (SupernodeClient, error) { - // Register ALTS protocols, just like in the test - conn.RegisterALTSRecordProtocols() + // Register ALTS protocols once (process-wide).
These are global and should not + // be unregistered per-connection to avoid impacting concurrent clients. + registerALTSOnce.Do(func() { conn.RegisterALTSRecordProtocols() }) // Validate required parameters if logger == nil { @@ -128,14 +134,14 @@ func (c *supernodeClient) HealthCheck(ctx context.Context) (*grpc_health_v1.Heal return resp, nil } -func (c *supernodeClient) GetSupernodeStatus(ctx context.Context) (*supernodeservice.SupernodeStatusresponse, error) { +func (c *supernodeClient) GetSupernodeStatus(ctx context.Context) (*pb.StatusResponse, error) { resp, err := c.cascadeClient.GetSupernodeStatus(ctx) if err != nil { return nil, fmt.Errorf("failed to get supernode status: %w", err) } c.logger.Debug(ctx, "Supernode status retrieved successfully") - return &resp, nil + return resp, nil } // Download downloads the cascade action file @@ -153,10 +159,6 @@ func (c *supernodeClient) Close(ctx context.Context) error { if c.conn != nil { c.logger.Debug(ctx, "Closing connection to supernode") err := c.conn.Close() - - // Cleanup ALTS protocols when client is closed - conn.UnregisterALTSRecordProtocols() - return err } return nil diff --git a/sdk/task/cascade.go b/sdk/task/cascade.go index c13b94a1..1c3a57ff 100644 --- a/sdk/task/cascade.go +++ b/sdk/task/cascade.go @@ -46,7 +46,12 @@ func (t *CascadeTask) Run(ctx context.Context) error { return err } - t.LogEvent(ctx, event.SDKSupernodesFound, "Supernodes found.", event.EventData{event.KeyCount: len(supernodes)}) + // 2 - Pre-filter: balance -> health -> XOR rank -> resources, then hand over + originalCount := len(supernodes) + supernodes = t.filterByMinBalance(ctx, supernodes) + supernodes = t.filterByHealth(ctx, supernodes) + supernodes = t.orderByXORDistance(supernodes) + t.LogEvent(ctx, event.SDKSupernodesFound, "Supernodes filtered", event.EventData{event.KeyTotal: originalCount, event.KeyCount: len(supernodes)}) // 2 - Register with the supernodes if err := t.registerWithSupernodes(ctx, supernodes); err != 
nil { @@ -72,34 +77,45 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum TaskId: t.TaskID, } + ordered := supernodes + var lastErr error - for idx, sn := range supernodes { - // 1 + attempted := 0 + for i, sn := range ordered { + iteration := i + 1 + t.LogEvent(ctx, event.SDKRegistrationAttempt, "attempting registration with supernode", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: idx + 1, + event.KeyIteration: iteration, }) - if err := t.attemptRegistration(ctx, idx, sn, clientFactory, req); err != nil { - // + + attempted++ + if err := t.attemptRegistration(ctx, iteration-1, sn, clientFactory, req); err != nil { t.LogEvent(ctx, event.SDKRegistrationFailure, "registration with supernode failed", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: idx + 1, + event.KeyIteration: iteration, event.KeyError: err.Error(), }) lastErr = err continue } + t.LogEvent(ctx, event.SDKRegistrationSuccessful, "successfully registered with supernode", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: idx + 1, + event.KeyIteration: iteration, }) return nil // success } - - return fmt.Errorf("failed to upload to all supernodes: %w", lastErr) + if attempted == 0 { + return fmt.Errorf("no eligible supernodes to register") + } + if lastErr != nil { + return fmt.Errorf("failed to upload to all supernodes: %w", lastErr) + } + return fmt.Errorf("failed to upload to all supernodes") } func (t *CascadeTask) attemptRegistration(ctx context.Context, _ int, sn lumera.Supernode, factory *net.ClientFactory, req *supernodeservice.CascadeSupernodeRegisterRequest) error { @@ -115,6 +131,15 @@ func (t *CascadeTask) attemptRegistration(ctx context.Context, _ int, sn lumera. 
event.KeySupernodeAddress: sn.CosmosAddress, }) + // Just-in-time resource check for uploads (storage + RAM >= 8x file size) + var minRam uint64 + if size := getFileSizeBytes(t.filePath); size > 0 { + minRam = uint64(size) * uploadRAMMultiplier + } + if ok := t.resourcesOK(ctx, client, sn, minStorageThresholdBytes, minRam); !ok { + return fmt.Errorf("resource check failed") + } + req.EventLogger = func(ctx context.Context, evt event.EventType, msg string, data event.EventData) { t.LogEvent(ctx, evt, msg, data) } diff --git a/sdk/task/download.go b/sdk/task/download.go index 3e85007a..4fefe0e6 100644 --- a/sdk/task/download.go +++ b/sdk/task/download.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "os" - "sort" "time" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" @@ -37,14 +36,19 @@ func NewCascadeDownloadTask(base BaseTask, actionId string, outputPath string, s func (t *CascadeDownloadTask) Run(ctx context.Context) error { t.LogEvent(ctx, event.SDKTaskStarted, "Running cascade download task", nil) - // 1 – fetch super-nodes + // 1 – fetch super-nodes (plain) supernodes, err := t.fetchSupernodes(ctx, t.Action.Height) if err != nil { t.LogEvent(ctx, event.SDKSupernodesUnavailable, "super-nodes unavailable", event.EventData{event.KeyError: err.Error()}) t.LogEvent(ctx, event.SDKTaskFailed, "task failed", event.EventData{event.KeyError: err.Error()}) return err } - t.LogEvent(ctx, event.SDKSupernodesFound, "super-nodes found", event.EventData{event.KeyCount: len(supernodes)}) + // 2 - Pre-filter: balance -> health -> XOR rank + originalCount := len(supernodes) + supernodes = t.filterByMinBalance(ctx, supernodes) + supernodes = t.filterByHealth(ctx, supernodes) + supernodes = t.orderByXORDistance(supernodes) + t.LogEvent(ctx, event.SDKSupernodesFound, "super-nodes filtered", event.EventData{event.KeyTotal: originalCount, event.KeyCount: len(supernodes)}) // 2 – download from super-nodes if err := t.downloadFromSupernodes(ctx, supernodes); err != nil { @@ 
-77,55 +81,13 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern } } - // Optionally rank supernodes by available memory to improve success for large files - // We keep a short timeout per status fetch to avoid delaying downloads. - type rankedSN struct { - sn lumera.Supernode - availGB float64 - hasStatus bool - } - ranked := make([]rankedSN, 0, len(supernodes)) - for _, sn := range supernodes { - ranked = append(ranked, rankedSN{sn: sn}) - } + // Strict XOR-first attempts over pre-filtered nodes (downloads) + ordered := supernodes - // Probe supernode status with short timeouts and close clients promptly - for i := range ranked { - sn := ranked[i].sn - // 2s status timeout to keep this pass fast - stx, cancel := context.WithTimeout(ctx, 2*time.Second) - client, err := clientFactory.CreateClient(stx, sn) - if err != nil { - cancel() - continue - } - status, err := client.GetSupernodeStatus(stx) - _ = client.Close(stx) - cancel() - if err != nil { - continue - } - ranked[i].hasStatus = true - ranked[i].availGB = status.Resources.Memory.AvailableGB - } - - // Sort: nodes with status first, higher available memory first - sort.Slice(ranked, func(i, j int) bool { - if ranked[i].hasStatus != ranked[j].hasStatus { - return ranked[i].hasStatus && !ranked[j].hasStatus - } - return ranked[i].availGB > ranked[j].availGB - }) - - // Rebuild the supernodes list in the sorted order - for i := range ranked { - supernodes[i] = ranked[i].sn - } - - // Try supernodes sequentially, one by one (now sorted) var lastErr error - for idx, sn := range supernodes { - iteration := idx + 1 + attempted := 0 + for i, sn := range ordered { + iteration := i + 1 // Log download attempt t.LogEvent(ctx, event.SDKDownloadAttempt, "attempting download from super-node", event.EventData{ @@ -134,8 +96,11 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern event.KeyIteration: iteration, }) + // Pre-filtering done; attempt directly + + 
attempted++ if err := t.attemptDownload(ctx, sn, clientFactory, req); err != nil { - // Log failure and continue to next supernode + // Log failure and continue with the rest t.LogEvent(ctx, event.SDKDownloadFailure, "download from super-node failed", event.EventData{ event.KeySupernode: sn.GrpcEndpoint, event.KeySupernodeAddress: sn.CosmosAddress, @@ -146,8 +111,8 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern continue } - // Success; return to caller - return nil + // Success; return to caller + return nil } if lastErr != nil { @@ -162,7 +127,6 @@ func (t *CascadeDownloadTask) attemptDownload( factory *net.ClientFactory, req *supernodeservice.CascadeSupernodeDownloadRequest, ) error { - ctx, cancel := context.WithTimeout(parent, downloadTimeout) defer cancel() @@ -172,128 +136,22 @@ func (t *CascadeDownloadTask) attemptDownload( } defer client.Close(ctx) - req.EventLogger = func(ctx context.Context, evt event.EventType, msg string, data event.EventData) { - t.LogEvent(ctx, evt, msg, data) + // Just-in-time resource check for downloads (storage only) + if ok := t.resourcesOK(ctx, client, sn, minStorageThresholdBytes, 0); !ok { + return fmt.Errorf("resource check failed") } - resp, err := client.Download(ctx, req) - if err != nil { - return fmt.Errorf("download from %s: %w", sn.CosmosAddress, err) - } - if !resp.Success { - return fmt.Errorf("download rejected by %s: %s", sn.CosmosAddress, resp.Message) - } - - return nil -} - -// downloadResult holds the result of a successful download attempt -type downloadResult struct { - SupernodeAddress string - SupernodeEndpoint string - Iteration int -} - -// attemptConcurrentDownload tries to download from multiple supernodes concurrently -// Returns the first successful result or all errors if all attempts fail -func (t *CascadeDownloadTask) attemptConcurrentDownload( - ctx context.Context, - batch lumera.Supernodes, - factory *net.ClientFactory, - req 
*supernodeservice.CascadeSupernodeDownloadRequest, - baseIteration int, -) (*downloadResult, []error) { - // Remove existing file if it exists to allow overwrite (do this once before concurrent attempts) - if _, err := os.Stat(req.OutputPath); err == nil { - if removeErr := os.Remove(req.OutputPath); removeErr != nil { - return nil, []error{fmt.Errorf("failed to remove existing file %s: %w", req.OutputPath, removeErr)} - } - } - - // Create a cancellable context for this batch - batchCtx, cancelBatch := context.WithCancel(ctx) - defer cancelBatch() - - // Channels for results - type attemptResult struct { - success *downloadResult - err error - idx int + req.EventLogger = func(ctx context.Context, evt event.EventType, msg string, data event.EventData) { + t.LogEvent(ctx, evt, msg, data) } - resultCh := make(chan attemptResult, len(batch)) - - // Start concurrent download attempts - for idx, sn := range batch { - iteration := baseIteration + idx + 1 - - // Log download attempt - t.LogEvent(ctx, event.SDKDownloadAttempt, "attempting download from super-node", event.EventData{ - event.KeySupernode: sn.GrpcEndpoint, - event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: iteration, - }) - go func(sn lumera.Supernode, idx int, iter int) { - // Create a copy of the request for this goroutine - reqCopy := &supernodeservice.CascadeSupernodeDownloadRequest{ - ActionID: req.ActionID, - TaskID: req.TaskID, - OutputPath: req.OutputPath, - Signature: req.Signature, - } - - err := t.attemptDownload(batchCtx, sn, factory, reqCopy) - if err != nil { - resultCh <- attemptResult{ - err: err, - idx: idx, - } - return - } - - resultCh <- attemptResult{ - success: &downloadResult{ - SupernodeAddress: sn.CosmosAddress, - SupernodeEndpoint: sn.GrpcEndpoint, - Iteration: iter, - }, - idx: idx, - } - }(sn, idx, iteration) + resp, err := client.Download(ctx, req) + if err != nil { + return fmt.Errorf("download from %s: %w", sn.CosmosAddress, err) } - - // Collect results - var 
errors []error - for i := range len(batch) { - select { - case result := <-resultCh: - if result.success != nil { - // Success! Cancel other attempts and return - cancelBatch() - // Drain remaining results to avoid goroutine leaks - go func() { - for j := i + 1; j < len(batch); j++ { - <-resultCh - } - }() - return result.success, nil - } - - // Log failure - sn := batch[result.idx] - t.LogEvent(ctx, event.SDKDownloadFailure, "download from super-node failed", event.EventData{ - event.KeySupernode: sn.GrpcEndpoint, - event.KeySupernodeAddress: sn.CosmosAddress, - event.KeyIteration: baseIteration + result.idx + 1, - event.KeyError: result.err.Error(), - }) - errors = append(errors, result.err) - - case <-ctx.Done(): - return nil, []error{ctx.Err()} - } + if !resp.Success { + return fmt.Errorf("download rejected by %s: %s", sn.CosmosAddress, resp.Message) } - // All attempts in this batch failed - return nil, errors + return nil } diff --git a/sdk/task/helpers.go b/sdk/task/helpers.go index f887aeb2..aac0fed1 100644 --- a/sdk/task/helpers.go +++ b/sdk/task/helpers.go @@ -2,22 +2,18 @@ package task import ( "context" - "encoding/base64" - "errors" "fmt" + "math/big" "os" - "path/filepath" - "strings" + "sort" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" - snsvc "github.com/LumeraProtocol/supernode/v2/sdk/adapters/supernodeservice" - "github.com/LumeraProtocol/supernode/v2/sdk/net" ) const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit -var ErrNoPeersConnected = errors.New("no P2P peers connected on available supernodes") - // ValidateFileSize checks if a file size is within the allowed 1GB limit func ValidateFileSize(filePath string) error { fileInfo, err := os.Stat(filePath) @@ -52,7 +48,7 @@ func (m *ManagerImpl) validateAction(ctx context.Context, actionID string) (lume } // validateSignature verifies the authenticity of a signature 
against an action's data hash. -// + // This function performs the following steps: // 1. Decodes the CASCADE metadata from the provided Lumera action // 2. Extracts the base64-encoded data hash from the metadata @@ -78,26 +74,13 @@ func (m *ManagerImpl) validateSignature(ctx context.Context, action lumera.Actio return fmt.Errorf("failed to decode cascade metadata: %w", err) } - // Extract the base64-encoded data hash from the metadata - base64EnTcketDataHash := cascadeMetaData.DataHash - - // Decode the data hash from base64 to raw bytes - dataHashBytes, err := base64.StdEncoding.DecodeString(base64EnTcketDataHash) - if err != nil { - return fmt.Errorf("failed to decode data hash: %w", err) - } - - // Decode the provided signature from base64 to raw bytes - signatureBytes, err := base64.StdEncoding.DecodeString(signature) - if err != nil { - return fmt.Errorf("failed to decode signature: %w", err) - } + // Extract the base64-encoded data hash string from the metadata + dataHashB64 := cascadeMetaData.DataHash - // Verify the signature using the Lumera client - // This checks if the signature was produced by the action creator - // for the given data hash - err = m.lumeraClient.VerifySignature(ctx, action.Creator, dataHashBytes, signatureBytes) - if err != nil { + // Verify using cascadekit helper (raw -> ADR-36) + if err := cascadekit.VerifyStringRawOrADR36(dataHashB64, signature, action.Creator, func(data, sig []byte) error { + return m.lumeraClient.VerifySignature(ctx, action.Creator, data, sig) + }); err != nil { m.logger.Error(ctx, "Signature validation failed", "actionID", action.ID, "error", err) return fmt.Errorf("signature validation failed: %w", err) } @@ -105,47 +88,7 @@ func (m *ManagerImpl) validateSignature(ctx context.Context, action lumera.Actio return nil } -// checkSupernodesPeerConnectivity verifies that at least one supernode has P2P peers connected -func (m *ManagerImpl) checkSupernodesPeerConnectivity(ctx context.Context, blockHeight int64) 
error { - // Fetch supernodes for the action's block height - supernodes, err := m.lumeraClient.GetSupernodes(ctx, blockHeight) - if err != nil { - return fmt.Errorf("failed to get supernodes: %w", err) - } - - if len(supernodes) == 0 { - return fmt.Errorf("no supernodes available for block height %d", blockHeight) - } - - // Check each supernode for peer connectivity - factoryCfg := net.FactoryConfig{ - LocalCosmosAddress: m.config.Account.LocalCosmosAddress, - PeerType: m.config.Account.PeerType, - } - clientFactory := net.NewClientFactory(ctx, m.logger, m.keyring, m.lumeraClient, factoryCfg) - - for _, sn := range supernodes { - client, err := clientFactory.CreateClient(ctx, sn) - if err != nil { - continue // Skip this supernode if we can't connect - } - - // Request peer info and P2P metrics to assess connectivity - ctxWithMetrics := snsvc.WithIncludeP2PMetrics(ctx) - status, err := client.GetSupernodeStatus(ctxWithMetrics) - client.Close(ctx) - if err != nil { - continue // Skip this supernode if we can't get status - } - - // Check if this supernode has peers - if status.Network.PeersCount > 1 { - return nil // Found at least one supernode with peers - } - } - - return ErrNoPeersConnected -} +// func (m *ManagerImpl) validateDownloadAction(ctx context.Context, actionID string) (lumera.Action, error) { action, err := m.lumeraClient.GetAction(ctx, actionID) @@ -166,18 +109,46 @@ func (m *ManagerImpl) validateDownloadAction(ctx context.Context, actionID strin return action, nil } -// Helper function to ensure output path has the correct filename -func ensureOutputPathWithFilename(outputPath, filename string) string { - // If outputPath is empty, just return the filename - if outputPath == "" { - return filename +func orderSupernodesByDeterministicDistance(seed string, sns lumera.Supernodes) lumera.Supernodes { + if len(sns) == 0 || seed == "" { + return sns } - - // Check if the path already ends with the filename - if strings.HasSuffix(outputPath, filename) { 
- return outputPath + // Precompute seed hash (blake3) + seedHash, err := utils.Blake3Hash([]byte(seed)) + if err != nil { + return sns } - // Otherwise, append the filename to the path - return filepath.Join(outputPath, filename) + type nodeDist struct { + sn lumera.Supernode + distance *big.Int + } + nd := make([]nodeDist, 0, len(sns)) + for _, sn := range sns { + id := sn.CosmosAddress + if id == "" { + id = sn.GrpcEndpoint + } + nHash, err := utils.Blake3Hash([]byte(id)) + if err != nil { + nd = append(nd, nodeDist{sn: sn, distance: new(big.Int).SetInt64(0)}) + continue + } + // XOR distance across min length + l := len(seedHash) + if len(nHash) < l { + l = len(nHash) + } + xor := make([]byte, l) + for i := 0; i < l; i++ { + xor[i] = seedHash[i] ^ nHash[i] + } + nd = append(nd, nodeDist{sn: sn, distance: new(big.Int).SetBytes(xor)}) + } + sort.Slice(nd, func(i, j int) bool { return nd[i].distance.Cmp(nd[j].distance) < 0 }) + out := make(lumera.Supernodes, len(nd)) + for i := range nd { + out[i] = nd[i].sn + } + return out } diff --git a/sdk/task/manager.go b/sdk/task/manager.go index 052088f3..c5a65bf4 100644 --- a/sdk/task/manager.go +++ b/sdk/task/manager.go @@ -107,11 +107,7 @@ func (m *ManagerImpl) CreateCascadeTask(ctx context.Context, filePath string, ac return "", err } - // Check peer connectivity before creating task - if err := m.checkSupernodesPeerConnectivity(taskCtx, action.Height); err != nil { - cancel() // Clean up if peer check fails - return "", err - } + // Peer connectivity is now validated during discovery health checks taskID := uuid.New().String()[:8] @@ -280,11 +276,7 @@ func (m *ManagerImpl) CreateDownloadTask(ctx context.Context, actionID string, o return "", fmt.Errorf("no filename found in cascade metadata") } - // Check peer connectivity before creating task - if err := m.checkSupernodesPeerConnectivity(taskCtx, action.Height); err != nil { - cancel() // Clean up if peer check fails - return "", err - } + // Peer connectivity is now 
validated during discovery health checks // Ensure the output path includes the correct filename finalOutputPath := path.Join(outputDir, action.ID, metadata.FileName) diff --git a/sdk/task/task.go b/sdk/task/task.go index e359c907..a212e76a 100644 --- a/sdk/task/task.go +++ b/sdk/task/task.go @@ -4,10 +4,10 @@ import ( "context" "errors" "fmt" - "sync" + "os" - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + sdkmath "cosmossdk.io/math" + txmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" "github.com/LumeraProtocol/supernode/v2/sdk/adapters/lumera" "github.com/LumeraProtocol/supernode/v2/sdk/config" "github.com/LumeraProtocol/supernode/v2/sdk/event" @@ -25,6 +25,14 @@ const ( TaskTypeCascade TaskType = "CASCADE" ) +// Package-level thresholds and tuning +const ( + // Minimum available storage required on any volume (bytes) + minStorageThresholdBytes uint64 = 50 * 1024 * 1024 * 1024 // 50 GB + // Upload requires free RAM to be at least 8x the file size + uploadRAMMultiplier uint64 = 8 +) + // EventCallback is a function that processes events from tasks type EventCallback func(ctx context.Context, e event.Event) @@ -80,57 +88,127 @@ func (t *BaseTask) fetchSupernodes(ctx context.Context, height int64) (lumera.Su if err != nil { return nil, fmt.Errorf("fetch supernodes: %w", err) } - if len(sns) == 0 { return nil, errors.New("no supernodes found") } - + // Limit to top 10 as chain enforces this in finalize action as well if len(sns) > 10 { sns = sns[:10] } + return sns, nil +} - // Keep only SERVING nodes (done in parallel – keeps latency flat) - healthy := make(lumera.Supernodes, 0, len(sns)) - eg, ctx := errgroup.WithContext(ctx) - mu := sync.Mutex{} +func (t *BaseTask) orderByXORDistance(sns lumera.Supernodes) lumera.Supernodes { + if len(sns) <= 1 { + return sns + } + seed := t.ActionID + return orderSupernodesByDeterministicDistance(seed, sns) +} - for _, sn := range sns { - sn := sn 
- eg.Go(func() error { - if t.isServing(ctx, sn) { - mu.Lock() - healthy = append(healthy, sn) - mu.Unlock() +// helper: get file size (bytes). returns 0 on error +func getFileSizeBytes(p string) int64 { + fi, err := os.Stat(p) + if err != nil { + return 0 + } + return fi.Size() +} + +func (t *BaseTask) resourcesOK(ctx context.Context, client net.SupernodeClient, sn lumera.Supernode, minStorageBytes uint64, minFreeRamBytes uint64) bool { + // In tests, skip resource thresholds (keep balance + health via nodeQualifies) + if os.Getenv("INTEGRATION_TEST") == "true" { + return true + } + status, err := client.GetSupernodeStatus(ctx) + if err != nil || status == nil || status.Resources == nil { + return false + } + // Storage: any volume must satisfy available >= minStorageBytes + if minStorageBytes > 0 { + ok := false + for _, vol := range status.Resources.StorageVolumes { + if vol != nil && vol.AvailableBytes >= minStorageBytes { + ok = true + break } - return nil - }) + } + if !ok { + return false + } } - if err := eg.Wait(); err != nil { - return nil, fmt.Errorf("health-check goroutines: %w", err) + // RAM: available_gb must be >= required GiB + if minFreeRamBytes > 0 { + mem := status.Resources.Memory + if mem == nil { + return false + } + requiredGiB := float64(minFreeRamBytes) / (1024.0 * 1024.0 * 1024.0) + if mem.AvailableGb < requiredGiB { + return false + } } + return true +} - if len(healthy) == 0 { - return nil, errors.New("no healthy supernodes found") +// filterByHealth returns nodes that report gRPC health SERVING. 
+func (t *BaseTask) filterByHealth(parent context.Context, sns lumera.Supernodes) lumera.Supernodes { + if len(sns) == 0 { + return sns } - - return healthy, nil + keep := make([]bool, len(sns)) + for i, sn := range sns { + i, sn := i, sn + ctx, cancel := context.WithTimeout(parent, connectionTimeout) + func() { + defer cancel() + client, err := net.NewClientFactory(ctx, t.logger, t.keyring, t.client, net.FactoryConfig{ + LocalCosmosAddress: t.config.Account.LocalCosmosAddress, + PeerType: t.config.Account.PeerType, + }).CreateClient(ctx, sn) + if err != nil { + return + } + defer client.Close(ctx) + h, err := client.HealthCheck(ctx) + if err == nil && h != nil && h.Status == grpc_health_v1.HealthCheckResponse_SERVING { + keep[i] = true + } + }() + } + out := make(lumera.Supernodes, 0, len(sns)) + for i, sn := range sns { + if keep[i] { + out = append(out, sn) + } + } + return out } -// isServing pings the super-node once with a short timeout. -func (t *BaseTask) isServing(parent context.Context, sn lumera.Supernode) bool { - ctx, cancel := context.WithTimeout(parent, connectionTimeout) - defer cancel() - - client, err := net.NewClientFactory(ctx, t.logger, t.keyring, t.client, net.FactoryConfig{ - LocalCosmosAddress: t.config.Account.LocalCosmosAddress, - PeerType: t.config.Account.PeerType, - }).CreateClient(ctx, sn) - if err != nil { - logtrace.Info(ctx, "Failed to create client for supernode", logtrace.Fields{logtrace.FieldMethod: "isServing"}) - return false +// filterByMinBalance filters by requiring at least a minimum balance in the default fee denom. 
+func (t *BaseTask) filterByMinBalance(parent context.Context, sns lumera.Supernodes) lumera.Supernodes { + if len(sns) == 0 { + return sns } - defer client.Close(ctx) - - resp, err := client.HealthCheck(ctx) - return err == nil && resp.Status == grpc_health_v1.HealthCheckResponse_SERVING + min := sdkmath.NewInt(1_000_000) // 1 LUME in ulume + denom := txmod.DefaultFeeDenom + keep := make([]bool, len(sns)) + for i, sn := range sns { + i, sn := i, sn + ctx, cancel := context.WithTimeout(parent, connectionTimeout) + func() { + defer cancel() + bal, err := t.client.GetBalance(ctx, sn.CosmosAddress, denom) + if err == nil && bal != nil && bal.Balance != nil && !bal.Balance.Amount.LT(min) { + keep[i] = true + } + }() + } + out := make(lumera.Supernodes, 0, len(sns)) + for i, sn := range sns { + if keep[i] { + out = append(out, sn) + } + } + return out } diff --git a/sdk/task/timeouts.go b/sdk/task/timeouts.go index f6e1e7e6..4498fdaf 100644 --- a/sdk/task/timeouts.go +++ b/sdk/task/timeouts.go @@ -5,4 +5,3 @@ import "time" // connectionTimeout bounds supernode health/connection probing. // Keep this short to preserve snappy discovery without impacting long uploads. 
const connectionTimeout = 10 * time.Second - diff --git a/sn-manager/cmd/check.go b/sn-manager/cmd/check.go index df20b2a5..4910eb06 100644 --- a/sn-manager/cmd/check.go +++ b/sn-manager/cmd/check.go @@ -1,14 +1,14 @@ package cmd import ( - "fmt" - "strings" + "fmt" + "strings" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" - "github.com/spf13/cobra" + "github.com/LumeraProtocol/supernode/v2/pkg/github" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" + "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" + "github.com/spf13/cobra" ) var checkCmd = &cobra.Command{ @@ -32,8 +32,8 @@ func runCheck(cmd *cobra.Command, args []string) error { fmt.Println("Checking for updates...") - // Create GitHub client - client := github.NewClient(config.GitHubRepo) + // Create GitHub client + client := github.NewClient(config.GitHubRepo) // Get latest stable release release, err := client.GetLatestStableRelease() @@ -41,26 +41,26 @@ func runCheck(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to check for stable updates: %w", err) } - fmt.Printf("\nLatest release: %s\n", release.TagName) - fmt.Printf("Current version: %s\n", cfg.Updates.CurrentVersion) - // Report manager version and if it would update under the same policy - mv := strings.TrimSpace(appVersion) - if mv != "" && mv != "dev" && !strings.EqualFold(mv, "unknown") { - managerWould := utils.SameMajor(mv, release.TagName) && utils.CompareVersions(mv, release.TagName) < 0 - fmt.Printf("Manager version: %s (would update: %v)\n", mv, managerWould) - } else { - fmt.Printf("Manager version: %s\n", appVersion) - } + fmt.Printf("\nLatest release: %s\n", release.TagName) + 
fmt.Printf("Current version: %s\n", cfg.Updates.CurrentVersion) + // Report manager version and if it would update under the same policy + mv := strings.TrimSpace(appVersion) + if mv != "" && mv != "dev" && !strings.EqualFold(mv, "unknown") { + managerWould := utils.SameMajor(mv, release.TagName) && utils.CompareVersions(mv, release.TagName) < 0 + fmt.Printf("Manager version: %s (would update: %v)\n", mv, managerWould) + } else { + fmt.Printf("Manager version: %s\n", appVersion) + } - // Compare versions - cmp := utils.CompareVersions(cfg.Updates.CurrentVersion, release.TagName) + // Compare versions + cmp := utils.CompareVersions(cfg.Updates.CurrentVersion, release.TagName) if cmp < 0 { // Use the same logic as auto-updater to determine update eligibility - managerHome := config.GetManagerHome() - autoUpdater := updater.New(managerHome, cfg, appVersion) - wouldAutoUpdate := autoUpdater.ShouldUpdate(cfg.Updates.CurrentVersion, release.TagName) - + managerHome := config.GetManagerHome() + autoUpdater := updater.New(managerHome, cfg, appVersion, nil) + wouldAutoUpdate := autoUpdater.ShouldUpdate(cfg.Updates.CurrentVersion, release.TagName) + if wouldAutoUpdate { fmt.Printf("\n✓ Update available: %s → %s\n", cfg.Updates.CurrentVersion, release.TagName) fmt.Printf("Published: %s\n", release.PublishedAt.Format("2006-01-02 15:04:05")) diff --git a/sn-manager/cmd/get.go b/sn-manager/cmd/get.go index eb8f0fac..7244c10f 100644 --- a/sn-manager/cmd/get.go +++ b/sn-manager/cmd/get.go @@ -6,8 +6,8 @@ import ( "os" "path/filepath" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" "github.com/spf13/cobra" diff --git a/sn-manager/cmd/init.go b/sn-manager/cmd/init.go index 383d70ad..2eb2639c 100644 --- 
a/sn-manager/cmd/init.go +++ b/sn-manager/cmd/init.go @@ -8,8 +8,8 @@ import ( "path/filepath" "github.com/AlecAivazis/survey/v2" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" "github.com/spf13/cobra" diff --git a/sn-manager/cmd/ls-remote.go b/sn-manager/cmd/ls-remote.go index 65619fd1..0d7bdff6 100644 --- a/sn-manager/cmd/ls-remote.go +++ b/sn-manager/cmd/ls-remote.go @@ -3,8 +3,8 @@ package cmd import ( "fmt" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/spf13/cobra" ) @@ -15,37 +15,37 @@ var lsRemoteCmd = &cobra.Command{ } func runLsRemote(cmd *cobra.Command, args []string) error { - client := github.NewClient(config.GitHubRepo) - - releases, err := client.ListReleases() - if err != nil { - return fmt.Errorf("failed to list releases: %w", err) - } - - // Filter to stable (non-draft, non-prerelease) - var stable []*github.Release - for _, r := range releases { - if !r.Draft && !r.Prerelease { - stable = append(stable, r) - } - } - - if len(stable) == 0 { - fmt.Println("No releases found") - return nil - } - - fmt.Println("Available versions:") - for i, release := range stable { - if i == 0 { - fmt.Printf(" %s (latest) - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) - } else { - fmt.Printf(" %s - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) - } - if i >= 9 { - break - } - } + client := github.NewClient(config.GitHubRepo) + + releases, err := client.ListReleases() + if err != nil { + return fmt.Errorf("failed to list releases: %w", err) + } + + // Filter to stable (non-draft, non-prerelease) + 
var stable []*github.Release + for _, r := range releases { + if !r.Draft && !r.Prerelease { + stable = append(stable, r) + } + } + + if len(stable) == 0 { + fmt.Println("No releases found") + return nil + } + + fmt.Println("Available versions:") + for i, release := range stable { + if i == 0 { + fmt.Printf(" %s (latest) - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) + } else { + fmt.Printf(" %s - %s\n", release.TagName, release.PublishedAt.Format("2006-01-02")) + } + if i >= 9 { + break + } + } return nil } diff --git a/sn-manager/cmd/start.go b/sn-manager/cmd/start.go index de03c6dd..6deb1583 100644 --- a/sn-manager/cmd/start.go +++ b/sn-manager/cmd/start.go @@ -11,8 +11,8 @@ import ( "strings" "syscall" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" - "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/manager" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/updater" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" @@ -121,12 +121,27 @@ func runStart(cmd *cobra.Command, args []string) error { } } + // orchestrator to gracefully stop SuperNode and exit manager with code 3 + gracefulManagerRestart := func() { + // Write stop marker so monitor won't auto-restart SuperNode + stopMarkerPath := filepath.Join(home, stopMarkerFile) + _ = os.WriteFile(stopMarkerPath, []byte("manager-update"), 0644) + + // Attempt graceful stop of SuperNode if running + if mgr.IsRunning() { + if err := mgr.Stop(); err != nil { + log.Printf("Failed to stop supernode: %v", err) + } + } + os.Exit(3) + } + // Mandatory version sync on startup: ensure both sn-manager and SuperNode // are at the latest stable release. This bypasses regular updater checks // (gateway idleness, same-major policy) to guarantee a consistent baseline. - // Runs once before monitoring begins. 
+ // Runs once before monitoring begins. If manager updated, restart now. func() { - u := updater.New(home, cfg, appVersion) + u := updater.New(home, cfg, appVersion, gracefulManagerRestart) // Do not block startup on failures; best-effort sync defer func() { recover() }() u.ForceSyncToLatest(context.Background()) @@ -135,7 +150,7 @@ func runStart(cmd *cobra.Command, args []string) error { // Start auto-updater if enabled var autoUpdater *updater.AutoUpdater if cfg.Updates.AutoUpgrade { - autoUpdater = updater.New(home, cfg, appVersion) + autoUpdater = updater.New(home, cfg, appVersion, gracefulManagerRestart) autoUpdater.Start(ctx) } @@ -171,7 +186,15 @@ func runStart(cmd *cobra.Command, args []string) error { return nil case err := <-monitorDone: - // Monitor exited unexpectedly + // Monitor exited; ensure SuperNode is stopped as manager exits + if autoUpdater != nil { + autoUpdater.Stop() + } + if mgr.IsRunning() { + if stopErr := mgr.Stop(); stopErr != nil { + log.Printf("Failed to stop supernode: %v", stopErr) + } + } if err != nil { return fmt.Errorf("monitor error: %w", err) } diff --git a/sn-manager/go.mod b/sn-manager/go.mod index 1beee097..8d29e8e6 100644 --- a/sn-manager/go.mod +++ b/sn-manager/go.mod @@ -3,15 +3,14 @@ module github.com/LumeraProtocol/supernode/v2/sn-manager go 1.24.1 require ( - github.com/AlecAivazis/survey/v2 v2.3.7 - github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000 - github.com/spf13/cobra v1.8.1 - gopkg.in/yaml.v3 v3.0.1 + github.com/AlecAivazis/survey/v2 v2.3.7 + github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000 + github.com/spf13/cobra v1.8.1 + gopkg.in/yaml.v3 v3.0.1 ) require ( - github.com/golang/protobuf v1.5.4 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // 
indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -24,7 +23,6 @@ require ( golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect - google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect google.golang.org/grpc v1.71.0 // indirect diff --git a/sn-manager/go.sum b/sn-manager/go.sum index 51f96134..6413ef48 100644 --- a/sn-manager/go.sum +++ b/sn-manager/go.sum @@ -1,14 +1,7 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -16,37 
+9,28 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= 
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -59,8 +43,8 @@ github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyex github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= @@ -85,44 +69,22 @@ go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= -go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 
h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -142,40 +104,20 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.23.0 
h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 h1:6whtk83KtD3FkGrVb2hFXuQ+ZMbCNdakARIn/aHMmG8= -google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094/go.mod h1:Zs4wYw8z1zr6RNF4cwYb31mvN/EGaKAdQjNCF3DW6K4= 
google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/sn-manager/internal/config/config.go b/sn-manager/internal/config/config.go index 87568580..f41a7f89 100644 --- a/sn-manager/internal/config/config.go +++ b/sn-manager/internal/config/config.go @@ -12,10 +12,19 @@ import ( const ( // ManagerHomeDir is the constant home directory for sn-manager ManagerHomeDir = ".sn-manager" - // GitHubRepo is the constant GitHub repository for supernode - GitHubRepo = "LumeraProtocol/supernode" + // defaultGitHubRepo is the default GitHub repository for supernode + defaultGitHubRepo = "LumeraProtocol/supernode" ) +// GitHubRepo is the GitHub repository for supernode and can be overridden via +// the SNM_GITHUB_REPO environment variable. +var GitHubRepo = func() string { + if v := os.Getenv("SNM_GITHUB_REPO"); v != "" { + return v + } + return defaultGitHubRepo +}() + // Config represents the sn-manager configuration type Config struct { Updates UpdateConfig `yaml:"updates"` @@ -81,7 +90,3 @@ func Save(cfg *Config, path string) error { return nil } - -// Validate checks if the configuration is valid -// Validate is kept for compatibility; no-op since interval was removed. 
-func (c *Config) Validate() error { return nil } diff --git a/sn-manager/internal/manager/manager.go b/sn-manager/internal/manager/manager.go index fd176121..06dacdb4 100644 --- a/sn-manager/internal/manager/manager.go +++ b/sn-manager/internal/manager/manager.go @@ -33,11 +33,6 @@ func New(homeDir string) (*Manager, error) { return nil, fmt.Errorf("failed to load config: %w", err) } - // Validate configuration - if err := cfg.Validate(); err != nil { - return nil, fmt.Errorf("invalid config: %w", err) - } - return &Manager{ config: cfg, homeDir: homeDir, @@ -175,9 +170,9 @@ func (m *Manager) cleanup() { const ( DefaultShutdownTimeout = 30 * time.Second ProcessCheckInterval = 5 * time.Second - CrashBackoffDelay = 2 * time.Second - StopMarkerFile = ".stop_requested" - RestartMarkerFile = ".needs_restart" + CrashBackoffDelay = 2 * time.Second + StopMarkerFile = ".stop_requested" + RestartMarkerFile = ".needs_restart" ) // Monitor continuously supervises the SuperNode process @@ -190,7 +185,7 @@ func (m *Manager) Monitor(ctx context.Context) error { // Channel to monitor process exits processExitCh := make(chan error, 1) - + // Function to arm the process wait goroutine armProcessWait := func() { processExitCh = make(chan error, 1) @@ -262,7 +257,7 @@ func (m *Manager) Monitor(ctx context.Context) error { case <-ticker.C: // Periodic check for various conditions - + // 1. 
Check if stop marker was removed and we should start if !m.IsRunning() { if _, err := os.Stat(stopMarkerPath); os.IsNotExist(err) { @@ -281,16 +276,16 @@ func (m *Manager) Monitor(ctx context.Context) error { if _, err := os.Stat(restartMarkerPath); err == nil { if m.IsRunning() { log.Println("Binary update detected, restarting SuperNode...") - + // Remove the restart marker if err := os.Remove(restartMarkerPath); err != nil && !os.IsNotExist(err) { log.Printf("Warning: failed to remove restart marker: %v", err) } - + // Create temporary stop marker for clean restart tmpStopMarker := []byte("update") os.WriteFile(stopMarkerPath, tmpStopMarker, 0644) - + // Stop current process if err := m.Stop(); err != nil { log.Printf("Failed to stop for update: %v", err) @@ -299,15 +294,15 @@ func (m *Manager) Monitor(ctx context.Context) error { } continue } - + // Brief pause time.Sleep(CrashBackoffDelay) - + // Remove temporary stop marker if err := os.Remove(stopMarkerPath); err != nil && !os.IsNotExist(err) { log.Printf("Warning: failed to remove stop marker: %v", err) } - + // Start with new binary log.Println("Starting with updated binary...") if err := m.Start(ctx); err != nil { @@ -325,7 +320,7 @@ func (m *Manager) Monitor(ctx context.Context) error { m.mu.RLock() proc := m.process m.mu.RUnlock() - + if proc != nil { if err := proc.Signal(syscall.Signal(0)); err != nil { // Process is dead but not cleaned up @@ -344,4 +339,3 @@ func (m *Manager) Monitor(ctx context.Context) error { func (m *Manager) GetConfig() *config.Config { return m.config } - diff --git a/sn-manager/internal/updater/updater.go b/sn-manager/internal/updater/updater.go index 5bf650c1..2e6f9d56 100644 --- a/sn-manager/internal/updater/updater.go +++ b/sn-manager/internal/updater/updater.go @@ -12,11 +12,11 @@ import ( "time" pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/pkg/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/config" 
- "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/github" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/utils" "github.com/LumeraProtocol/supernode/v2/sn-manager/internal/version" - "github.com/LumeraProtocol/supernode/v2/supernode/node/supernode/gateway" + "github.com/LumeraProtocol/supernode/v2/supernode/transport/gateway" "google.golang.org/protobuf/encoding/protojson" ) @@ -28,7 +28,7 @@ const ( updateCheckInterval = 10 * time.Minute // forceUpdateAfter is the age threshold after a release is published // beyond which updates are applied regardless of normal gates (idle, policy) - forceUpdateAfter = 30 * time.Minute + forceUpdateAfter = 10 * time.Minute ) type AutoUpdater struct { @@ -43,22 +43,25 @@ type AutoUpdater struct { // Gateway error backoff state gwErrCount int gwErrWindowStart time.Time + // Optional hook to handle manager update (restart) orchestration + onManagerUpdate func() } // Use protobuf JSON decoding for gateway responses (int64s encoded as strings) -func New(homeDir string, cfg *config.Config, managerVersion string) *AutoUpdater { +func New(homeDir string, cfg *config.Config, managerVersion string, onManagerUpdate func()) *AutoUpdater { // Use the correct gateway endpoint with imported constants gatewayURL := fmt.Sprintf("http://localhost:%d/api/v1/status", gateway.DefaultGatewayPort) return &AutoUpdater{ - config: cfg, - homeDir: homeDir, - githubClient: github.NewClient(config.GitHubRepo), - versionMgr: version.NewManager(homeDir), - gatewayURL: gatewayURL, - stopCh: make(chan struct{}), - managerVersion: managerVersion, + config: cfg, + homeDir: homeDir, + githubClient: github.NewClient(config.GitHubRepo), + versionMgr: version.NewManager(homeDir), + gatewayURL: gatewayURL, + stopCh: make(chan struct{}), + managerVersion: managerVersion, + onManagerUpdate: onManagerUpdate, } } @@ -133,9 +136,6 @@ func (u *AutoUpdater) ShouldUpdate(current, latest string) bool { return false } -// isGatewayIdle returns (idle, 
isError). When isError is true, -// the gateway could not be reliably checked (network/error/invalid). -// When isError is false and idle is false, the gateway is busy. func (u *AutoUpdater) isGatewayIdle() (bool, bool) { client := &http.Client{Timeout: gatewayTimeout} @@ -163,16 +163,16 @@ func (u *AutoUpdater) isGatewayIdle() (bool, bool) { return false, true } - totalTasks := 0 - for _, service := range status.RunningTasks { - totalTasks += int(service.TaskCount) + // Idle when there are no running tasks across all services + if len(status.GetRunningTasks()) == 0 { + return true, false } - - if totalTasks > 0 { - log.Printf("Gateway busy: %d running tasks", totalTasks) - return false, false + for _, st := range status.GetRunningTasks() { + if st.GetTaskCount() > 0 || len(st.GetTaskIds()) > 0 { + log.Printf("Gateway busy: service=%s tasks=%d", st.GetServiceName(), st.GetTaskCount()) + return false, false + } } - return true, false } @@ -353,10 +353,12 @@ func (u *AutoUpdater) checkAndUpdateCombined(force bool) { // If manager updated, restart service after completing all work if managerUpdated { log.Printf("Self-update applied, restarting service...") - go func() { - time.Sleep(500 * time.Millisecond) + if u.onManagerUpdate != nil { + u.onManagerUpdate() + } else { + // Fallback: immediate process restart signal os.Exit(3) - }() + } } } diff --git a/supernode/adaptors/lumera.go b/supernode/adaptors/lumera.go new file mode 100644 index 00000000..bf92f95b --- /dev/null +++ b/supernode/adaptors/lumera.go @@ -0,0 +1,49 @@ +package adaptors + +import ( + "context" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + sdktx "github.com/cosmos/cosmos-sdk/types/tx" +) + +type LumeraClient interface { + GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) + GetTopSupernodes(ctx context.Context, 
blockHeight uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) + Verify(ctx context.Context, address string, msg []byte, sig []byte) error + GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) + SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) + FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) +} + +type lumeraImpl struct{ c lumera.Client } + +func NewLumeraClient(c lumera.Client) LumeraClient { return &lumeraImpl{c: c} } + +func (l *lumeraImpl) GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) { + return l.c.Action().GetAction(ctx, actionID) +} + +func (l *lumeraImpl) GetTopSupernodes(ctx context.Context, blockHeight uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) { + return l.c.SuperNode().GetTopSuperNodesForBlock(ctx, &sntypes.QueryGetTopSuperNodesForBlockRequest{ + BlockHeight: int32(blockHeight), + }) +} + +func (l *lumeraImpl) Verify(ctx context.Context, address string, msg []byte, sig []byte) error { + return l.c.Auth().Verify(ctx, address, msg, sig) +} + +func (l *lumeraImpl) GetActionFee(ctx context.Context, dataSizeKB string) (*actiontypes.QueryGetActionFeeResponse, error) { + return l.c.Action().GetActionFee(ctx, dataSizeKB) +} + +func (l *lumeraImpl) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) { + return l.c.ActionMsg().SimulateFinalizeCascadeAction(ctx, actionID, rqids) +} + +func (l *lumeraImpl) FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) { + return l.c.ActionMsg().FinalizeCascadeAction(ctx, actionID, rqids) +} diff --git a/supernode/services/cascade/adaptors/p2p.go b/supernode/adaptors/p2p.go similarity index 52% rename from supernode/services/cascade/adaptors/p2p.go rename to 
supernode/adaptors/p2p.go index 116d6810..31184fd7 100644 --- a/supernode/services/cascade/adaptors/p2p.go +++ b/supernode/adaptors/p2p.go @@ -13,37 +13,26 @@ import ( "github.com/LumeraProtocol/supernode/v2/p2p" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/storage" - "github.com/pkg/errors" ) const ( - loadSymbolsBatchSize = 3000 - // Minimum first-pass coverage to store before returning from Register (percent) - storeSymbolsPercent = 18 - + loadSymbolsBatchSize = 100 + storeSymbolsPercent = 18 storeBatchContextTimeout = 3 * time.Minute + P2PDataRaptorQSymbol = 1 ) -// P2PService defines the interface for storing data in the P2P layer. -// -//go:generate mockgen -destination=mocks/p2p_mock.go -package=cascadeadaptormocks -source=p2p.go type P2PService interface { - // StoreArtefacts stores ID files and RaptorQ symbols. - // Metrics are recorded via internal metrics helpers; no metrics are returned. StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error } -// p2pImpl is the default implementation of the P2PService interface. type p2pImpl struct { p2p p2p.Client rqStore rqstore.Store } -// NewP2PService returns a concrete implementation of P2PService. 
func NewP2PService(client p2p.Client, store rqstore.Store) P2PService { return &p2pImpl{p2p: client, rqStore: store} } @@ -56,118 +45,85 @@ type StoreArtefactsRequest struct { } func (p *p2pImpl) StoreArtefacts(ctx context.Context, req StoreArtefactsRequest, f logtrace.Fields) error { - logtrace.Info(ctx, "About to store artefacts (metadata + symbols)", logtrace.Fields{"taskID": req.TaskID, "id_files": len(req.IDFiles)}) - - // Enable per-node store RPC capture for this task - cm.StartStoreCapture(req.TaskID) - defer cm.StopStoreCapture(req.TaskID) - + logtrace.Info(ctx, "store: p2p start", logtrace.Fields{"taskID": req.TaskID, "actionID": req.ActionID, "id_files": len(req.IDFiles), "symbols_dir": req.SymbolsDir}) start := time.Now() firstPassSymbols, totalSymbols, err := p.storeCascadeSymbolsAndData(ctx, req.TaskID, req.ActionID, req.SymbolsDir, req.IDFiles) if err != nil { - return errors.Wrap(err, "error storing artefacts") + return fmt.Errorf("error storing artefacts: %w", err) + } + remaining := 0 + if req.SymbolsDir != "" { + if keys, werr := walkSymbolTree(req.SymbolsDir); werr == nil { + remaining = len(keys) + } + } + logtrace.Info(ctx, "store: first-pass complete", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total_available": totalSymbols, "id_files_count": len(req.IDFiles), "symbols_left_on_disk": remaining, "ms": time.Since(start).Milliseconds()}) + if remaining == 0 { + logtrace.Info(ctx, "store: dir empty after first-pass", logtrace.Fields{"taskID": req.TaskID, "dir": req.SymbolsDir}) } - dur := time.Since(start).Milliseconds() - logtrace.Info(ctx, "artefacts have been stored", logtrace.Fields{"taskID": req.TaskID, "symbols_first_pass": firstPassSymbols, "symbols_total": totalSymbols, "id_files_count": len(req.IDFiles)}) - // Record store summary for later event emission - cm.SetStoreSummary(req.TaskID, firstPassSymbols, totalSymbols, len(req.IDFiles), dur) return nil } -// storeCascadeSymbols loads symbols 
from `symbolsDir`, optionally downsamples, -// streams them in fixed-size batches to the P2P layer, and tracks: -// - an item-weighted aggregate success rate across all batches -// - the total number of symbols processed (item count) -// - the total number of node requests attempted across batches -// -// Returns (aggRate, totalSymbols, totalRequests, err). func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, actionID string, symbolsDir string, metadataFiles [][]byte) (int, int, error) { - /* record directory in DB */ if err := p.rqStore.StoreSymbolDirectory(taskID, symbolsDir); err != nil { return 0, 0, fmt.Errorf("store symbol dir: %w", err) } - - /* gather every symbol path under symbolsDir ------------------------- */ keys, err := walkSymbolTree(symbolsDir) if err != nil { return 0, 0, err } - totalAvailable := len(keys) targetCount := int(math.Ceil(float64(totalAvailable) * storeSymbolsPercent / 100.0)) if targetCount < 1 && totalAvailable > 0 { targetCount = 1 } - logtrace.Info(ctx, "first-pass target coverage (symbols)", logtrace.Fields{ - "total_symbols": totalAvailable, - "target_percent": storeSymbolsPercent, - "target_count": targetCount, - }) - - /* down-sample if we exceed the “big directory” threshold ------------- */ + logtrace.Info(ctx, "store: symbols discovered", logtrace.Fields{"total_symbols": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "store: target coverage", logtrace.Fields{"total_symbols": totalAvailable, "target_percent": storeSymbolsPercent, "target_count": targetCount}) if len(keys) > loadSymbolsBatchSize { want := targetCount if want < len(keys) { rand.Shuffle(len(keys), func(i, j int) { keys[i], keys[j] = keys[j], keys[i] }) keys = keys[:want] } - sort.Strings(keys) // deterministic order inside the sample + sort.Strings(keys) } - - logtrace.Info(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) - - /* stream in fixed-size batches -------------------------------------- */ - - 
totalSymbols := 0 // symbols stored + logtrace.Info(ctx, "store: selected symbols", logtrace.Fields{"selected": len(keys), "of_total": totalAvailable, "dir": symbolsDir}) + logtrace.Info(ctx, "store: sending symbols", logtrace.Fields{"count": len(keys)}) + totalSymbols := 0 firstBatchProcessed := false - for start := 0; start < len(keys); { end := min(start+loadSymbolsBatchSize, len(keys)) batch := keys[start:end] - if !firstBatchProcessed && len(metadataFiles) > 0 { - // First "batch" has to include metadata + as many symbols as fit under batch size. - // If metadataFiles >= batch size, we send metadata in this batch and symbols start next batch. roomForSymbols := loadSymbolsBatchSize - len(metadataFiles) if roomForSymbols < 0 { roomForSymbols = 0 } if roomForSymbols < len(batch) { - // trim the first symbol chunk to leave space for metadata batch = batch[:roomForSymbols] end = start + roomForSymbols } - - // Load just this symbol chunk symBytes, err := utils.LoadSymbols(symbolsDir, batch) if err != nil { return 0, 0, fmt.Errorf("load symbols: %w", err) } - - // Build combined payload: metadata first, then symbols payload := make([][]byte, 0, len(metadataFiles)+len(symBytes)) payload = append(payload, metadataFiles...) payload = append(payload, symBytes...) 
- - // Send as the same data type you use for symbols + logtrace.Info(ctx, "store: batch send (first)", logtrace.Fields{"taskID": taskID, "metadata_count": len(metadataFiles), "symbols_in_batch": len(symBytes), "payload_total": len(payload)}) bctx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) - bctx = cm.WithTaskID(bctx, taskID) - err = p.p2p.StoreBatch(bctx, payload, storage.P2PDataRaptorQSymbol, taskID) + err = p.p2p.StoreBatch(bctx, payload, P2PDataRaptorQSymbol, taskID) cancel() if err != nil { return totalSymbols, totalAvailable, fmt.Errorf("p2p store batch (first): %w", err) } - + logtrace.Info(ctx, "store: batch ok (first)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symBytes)}) totalSymbols += len(symBytes) - // No per-RPC metrics propagated from p2p - - // Delete only the symbols we uploaded if len(batch) > 0 { if err := utils.DeleteSymbols(ctx, symbolsDir, batch); err != nil { return totalSymbols, totalAvailable, fmt.Errorf("delete symbols: %w", err) } } - firstBatchProcessed = true } else { count, err := p.storeSymbolsInP2P(ctx, taskID, symbolsDir, batch) @@ -176,36 +132,23 @@ func (p *p2pImpl) storeCascadeSymbolsAndData(ctx context.Context, taskID, action } totalSymbols += count } - start = end } - - // Coverage uses symbols only - achievedPct := 0.0 - if totalAvailable > 0 { - achievedPct = (float64(totalSymbols) / float64(totalAvailable)) * 100.0 - } - logtrace.Info(ctx, "first-pass achieved coverage (symbols)", - logtrace.Fields{"achieved_symbols": totalSymbols, "achieved_percent": achievedPct}) - if err := p.rqStore.UpdateIsFirstBatchStored(actionID); err != nil { return totalSymbols, totalAvailable, fmt.Errorf("update first-batch flag: %w", err) } - return totalSymbols, totalAvailable, nil - } func walkSymbolTree(root string) ([]string, error) { var keys []string err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { if err != nil { - return err // propagate I/O errors + return err } if 
d.IsDir() { - return nil // skip directory nodes + return nil } - // ignore layout json if present if strings.EqualFold(filepath.Ext(d.Name()), ".json") { return nil } @@ -213,7 +156,7 @@ func walkSymbolTree(root string) ([]string, error) { if err != nil { return err } - keys = append(keys, rel) // store as "block_0/filename" + keys = append(keys, rel) return nil }) if err != nil { @@ -222,30 +165,28 @@ func walkSymbolTree(root string) ([]string, error) { return keys, nil } -// storeSymbolsInP2P loads a batch of symbols and stores them via P2P. -// Returns (ratePct, requests, count, error) where `count` is the number of symbols in this batch. func (c *p2pImpl) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) (int, error) { - logtrace.Info(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)}) - + logtrace.Debug(ctx, "loading batch symbols", logtrace.Fields{"taskID": taskID, "count": len(fileKeys)}) symbols, err := utils.LoadSymbols(root, fileKeys) if err != nil { return 0, fmt.Errorf("load symbols: %w", err) } - symCtx, cancel := context.WithTimeout(ctx, storeBatchContextTimeout) - symCtx = cm.WithTaskID(symCtx, taskID) defer cancel() - - if err := c.p2p.StoreBatch(symCtx, symbols, storage.P2PDataRaptorQSymbol, taskID); err != nil { + logtrace.Info(ctx, "store: batch send (symbols)", logtrace.Fields{"taskID": taskID, "symbols_in_batch": len(symbols)}) + if err := c.p2p.StoreBatch(symCtx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { return len(symbols), fmt.Errorf("p2p store batch: %w", err) } - logtrace.Info(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) - + logtrace.Info(ctx, "store: batch ok (symbols)", logtrace.Fields{"taskID": taskID, "symbols_stored": len(symbols)}) if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { return len(symbols), fmt.Errorf("delete symbols: %w", err) } - logtrace.Info(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) - - // No 
per-RPC metrics propagated from p2p return len(symbols), nil } + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/supernode/adaptors/rq.go b/supernode/adaptors/rq.go new file mode 100644 index 00000000..f414be57 --- /dev/null +++ b/supernode/adaptors/rq.go @@ -0,0 +1,70 @@ +package adaptors + +import ( + "context" + "os" + + "github.com/LumeraProtocol/supernode/v2/pkg/codec" +) + +// CodecService wraps codec operations used by cascade +type CodecService interface { + EncodeInput(ctx context.Context, actionID string, filePath string) (EncodeResult, error) + Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) + PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, + Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error) + DecodeFromPrepared(ctx context.Context, ws *codec.Workspace, layout codec.Layout) (DecodeResult, error) +} + +type EncodeResult struct { + SymbolsDir string + Layout codec.Layout +} + +type DecodeRequest struct { + ActionID string + Symbols map[string][]byte + Layout codec.Layout +} + +type DecodeResult struct { + FilePath string + DecodeTmpDir string +} + +type codecImpl struct{ codec codec.Codec } + +func NewCodecService(c codec.Codec) CodecService { return &codecImpl{codec: c} } + +func (c *codecImpl) EncodeInput(ctx context.Context, actionID, filePath string) (EncodeResult, error) { + var size int + if fi, err := os.Stat(filePath); err == nil { + size = int(fi.Size()) + } + res, err := c.codec.Encode(ctx, codec.EncodeRequest{TaskID: actionID, Path: filePath, DataSize: size}) + if err != nil { + return EncodeResult{}, err + } + return EncodeResult{SymbolsDir: res.SymbolsDir, Layout: res.Layout}, nil +} + +func (c *codecImpl) Decode(ctx context.Context, req DecodeRequest) (DecodeResult, error) { + res, err := c.codec.Decode(ctx, codec.DecodeRequest{ActionID: req.ActionID, Symbols: 
req.Symbols, Layout: req.Layout}) + if err != nil { + return DecodeResult{}, err + } + return DecodeResult{FilePath: res.FilePath, DecodeTmpDir: res.DecodeTmpDir}, nil +} + +func (c *codecImpl) PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, + Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error) { + return c.codec.PrepareDecode(ctx, actionID, layout) +} + +func (c *codecImpl) DecodeFromPrepared(ctx context.Context, ws *codec.Workspace, layout codec.Layout) (DecodeResult, error) { + res, err := c.codec.DecodeFromPrepared(ctx, ws, layout) + if err != nil { + return DecodeResult{}, err + } + return DecodeResult{FilePath: res.FilePath, DecodeTmpDir: res.DecodeTmpDir}, nil +} diff --git a/supernode/cascade/download.go b/supernode/cascade/download.go new file mode 100644 index 00000000..e027c817 --- /dev/null +++ b/supernode/cascade/download.go @@ -0,0 +1,261 @@ +package cascade + +import ( + "context" + "encoding/json" + "fmt" + "os" + "sort" + "time" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/adaptors" +) + +const targetRequiredPercent = 17 + +type DownloadRequest struct { + ActionID string + Signature string +} + +type DownloadResponse struct { + EventType SupernodeEventType + Message string + FilePath string + DownloadedDir string +} + +func (task *CascadeRegistrationTask) Download(ctx context.Context, req *DownloadRequest, send func(resp *DownloadResponse) error) (err error) { + if req != nil && req.ActionID != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + ctx = 
logtrace.CtxWithOrigin(ctx, "download") + } + fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} + logtrace.Info(ctx, "download: request", fields) + + actionDetails, err := task.LumeraClient.GetAction(ctx, req.ActionID) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return task.wrapErr(ctx, "failed to get action", err, fields) + } + logtrace.Info(ctx, "download: action fetched", fields) + task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) + + if actionDetails.GetAction().State != actiontypes.ActionStateDone { + err = errors.New("action is not in a valid state") + fields[logtrace.FieldError] = "action state is not done yet" + fields[logtrace.FieldActionState] = actionDetails.GetAction().State + return task.wrapErr(ctx, "action not finalized yet", err, fields) + } + logtrace.Info(ctx, "download: action state ok", fields) + + metadata, err := cascadekit.UnmarshalCascadeMetadata(actionDetails.GetAction().Metadata) + if err != nil { + fields[logtrace.FieldError] = err.Error() + return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) + } + logtrace.Info(ctx, "download: metadata decoded", fields) + task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send) + + if !metadata.Public { + if req.Signature == "" { + fields[logtrace.FieldError] = "missing signature for private download" + return task.wrapErr(ctx, "private cascade requires a download signature", nil, fields) + } + if err := task.VerifyDownloadSignature(ctx, req.ActionID, req.Signature); err != nil { + fields[logtrace.FieldError] = err.Error() + return task.wrapErr(ctx, "failed to verify download signature", err, fields) + } + logtrace.Info(ctx, "download: signature verified", fields) + } else { + logtrace.Info(ctx, "download: public cascade (no signature)", fields) + } + + task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval 
started", "", "", send) + + logtrace.Info(ctx, "download: network retrieval start", logtrace.Fields{logtrace.FieldActionID: actionDetails.GetAction().ActionID}) + filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send) + if err != nil { + fields[logtrace.FieldError] = err.Error() + if tmpDir != "" { + if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil { + logtrace.Warn(ctx, "cleanup of tmp dir after error failed", logtrace.Fields{"tmp_dir": tmpDir, logtrace.FieldError: cerr.Error()}) + } + } + return task.wrapErr(ctx, "failed to download artifacts", err, fields) + } + logtrace.Debug(ctx, "File reconstructed and hash verified", fields) + task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, "Decode completed", filePath, tmpDir, send) + + return nil +} + +func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, tmpDir string) error { + if tmpDir == "" { + return nil + } + if err := os.RemoveAll(tmpDir); err != nil { + return err + } + return nil +} + +func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context, actionID, signature string) error { + if signature == "" { + return errors.New("signature required") + } + // Fetch the action to get the creator address for verification + act, err := task.LumeraClient.GetAction(ctx, actionID) + if err != nil { + return fmt.Errorf("get action for signature verification: %w", err) + } + creator := act.GetAction().Creator + if err := cascadekit.VerifyStringRawOrADR36(actionID, signature, creator, func(data, sig []byte) error { + return task.LumeraClient.Verify(ctx, creator, data, sig) + }); err != nil { + return err + } + return nil +} + +func (task *CascadeRegistrationTask) streamDownloadEvent(eventType SupernodeEventType, msg, filePath, dir string, send func(resp *DownloadResponse) error) { + _ = send(&DownloadResponse{EventType: eventType, Message: msg, FilePath: filePath, DownloadedDir: dir}) +} + +func (task 
*CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields, send func(resp *DownloadResponse) error) (string, string, error) { + var layout codec.Layout + var layoutFetchMS, layoutDecodeMS int64 + var layoutAttempts int + + // Retrieve via index IDs + if len(metadata.RqIdsIds) > 0 { + for _, indexID := range metadata.RqIdsIds { + iStart := time.Now() + logtrace.Debug(ctx, "RPC Retrieve index file", logtrace.Fields{"index_id": indexID}) + indexFile, err := task.P2PClient.Retrieve(ctx, indexID) + if err != nil || len(indexFile) == 0 { + logtrace.Warn(ctx, "Retrieve index file failed or empty", logtrace.Fields{"index_id": indexID, logtrace.FieldError: fmt.Sprintf("%v", err)}) + continue + } + logtrace.Debug(ctx, "Retrieve index file completed", logtrace.Fields{"index_id": indexID, "bytes": len(indexFile), "ms": time.Since(iStart).Milliseconds()}) + indexData, err := cascadekit.ParseCompressedIndexFile(indexFile) + if err != nil { + logtrace.Warn(ctx, "failed to parse index file", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error()}) + continue + } + var netMS, decMS int64 + var attempts int + layout, netMS, decMS, attempts, err = task.retrieveLayoutFromIndex(ctx, indexData, fields) + if err != nil { + logtrace.Warn(ctx, "failed to retrieve layout from index", logtrace.Fields{"index_id": indexID, logtrace.FieldError: err.Error(), "attempts": attempts}) + continue + } + layoutFetchMS, layoutDecodeMS, layoutAttempts = netMS, decMS, attempts + if len(layout.Blocks) > 0 { + logtrace.Debug(ctx, "layout file retrieved via index", logtrace.Fields{"index_id": indexID, "attempts": attempts, "net_ms": layoutFetchMS, "decode_ms": layoutDecodeMS}) + break + } + } + } + if len(layout.Blocks) == 0 { + return "", "", errors.New("no symbols found in RQ metadata") + } + fields["layout_fetch_ms"], fields["layout_decode_ms"], fields["layout_attempts"] = layoutFetchMS, layoutDecodeMS, 
layoutAttempts + return task.restoreFileFromLayout(ctx, layout, metadata.DataHash, actionID, send) +} + +func (task *CascadeRegistrationTask) restoreFileFromLayout(ctx context.Context, layout codec.Layout, dataHash string, actionID string, send func(resp *DownloadResponse) error) (string, string, error) { + fields := logtrace.Fields{logtrace.FieldActionID: actionID} + symSet := make(map[string]struct{}) + for _, block := range layout.Blocks { + for _, s := range block.Symbols { + symSet[s] = struct{}{} + } + } + allSymbols := make([]string, 0, len(symSet)) + for s := range symSet { + allSymbols = append(allSymbols, s) + } + sort.Strings(allSymbols) + totalSymbols := len(allSymbols) + fields["totalSymbols"] = totalSymbols + targetRequiredCount := (totalSymbols*targetRequiredPercent + 99) / 100 + if targetRequiredCount < 1 && totalSymbols > 0 { + targetRequiredCount = 1 + } + logtrace.Info(ctx, "download: plan symbols", logtrace.Fields{"total_symbols": totalSymbols, "target_required_percent": targetRequiredPercent, "target_required_count": targetRequiredCount}) + retrieveStart := time.Now() + reqCount := targetRequiredCount + if reqCount > totalSymbols { + reqCount = totalSymbols + } + rStart := time.Now() + logtrace.Info(ctx, "download: prepare decode", logtrace.Fields{"action_id": actionID}) + _, writeSymbol, cleanup, ws, perr := task.RQ.PrepareDecode(ctx, actionID, layout) + if perr != nil { + fields[logtrace.FieldError] = perr.Error() + logtrace.Error(ctx, "prepare decode failed", fields) + return "", "", fmt.Errorf("prepare decode workspace: %w", perr) + } + writer := func(symbolID string, data []byte) error { + _, werr := writeSymbol(-1, symbolID, data) + return werr + } + logtrace.Info(ctx, "download: batch retrieve start", logtrace.Fields{"action_id": actionID, "requested": reqCount, "total_candidates": totalSymbols}) + // We ignore the returned map since symbols are streamed to disk via writer + resultMap, err := task.P2PClient.BatchRetrieve(ctx, allSymbols, 
reqCount, actionID, writer) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "batch retrieve failed", fields) + if cleanup != nil { _ = cleanup() } + return "", "", fmt.Errorf("batch retrieve symbols: %w", err) + } + retrieveMS := time.Since(retrieveStart).Milliseconds() + logtrace.Info(ctx, "download: batch retrieve ok", logtrace.Fields{"action_id": actionID, "received": len(resultMap), "ms": time.Since(rStart).Milliseconds()}) + decodeStart := time.Now() + dStart := time.Now() + logtrace.Info(ctx, "download: decode start", logtrace.Fields{"action_id": actionID}) + decodeInfo, derr := task.RQ.DecodeFromPrepared(ctx, ws, layout) + if derr != nil { + fields[logtrace.FieldError] = derr.Error() + logtrace.Error(ctx, "decode failed", fields) + if cleanup != nil { _ = cleanup() } + return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", derr) + } + decodeMS := time.Since(decodeStart).Milliseconds() + logtrace.Info(ctx, "download: decode ok", logtrace.Fields{"action_id": actionID, "ms": time.Since(dStart).Milliseconds(), "tmp_dir": decodeInfo.DecodeTmpDir, "file_path": decodeInfo.FilePath}) + // Emit timing metrics for network retrieval and decode phases + logtrace.Debug(ctx, "download: timing", logtrace.Fields{"action_id": actionID, "retrieve_ms": retrieveMS, "decode_ms": decodeMS}) + + // Verify reconstructed file hash matches action metadata + fileHash, herr := utils.Blake3HashFile(decodeInfo.FilePath) + if herr != nil { + fields[logtrace.FieldError] = herr.Error() + logtrace.Error(ctx, "failed to hash file", fields) + return "", "", fmt.Errorf("hash file: %w", herr) + } + if fileHash == nil { + fields[logtrace.FieldError] = "file hash is nil" + logtrace.Error(ctx, "failed to hash file", fields) + return "", "", errors.New("file hash is nil") + } + if verr := cascadekit.VerifyB64DataHash(fileHash, dataHash); verr != nil { + fields[logtrace.FieldError] = verr.Error() + logtrace.Error(ctx, "failed to verify hash", fields) + 
return "", decodeInfo.DecodeTmpDir, verr + } + logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) + logtrace.Info(ctx, "download: file verified", fields) + // Emit minimal JSON payload (metrics system removed) + info := map[string]interface{}{"action_id": actionID, "found_symbols": len(resultMap), "target_percent": targetRequiredPercent} + if b, err := json.Marshal(info); err == nil { + task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), decodeInfo.FilePath, decodeInfo.DecodeTmpDir, send) + } + return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil +} diff --git a/supernode/cascade/download_helpers.go b/supernode/cascade/download_helpers.go new file mode 100644 index 00000000..73631549 --- /dev/null +++ b/supernode/cascade/download_helpers.go @@ -0,0 +1,47 @@ +package cascade + +import ( + "context" + "time" + + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" +) + +// retrieveLayoutFromIDs tries the given layout IDs in order and returns the first valid layout.
+func (task *CascadeRegistrationTask) retrieveLayoutFromIDs(ctx context.Context, layoutIDs []string, fields logtrace.Fields) (codec.Layout, int64, int64, int, error) { + var layout codec.Layout + var netMS, decMS int64 + attempts := 0 + for _, lid := range layoutIDs { + attempts++ + nStart := time.Now() + logtrace.Debug(ctx, "RPC Retrieve layout file", logtrace.Fields{"layout_id": lid}) + raw, err := task.P2PClient.Retrieve(ctx, lid) + if err != nil || len(raw) == 0 { + logtrace.Warn(ctx, "Retrieve layout failed or empty", logtrace.Fields{"layout_id": lid, logtrace.FieldError: err}) + continue + } + netMS = time.Since(nStart).Milliseconds() + dStart := time.Now() + // Layout files are stored as compressed RQ metadata: base64(JSON(layout)).signature.counter + // Use the cascadekit parser to decompress and decode instead of JSON-unmarshalling raw bytes. + parsedLayout, _, _, err := cascadekit.ParseRQMetadataFile(raw) + if err != nil { + logtrace.Warn(ctx, "Parse layout file failed", logtrace.Fields{"layout_id": lid, logtrace.FieldError: err}) + continue + } + layout = parsedLayout + decMS = time.Since(dStart).Milliseconds() + if len(layout.Blocks) > 0 { + return layout, netMS, decMS, attempts, nil + } + } + return codec.Layout{}, netMS, decMS, attempts, nil +} + +// retrieveLayoutFromIndex resolves layout IDs in the index file and tries to fetch a valid layout. 
+func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context, index cascadekit.IndexFile, fields logtrace.Fields) (codec.Layout, int64, int64, int, error) { + return task.retrieveLayoutFromIDs(ctx, index.LayoutIDs, fields) +} diff --git a/supernode/cascade/events.go b/supernode/cascade/events.go new file mode 100644 index 00000000..f1314a1a --- /dev/null +++ b/supernode/cascade/events.go @@ -0,0 +1,25 @@ +package cascade + +type SupernodeEventType int + +const ( + SupernodeEventTypeUNKNOWN SupernodeEventType = 0 + SupernodeEventTypeActionRetrieved SupernodeEventType = 1 + SupernodeEventTypeActionFeeVerified SupernodeEventType = 2 + SupernodeEventTypeTopSupernodeCheckPassed SupernodeEventType = 3 + SupernodeEventTypeMetadataDecoded SupernodeEventType = 4 + SupernodeEventTypeDataHashVerified SupernodeEventType = 5 + SupernodeEventTypeInputEncoded SupernodeEventType = 6 + SupernodeEventTypeSignatureVerified SupernodeEventType = 7 + SupernodeEventTypeRQIDsGenerated SupernodeEventType = 8 + SupernodeEventTypeRqIDsVerified SupernodeEventType = 9 + SupernodeEventTypeFinalizeSimulated SupernodeEventType = 10 + SupernodeEventTypeArtefactsStored SupernodeEventType = 11 + SupernodeEventTypeActionFinalized SupernodeEventType = 12 + SupernodeEventTypeArtefactsDownloaded SupernodeEventType = 13 + SupernodeEventTypeFinalizeSimulationFailed SupernodeEventType = 14 + // Download phase markers + SupernodeEventTypeNetworkRetrieveStarted SupernodeEventType = 15 + SupernodeEventTypeDecodeCompleted SupernodeEventType = 16 + SupernodeEventTypeServeReady SupernodeEventType = 17 +) diff --git a/supernode/cascade/helper.go b/supernode/cascade/helper.go new file mode 100644 index 00000000..cbc5699b --- /dev/null +++ b/supernode/cascade/helper.go @@ -0,0 +1,206 @@ +package cascade + +import ( + "context" + "strconv" + + "cosmossdk.io/math" + actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + 
"github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" + + "github.com/LumeraProtocol/supernode/v2/supernode/adaptors" + + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID string, f logtrace.Fields) (*actiontypes.Action, error) { + if f == nil { + f = logtrace.Fields{} + } + f[logtrace.FieldActionID] = actionID + logtrace.Info(ctx, "register: fetch action start", f) + res, err := task.LumeraClient.GetAction(ctx, actionID) + if err != nil { + return nil, task.wrapErr(ctx, "failed to get action", err, f) + } + if res.GetAction().ActionID == "" { + return nil, task.wrapErr(ctx, "action not found", errors.New(""), f) + } + logtrace.Info(ctx, "register: fetch action ok", f) + return res.GetAction(), nil +} + +func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, blockHeight uint64, f logtrace.Fields) error { + if f == nil { + f = logtrace.Fields{} + } + f[logtrace.FieldBlockHeight] = blockHeight + logtrace.Info(ctx, "register: top-supernodes fetch start", f) + top, err := task.LumeraClient.GetTopSupernodes(ctx, blockHeight) + if err != nil { + return task.wrapErr(ctx, "failed to get top SNs", err, f) + } + logtrace.Info(ctx, "register: top-supernodes fetch ok", f) + if !supernode.Exists(top.Supernodes, task.SupernodeAccountAddress) { + addresses := make([]string, len(top.Supernodes)) + for i, sn := range top.Supernodes { + addresses[i] = sn.SupernodeAccount + } + logtrace.Debug(ctx, "Supernode not in top list", logtrace.Fields{"currentAddress": task.SupernodeAccountAddress, "topSupernodes": addresses}) + return task.wrapErr(ctx, "current supernode does not exist in the top SNs list", errors.Errorf("current address: %s, top 
supernodes: %v", task.SupernodeAccountAddress, addresses), f) + } + logtrace.Info(ctx, "register: top-supernode verified", f) + return nil +} + +func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, filePath string, f logtrace.Fields) (*adaptors.EncodeResult, error) { + if f == nil { + f = logtrace.Fields{} + } + f[logtrace.FieldActionID] = actionID + f["file_path"] = filePath + logtrace.Info(ctx, "register: encode input start", f) + res, err := task.RQ.EncodeInput(ctx, actionID, filePath) + if err != nil { + return nil, task.wrapErr(ctx, "failed to encode data", err, f) + } + // Enrich fields with result for subsequent logs + f["symbols_dir"] = res.SymbolsDir + logtrace.Info(ctx, "register: encode input ok", f) + return &res, nil +} + +// ValidateIndexAndLayout verifies: +// - creator signature over the index payload (index_b64) +// - layout signature over base64(JSON(layout)) +// Returns the decoded index and layoutB64. No logging here; callers handle it. 
+func (task *CascadeRegistrationTask) validateIndexAndLayout(ctx context.Context, creator string, indexSignatureFormat string, layout codec.Layout) (cascadekit.IndexFile, []byte, error) { + // Extract and verify creator signature on index + indexB64, creatorSigB64, err := cascadekit.ExtractIndexAndCreatorSig(indexSignatureFormat) + if err != nil { + return cascadekit.IndexFile{}, nil, err + } + if err := cascadekit.VerifyIndex(indexB64, creatorSigB64, creator, func(data, sig []byte) error { + return task.LumeraClient.Verify(ctx, creator, data, sig) + }); err != nil { + return cascadekit.IndexFile{}, nil, err + } + // Decode index + indexFile, err := cascadekit.DecodeIndexB64(indexB64) + if err != nil { + return cascadekit.IndexFile{}, nil, err + } + // Build layoutB64 and verify single-block + signature + layoutB64, err := cascadekit.LayoutB64(layout) + if err != nil { + return cascadekit.IndexFile{}, nil, err + } + // Enforce single-block layout for Cascade + if len(layout.Blocks) != 1 { + return cascadekit.IndexFile{}, nil, errors.New("layout must contain exactly one block") + } + if err := cascadekit.VerifyLayout(layoutB64, indexFile.LayoutSignature, creator, func(data, sig []byte) error { + return task.LumeraClient.Verify(ctx, creator, data, sig) + }); err != nil { + return cascadekit.IndexFile{}, nil, err + } + return indexFile, layoutB64, nil +} + +func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, layoutSigB64 string, layoutB64 []byte, f logtrace.Fields) ([]string, [][]byte, error) { + if f == nil { + f = logtrace.Fields{} + } + f["rq_ic"] = uint32(meta.RqIdsIc) + f["rq_max"] = uint32(meta.RqIdsMax) + logtrace.Info(ctx, "register: rqid files generation start", f) + + layoutIDs, layoutFiles, err := cascadekit.GenerateLayoutFilesFromB64(layoutB64, layoutSigB64, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return nil, nil, task.wrapErr(ctx, "failed to generate layout files", err, f) 
+ } + logtrace.Info(ctx, "register: layout files generated", logtrace.Fields{"count": len(layoutFiles), "layout_ids": len(layoutIDs)}) + indexIDs, indexFiles, err := cascadekit.GenerateIndexFiles(meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) + if err != nil { + return nil, nil, task.wrapErr(ctx, "failed to generate index files", err, f) + } + allFiles := append(layoutFiles, indexFiles...) + logtrace.Info(ctx, "register: index files generated", logtrace.Fields{"count": len(indexFiles), "rqids": len(indexIDs)}) + logtrace.Info(ctx, "register: rqid files generation ok", logtrace.Fields{"total_files": len(allFiles)}) + return indexIDs, allFiles, nil +} + +func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) error { + if f == nil { + f = logtrace.Fields{} + } + lf := logtrace.Fields{logtrace.FieldActionID: actionID, logtrace.FieldTaskID: task.taskID, "id_files_count": len(idFiles), "symbols_dir": symbolsDir} + for k, v := range f { + lf[k] = v + } + ctx = logtrace.CtxWithOrigin(ctx, "first_pass") + logtrace.Info(ctx, "store: first-pass begin", lf) + if err := task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{IDFiles: idFiles, SymbolsDir: symbolsDir, TaskID: task.taskID, ActionID: actionID}, f); err != nil { + return task.wrapErr(ctx, "failed to store artefacts", err, lf) + } + logtrace.Info(ctx, "store: first-pass ok", lf) + return nil +} + +func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, err error, f logtrace.Fields) error { + if err != nil { + f[logtrace.FieldError] = err.Error() + } + logtrace.Error(ctx, msg, f) + if err != nil { + return status.Errorf(codes.Internal, "%s: %v", msg, err) + } + return status.Errorf(codes.Internal, "%s", msg) +} + +func (task *CascadeRegistrationTask) emitArtefactsStored(ctx context.Context, fields logtrace.Fields, _ codec.Layout, send func(resp *RegisterResponse) error) { + if fields == 
nil { + fields = logtrace.Fields{} + } + msg := "Artefacts stored" + logtrace.Info(ctx, "register: artefacts stored", fields) + task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) +} + +func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action *actiontypes.Action, dataSize int, fields logtrace.Fields) error { + if fields == nil { + fields = logtrace.Fields{} + } + fields["data_bytes"] = dataSize + logtrace.Info(ctx, "register: verify action fee start", fields) + // Round up to the nearest KB to avoid underestimating required fee + dataSizeInKBs := (dataSize + 1023) / 1024 + fee, err := task.LumeraClient.GetActionFee(ctx, strconv.Itoa(dataSizeInKBs)) + if err != nil { + return task.wrapErr(ctx, "failed to get action fee", err, fields) + } + amount, err := strconv.ParseInt(fee.Amount, 10, 64) + if err != nil { + return task.wrapErr(ctx, "failed to parse fee amount", err, fields) + } + requiredFee := sdk.NewCoin("ulume", math.NewInt(amount)) + logtrace.Debug(ctx, "calculated required fee", logtrace.Fields{"fee": requiredFee.String(), "dataBytes": dataSize}) + // Accept paying more than the minimum required fee. Only enforce denom match and Amount >= required. 
+ if action.Price == nil { + return task.wrapErr(ctx, "insufficient fee", errors.Errorf("expected at least %s, got ", requiredFee.String()), fields) + } + if action.Price.Denom != requiredFee.Denom { + return task.wrapErr(ctx, "invalid fee denom", errors.Errorf("expected denom %s, got %s", requiredFee.Denom, action.Price.Denom), fields) + } + if action.Price.Amount.LT(requiredFee.Amount) { + return task.wrapErr(ctx, "insufficient fee", errors.Errorf("expected at least %s, got %s", requiredFee.String(), action.Price.String()), fields) + } + logtrace.Info(ctx, "register: verify action fee ok", logtrace.Fields{"required_fee": requiredFee.String(), "provided_fee": action.Price.String()}) + return nil +} diff --git a/supernode/services/cascade/interfaces.go b/supernode/cascade/interfaces.go similarity index 74% rename from supernode/services/cascade/interfaces.go rename to supernode/cascade/interfaces.go index e782bc23..5a4d0d4e 100644 --- a/supernode/services/cascade/interfaces.go +++ b/supernode/cascade/interfaces.go @@ -6,7 +6,7 @@ import ( // CascadeServiceFactory defines an interface to create cascade tasks // -//go:generate mockgen -destination=mocks/cascade_interfaces_mock.go -package=cascademocks -source=interfaces.go + type CascadeServiceFactory interface { NewCascadeRegistrationTask() CascadeTask } @@ -15,5 +15,5 @@ type CascadeServiceFactory interface { type CascadeTask interface { Register(ctx context.Context, req *RegisterRequest, send func(resp *RegisterResponse) error) error Download(ctx context.Context, req *DownloadRequest, send func(resp *DownloadResponse) error) error - CleanupDownload(ctx context.Context, actionID string) error + CleanupDownload(ctx context.Context, tmpDir string) error } diff --git a/supernode/cascade/register.go b/supernode/cascade/register.go new file mode 100644 index 00000000..926f9b31 --- /dev/null +++ b/supernode/cascade/register.go @@ -0,0 +1,165 @@ +package cascade + +import ( + "context" + "os" + + 
"github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" +) + +// RegisterRequest contains parameters for upload request +type RegisterRequest struct { + TaskID string + ActionID string + DataHash []byte + DataSize int + FilePath string +} + +// RegisterResponse contains the result of upload +type RegisterResponse struct { + EventType SupernodeEventType + Message string + TxHash string +} + +func (task *CascadeRegistrationTask) Register( + ctx context.Context, + req *RegisterRequest, + send func(resp *RegisterResponse) error, +) (err error) { + // Step 1: Correlate context and capture task identity + if req != nil && req.ActionID != "" { + ctx = logtrace.CtxWithCorrelationID(ctx, req.ActionID) + ctx = logtrace.CtxWithOrigin(ctx, "first_pass") + task.taskID = req.TaskID + } + + // Step 2: Log request and ensure uploaded file cleanup + fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} + logtrace.Info(ctx, "register: request", fields) + defer func() { + if req != nil && req.FilePath != "" { + if remErr := os.RemoveAll(req.FilePath); remErr != nil { + logtrace.Warn(ctx, "Failed to remove uploaded file", fields) + } else { + logtrace.Debug(ctx, "Uploaded file cleaned up", fields) + } + } + }() + + // Step 3: Fetch the action details + action, err := task.fetchAction(ctx, req.ActionID, fields) + if err != nil { + return err + } + fields[logtrace.FieldBlockHeight] = action.BlockHeight + fields[logtrace.FieldCreator] = action.Creator + fields[logtrace.FieldStatus] = action.State + fields[logtrace.FieldPrice] = action.Price + logtrace.Info(ctx, "register: action fetched", fields) + task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) + + // Step 4: Verify action fee based on data size (rounded up to KB) + if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { + return err + } + logtrace.Info(ctx, "register: fee verified", fields) 
+ task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) + + // Step 5: Ensure this node is eligible (top supernode for block) + fields[logtrace.FieldSupernodeState] = task.SupernodeAccountAddress + if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { + return err + } + logtrace.Info(ctx, "register: top supernode confirmed", fields) + task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) + + // Step 6: Decode Cascade metadata from the action + cascadeMeta, err := cascadekit.UnmarshalCascadeMetadata(action.Metadata) + if err != nil { + return task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, fields) + } + logtrace.Info(ctx, "register: metadata decoded", fields) + task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) + + // Step 7: Verify request-provided data hash matches metadata + if err := cascadekit.VerifyB64DataHash(req.DataHash, cascadeMeta.DataHash); err != nil { + return err + } + logtrace.Debug(ctx, "request data-hash has been matched with the action data-hash", fields) + logtrace.Info(ctx, "register: data hash matched", fields) + task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) + + // Step 8: Encode input using the RQ codec to produce layout and symbols + encodeResult, err := task.encodeInput(ctx, req.ActionID, req.FilePath, fields) + if err != nil { + return err + } + fields["symbols_dir"] = encodeResult.SymbolsDir + logtrace.Info(ctx, "register: input encoded", fields) + task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) + + // Step 9: Verify index and layout signatures; produce layoutB64 + logtrace.Info(ctx, "register: verify+decode layout start", fields) + indexFile, layoutB64, vErr := task.validateIndexAndLayout(ctx, action.Creator, cascadeMeta.Signatures, encodeResult.Layout) + if vErr != nil { + return 
task.wrapErr(ctx, "signature or index validation failed", vErr, fields) + } + layoutSignatureB64 := indexFile.LayoutSignature + logtrace.Info(ctx, "register: signature verified", fields) + task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) + + // Step 10: Generate RQID files (layout and index) and compute IDs + rqIDs, idFiles, err := task.generateRQIDFiles(ctx, cascadeMeta, layoutSignatureB64, layoutB64, fields) + if err != nil { + return err + } + + // Calculate combined size of all index and layout files + totalSize := 0 + for _, file := range idFiles { + totalSize += len(file) + } + + fields["id_files_count"] = len(idFiles) + fields["rqids_count"] = len(rqIDs) + fields["combined_files_size_bytes"] = totalSize + fields["combined_files_size_kb"] = float64(totalSize) / 1024 + fields["combined_files_size_mb"] = float64(totalSize) / (1024 * 1024) + logtrace.Info(ctx, "register: rqid files generated", fields) + task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) + + logtrace.Info(ctx, "register: rqids validated", fields) + task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) + + // Step 11: Simulate finalize to ensure the tx will succeed + if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqIDs); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Info(ctx, "register: finalize simulation failed", fields) + task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) + return task.wrapErr(ctx, "finalize action simulation failed", err, fields) + } + logtrace.Info(ctx, "register: finalize simulation passed", fields) + task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) + + // Step 12: Store artefacts to the network store + if err := task.storeArtefacts(ctx, action.ActionID, idFiles, encodeResult.SymbolsDir, fields); err != nil { + return err + } + 
task.emitArtefactsStored(ctx, fields, encodeResult.Layout, send) + + // Step 13: Finalize the action on-chain + resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqIDs) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Info(ctx, "register: finalize action error", fields) + return task.wrapErr(ctx, "failed to finalize action", err, fields) + } + txHash := resp.TxResponse.TxHash + fields[logtrace.FieldTxHash] = txHash + logtrace.Info(ctx, "register: action finalized", fields) + task.streamEvent(SupernodeEventTypeActionFinalized, "Action finalized", txHash, send) + return nil +} diff --git a/supernode/cascade/service.go b/supernode/cascade/service.go new file mode 100644 index 00000000..29b047bd --- /dev/null +++ b/supernode/cascade/service.go @@ -0,0 +1,42 @@ +package cascade + +import ( + "context" + + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/v2/supernode/adaptors" +) + +type CascadeService struct { + LumeraClient adaptors.LumeraClient + P2P adaptors.P2PService + RQ adaptors.CodecService + P2PClient p2p.Client + SupernodeAccountAddress string +} + +// Compile-time checks to ensure CascadeService implements required interfaces +var _ CascadeServiceFactory = (*CascadeService)(nil) + +// NewCascadeRegistrationTask creates a new task for cascade registration +func (service *CascadeService) NewCascadeRegistrationTask() CascadeTask { + task := NewCascadeRegistrationTask(service) + return task +} + +// Run starts the service (no background workers) +func (service *CascadeService) Run(ctx context.Context) error { <-ctx.Done(); return nil } + +// NewCascadeService returns a new CascadeService instance +func NewCascadeService(supernodeAccountAddress string, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore 
rqstore.Store) *CascadeService { + return &CascadeService{ + LumeraClient: adaptors.NewLumeraClient(lumera), + P2P: adaptors.NewP2PService(p2pClient, rqstore), + RQ: adaptors.NewCodecService(codec), + P2PClient: p2pClient, + SupernodeAccountAddress: supernodeAccountAddress, + } +} diff --git a/supernode/cascade/task.go b/supernode/cascade/task.go new file mode 100644 index 00000000..71725d20 --- /dev/null +++ b/supernode/cascade/task.go @@ -0,0 +1,20 @@ +package cascade + +// CascadeRegistrationTask is the task for cascade registration +type CascadeRegistrationTask struct { + *CascadeService + + taskID string +} + +var _ CascadeTask = (*CascadeRegistrationTask)(nil) + +// NewCascadeRegistrationTask returns a new Task instance +func NewCascadeRegistrationTask(service *CascadeService) *CascadeRegistrationTask { + return &CascadeRegistrationTask{CascadeService: service} +} + +// streamEvent sends a RegisterResponse via the provided callback. +func (task *CascadeRegistrationTask) streamEvent(eventType SupernodeEventType, msg, txHash string, send func(resp *RegisterResponse) error) { + _ = send(&RegisterResponse{EventType: eventType, Message: msg, TxHash: txHash}) +} diff --git a/supernode/cmd/config_update.go b/supernode/cmd/config_update.go index 91807962..3b3ff255 100644 --- a/supernode/cmd/config_update.go +++ b/supernode/cmd/config_update.go @@ -8,8 +8,8 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/spf13/cobra" cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/spf13/cobra" ) // configUpdateCmd represents the config update command @@ -51,7 +51,7 @@ func promptParameterSelection() (string, error) { Message: "Select parameter to update:", Options: []string{ "Supernode IP Address", - "Supernode Port", + "Supernode Port", "Lumera GRPC Address", "Chain ID", "Key Name", @@ -197,7 +197,7 @@ func updateKeyringBackend() error { // 
Show warning fmt.Println("⚠️ WARNING: Changing keyring backend will switch to a different keyring.") fmt.Println("You will need to select a key from the new keyring or recover one.") - + var proceed bool confirmPrompt := &survey.Confirm{ Message: "Do you want to continue?", @@ -225,14 +225,14 @@ func updateKeyringBackend() error { // Update keyring backend in config appConfig.KeyringConfig.Backend = backend - + // Save config with new keyring backend if err := saveConfig(); err != nil { return err } fmt.Printf("Updated keyring backend to: %s\n", backend) - + // Reload config to get the new keyring settings cfgFile := filepath.Join(baseDir, DefaultConfigFile) reloadedConfig, err := config.LoadConfig(cfgFile, baseDir) @@ -269,7 +269,7 @@ func selectKeyFromNewKeyring(kr cKeyring.Keyring, keyInfos []*cKeyring.Record) e func selectKeyFromKeyring(kr cKeyring.Keyring, keyInfos []*cKeyring.Record) error { // Build options list with display format options := []string{} - + // Add existing keys for _, info := range keyInfos { addr, err := info.GetAddress() @@ -278,7 +278,7 @@ func selectKeyFromKeyring(kr cKeyring.Keyring, keyInfos []*cKeyring.Record) erro } options = append(options, fmt.Sprintf("%s (%s)", info.Name, addr.String())) } - + // Always add option to recover new key options = append(options, "Add new key (recover from mnemonic)") @@ -316,4 +316,4 @@ func saveConfig() error { func init() { configCmd.AddCommand(configUpdateCmd) -} \ No newline at end of file +} diff --git a/supernode/cmd/init.go b/supernode/cmd/init.go index 6412d848..a9d01cb5 100644 --- a/supernode/cmd/init.go +++ b/supernode/cmd/init.go @@ -15,7 +15,7 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/LumeraProtocol/supernode/v2/supernode/config" - consmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" + cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/spf13/cobra" ) @@ -36,15 +36,7 @@ var ( passphraseFile string 
) -// Default configuration values -const ( - DefaultKeyringBackend = "test" - DefaultKeyName = "test-key" - DefaultSupernodeAddr = "0.0.0.0" - DefaultSupernodePort = 4444 - DefaultLumeraGRPC = "localhost:9090" - DefaultChainID = "testing" -) +// Default configuration values centralized in config package // InitInputs holds all user inputs for initialization type InitInputs struct { @@ -221,7 +213,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5a: Determine keyring backend (how keys are stored securely) // Options: 'test' (unencrypted), 'file' (encrypted file), 'os' (system keyring) - backend := DefaultKeyringBackend + backend := config.DefaultKeyringBackend if keyringBackendFlag != "" { backend = keyringBackendFlag } @@ -233,7 +225,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5b: Set the name for the cryptographic key // This name is used to reference the key in the keyring - keyName := DefaultKeyName + keyName := config.DefaultKeyName if keyNameFlag != "" { keyName = keyNameFlag @@ -245,7 +237,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5c: Configure the supernode's network binding address // Determines which network interface the supernode will listen on - supernodeAddr := DefaultSupernodeAddr + supernodeAddr := config.DefaultSupernodeHost if supernodeAddrFlag != "" { supernodeAddr = supernodeAddrFlag @@ -256,7 +248,7 @@ func gatherUserInputs() (InitInputs, error) { } // Step 5d: Set the port for supernode peer-to-peer communication - supernodePort := DefaultSupernodePort + supernodePort := int(config.DefaultSupernodePort) if supernodePortFlag != 0 { supernodePort = supernodePortFlag @@ -268,7 +260,7 @@ func gatherUserInputs() (InitInputs, error) { // Step 5e: Configure connection to the Lumera blockchain node // This is the GRPC endpoint for blockchain interactions - lumeraGRPC := DefaultLumeraGRPC + lumeraGRPC := config.DefaultLumeraGRPC if lumeraGrpcFlag != "" { lumeraGRPC = lumeraGrpcFlag @@ -280,7 +272,7 @@ func 
gatherUserInputs() (InitInputs, error) { // Step 5f: Set the blockchain network identifier // Must match the chain ID of the Lumera network you're connecting to - chainID := DefaultChainID + chainID := config.DefaultChainID if chainIDFlag != "" { chainID = chainIDFlag } @@ -419,7 +411,7 @@ func setupKeyring(keyName string, shouldRecover bool, mnemonic string) (string, } // recoverExistingKey handles the recovery of an existing key from mnemonic -func recoverExistingKey(kr consmoskeyring.Keyring, keyName, mnemonic string) (string, error) { +func recoverExistingKey(kr cKeyring.Keyring, keyName, mnemonic string) (string, error) { // Process and validate mnemonic using helper function processedMnemonic, err := processAndValidateMnemonic(mnemonic) if err != nil { @@ -444,7 +436,7 @@ func recoverExistingKey(kr consmoskeyring.Keyring, keyName, mnemonic string) (st } // createNewKey handles the creation of a new key -func createNewKey(kr consmoskeyring.Keyring, keyName string) (string, string, error) { +func createNewKey(kr cKeyring.Keyring, keyName string) (string, string, error) { // Generate mnemonic and create new account keyMnemonic, _, err := keyring.CreateNewAccount(kr, keyName) if err != nil { @@ -497,7 +489,7 @@ func promptKeyringBackend(passedBackend string) (string, error) { } backend = passedBackend } else { - backend = DefaultKeyringBackend + backend = config.DefaultKeyringBackend } prompt := &survey.Select{ Message: "Choose keyring backend:", @@ -565,24 +557,24 @@ func promptNetworkConfig(passedAddrs string, passedPort int, passedGRPC, passedC if passedAddrs != "" { supernodeAddr = passedAddrs } else { - supernodeAddr = DefaultSupernodeAddr + supernodeAddr = config.DefaultSupernodeHost } var port string if passedPort != 0 { port = fmt.Sprintf("%d", passedPort) } else { - port = fmt.Sprintf("%d", DefaultSupernodePort) + port = fmt.Sprintf("%d", config.DefaultSupernodePort) } if passedGRPC != "" { lumeraGrpcAddr = passedGRPC } else { - lumeraGrpcAddr = 
DefaultLumeraGRPC + lumeraGrpcAddr = config.DefaultLumeraGRPC } if passedChainID != "" { chainID = passedChainID } else { - chainID = DefaultChainID + chainID = config.DefaultChainID } // Supernode IP address @@ -618,7 +610,6 @@ func promptNetworkConfig(passedAddrs string, passedPort int, passedGRPC, passedC return "", 0, "", "", fmt.Errorf("invalid supernode port: %s", portStr) } - // Lumera GRPC address (full address with port) lumeraPrompt := &survey.Input{ Message: "Enter Lumera GRPC address:", diff --git a/supernode/cmd/service.go b/supernode/cmd/service.go index d4af1269..8cd8708f 100644 --- a/supernode/cmd/service.go +++ b/supernode/cmd/service.go @@ -23,7 +23,7 @@ func RunServices(ctx context.Context, services ...service) error { if err != nil { logtrace.Error(ctx, "service stopped with an error", logtrace.Fields{"service": reflect.TypeOf(service).String(), "error": err}) } else { - logtrace.Info(ctx, "service stopped", logtrace.Fields{"service": reflect.TypeOf(service).String()}) + logtrace.Debug(ctx, "service stopped", logtrace.Fields{"service": reflect.TypeOf(service).String()}) } return err }) diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 31c19b2a..e7405f47 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -3,13 +3,12 @@ package cmd import ( "context" "fmt" - "net/http" - _ "net/http/pprof" "os" "os/signal" "path/filepath" "strings" "syscall" + "time" "github.com/LumeraProtocol/supernode/v2/p2p" "github.com/LumeraProtocol/supernode/v2/p2p/kademlia/store/cloud" @@ -17,18 +16,26 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + grpcserver "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/server" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/v2/pkg/task" + cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" 
"github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/LumeraProtocol/supernode/v2/supernode/node/action/server/cascade" - "github.com/LumeraProtocol/supernode/v2/supernode/node/supernode/gateway" - "github.com/LumeraProtocol/supernode/v2/supernode/node/supernode/server" - cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - supernodeService "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/services/verifier" + statusService "github.com/LumeraProtocol/supernode/v2/supernode/status" + "github.com/LumeraProtocol/supernode/v2/supernode/transport/gateway" + cascadeRPC "github.com/LumeraProtocol/supernode/v2/supernode/transport/grpc/cascade" + server "github.com/LumeraProtocol/supernode/v2/supernode/transport/grpc/status" + "github.com/LumeraProtocol/supernode/v2/supernode/verifier" cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/spf13/cobra" + + pbcascade "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" + + pbsupernode "github.com/LumeraProtocol/supernode/v2/gen/supernode" + + // Configure DHT advertised/minimum versions from build-time variables + "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" ) // startCmd represents the start command @@ -41,12 +48,22 @@ The supernode will connect to the Lumera network and begin participating in the // Initialize logging logtrace.Setup("supernode") + // Advertise our binary version to peers + kademlia.SetLocalVersion(Version) + // Optionally enforce a minimum peer version if provided at build time + if strings.TrimSpace(MinVer) != "" { + kademlia.SetMinVersion(MinVer) + } + // Create context with correlation ID for tracing ctx := logtrace.CtxWithCorrelationID(context.Background(), "supernode-start") + // Make the context cancelable for graceful shutdown + ctx, cancel := context.WithCancel(ctx) 
+ defer cancel() // Log configuration info cfgFile := filepath.Join(baseDir, DefaultConfigFile) - logtrace.Info(ctx, "Starting supernode with configuration", logtrace.Fields{"config_file": cfgFile, "keyring_dir": appConfig.GetKeyringDir(), "key_name": appConfig.SupernodeConfig.KeyName}) + logtrace.Debug(ctx, "Starting supernode with configuration", logtrace.Fields{"config_file": cfgFile, "keyring_dir": appConfig.GetKeyringDir(), "key_name": appConfig.SupernodeConfig.KeyName}) // Initialize keyring kr, err := initKeyringFromConfig(appConfig) @@ -61,22 +78,33 @@ The supernode will connect to the Lumera network and begin participating in the } // Verify config matches chain registration before starting services - logtrace.Info(ctx, "Verifying configuration against chain registration", logtrace.Fields{}) + logtrace.Debug(ctx, "Verifying configuration against chain registration", logtrace.Fields{}) configVerifier := verifier.NewConfigVerifier(appConfig, lumeraClient, kr) verificationResult, err := configVerifier.VerifyConfig(ctx) - if err != nil { - logtrace.Fatal(ctx, "Config verification failed", logtrace.Fields{"error": err.Error()}) - } - - if !verificationResult.IsValid() { - logtrace.Fatal(ctx, "Config verification failed", logtrace.Fields{"summary": verificationResult.Summary()}) + if err != nil || (verificationResult != nil && !verificationResult.IsValid()) { + logFields := logtrace.Fields{} + if err != nil { + logFields["error"] = err.Error() + } + if verificationResult != nil && !verificationResult.IsValid() { + logFields["summary"] = verificationResult.Summary() + } + logtrace.Fatal(ctx, "Config verification failed", logFields) } if verificationResult.HasWarnings() { logtrace.Warn(ctx, "Config verification warnings", logtrace.Fields{"summary": verificationResult.Summary()}) } - logtrace.Info(ctx, "Configuration verification successful", logtrace.Fields{}) + logtrace.Debug(ctx, "Configuration verification successful", logtrace.Fields{}) + + // Set Datadog host 
to identity and service to latest IP address from chain + logtrace.SetDatadogHost(appConfig.SupernodeConfig.Identity) + if snInfo, err := lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, appConfig.SupernodeConfig.Identity); err == nil && snInfo != nil { + if ip := strings.TrimSpace(snInfo.LatestAddress); ip != "" { + logtrace.SetDatadogService(ip) + } + } // Initialize RaptorQ store for Cascade processing rqStore, err := initRQStore(ctx, appConfig) @@ -90,95 +118,108 @@ The supernode will connect to the Lumera network and begin participating in the logtrace.Fatal(ctx, "Failed to initialize P2P service", logtrace.Fields{"error": err.Error()}) } - // Initialize the supernode - supernodeInstance, err := NewSupernode(ctx, appConfig, kr, p2pService, rqStore, lumeraClient) - if err != nil { - logtrace.Fatal(ctx, "Failed to initialize supernode", logtrace.Fields{"error": err.Error()}) - } + // Supernode wrapper removed; components are managed directly // Configure cascade service cService := cascadeService.NewCascadeService( - &cascadeService.Config{ - Config: common.Config{ - SupernodeAccountAddress: appConfig.SupernodeConfig.Identity, - }, - RqFilesDir: appConfig.GetRaptorQFilesDir(), - }, + appConfig.SupernodeConfig.Identity, lumeraClient, - *p2pService, + p2pService, codec.NewRaptorQCodec(appConfig.GetRaptorQFilesDir()), rqStore, ) - // Create cascade action server - cascadeActionServer := cascade.NewCascadeActionServer(cService) + // Create a task tracker and cascade action server with DI + tr := task.New() + cascadeActionServer := cascadeRPC.NewCascadeActionServer(cService, tr, 0, 0) // Set the version in the status service package - supernodeService.Version = Version + statusService.Version = Version - // Create supernode status service - statusService := supernodeService.NewSupernodeStatusService(*p2pService, lumeraClient, appConfig) - statusService.RegisterTaskProvider(cService) + // Create supernode status service with injected tracker + statusSvc := 
statusService.NewSupernodeStatusService(p2pService, lumeraClient, appConfig, tr) // Create supernode server - supernodeServer := server.NewSupernodeServer(statusService) - - // Configure server - serverConfig := &server.Config{ - Identity: appConfig.SupernodeConfig.Identity, - ListenAddresses: appConfig.SupernodeConfig.Host, - Port: int(appConfig.SupernodeConfig.Port), - } - - // Create gRPC server - grpcServer, err := server.New(serverConfig, "service", kr, lumeraClient, cascadeActionServer, supernodeServer) + supernodeServer := server.NewSupernodeServer(statusSvc) + + // Create gRPC server (explicit args, no config struct) + grpcServer, err := server.New( + appConfig.SupernodeConfig.Identity, + appConfig.SupernodeConfig.Host, + int(appConfig.SupernodeConfig.Port), + "service", + kr, + lumeraClient, + grpcserver.ServiceDesc{Desc: &pbcascade.CascadeService_ServiceDesc, Service: cascadeActionServer}, + grpcserver.ServiceDesc{Desc: &pbsupernode.SupernodeService_ServiceDesc, Service: supernodeServer}, + ) if err != nil { logtrace.Fatal(ctx, "Failed to create gRPC server", logtrace.Fields{"error": err.Error()}) } // Create HTTP gateway server that directly calls the supernode server - gatewayServer, err := gateway.NewServer(appConfig.SupernodeConfig.Host, int(appConfig.SupernodeConfig.GatewayPort), supernodeServer) + // Pass chain ID for pprof configuration + gatewayServer, err := gateway.NewServerWithConfig( + appConfig.SupernodeConfig.Host, + int(appConfig.SupernodeConfig.GatewayPort), + supernodeServer, + appConfig.LumeraClientConfig.ChainID, + ) if err != nil { return fmt.Errorf("failed to create gateway server: %w", err) } - // Start profiling server on testnet only - isTestnet := strings.Contains(strings.ToLower(appConfig.LumeraClientConfig.ChainID), "testnet") + // Start the services using the standard runner and capture exit + servicesErr := make(chan error, 1) + go func() { servicesErr <- RunServices(ctx, grpcServer, cService, p2pService, gatewayServer) }() - 
if isTestnet { - profilingAddr := "0.0.0.0:6060" + // Set up signal handling for graceful shutdown + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(sigCh) + + // Wait for either a termination signal or service exit + var triggeredBySignal bool + var runErr error + select { + case sig := <-sigCh: + triggeredBySignal = true + logtrace.Debug(ctx, "Received signal, shutting down", logtrace.Fields{"signal": sig.String()}) + case runErr = <-servicesErr: + if runErr != nil { + logtrace.Error(ctx, "Service error", logtrace.Fields{"error": runErr.Error()}) + } else { + logtrace.Debug(ctx, "Services exited", logtrace.Fields{}) + } + } - logtrace.Info(ctx, "Starting profiling server", logtrace.Fields{ - "address": profilingAddr, - "chain_id": appConfig.LumeraClientConfig.ChainID, - "is_testnet": isTestnet, - }) + // Cancel context to signal all services + cancel() - go func() { - if err := http.ListenAndServe(profilingAddr, nil); err != nil { - logtrace.Error(ctx, "Profiling server error", logtrace.Fields{"error": err.Error()}) - } - }() - } + // Stop HTTP gateway and gRPC servers without blocking shutdown + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) + defer shutdownCancel() - // Start the services go func() { - if err := RunServices(ctx, grpcServer, cService, *p2pService, gatewayServer); err != nil { - logtrace.Error(ctx, "Service error", logtrace.Fields{"error": err.Error()}) + if err := gatewayServer.Stop(shutdownCtx); err != nil { + logtrace.Warn(ctx, "Gateway shutdown warning", logtrace.Fields{"error": err.Error()}) } }() + grpcServer.Close() - // Set up signal handling for graceful shutdown - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) - - // Wait for termination signal - sig := <-sigCh - logtrace.Info(ctx, "Received signal, shutting down", logtrace.Fields{"signal": sig.String()}) + // Close Lumera client without 
blocking shutdown + logtrace.Debug(ctx, "Closing Lumera client", logtrace.Fields{}) + go func() { + if err := lumeraClient.Close(); err != nil { + logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{"error": err.Error()}) + } + }() - // Graceful shutdown - if err := supernodeInstance.Stop(ctx); err != nil { - logtrace.Error(ctx, "Error during shutdown", logtrace.Fields{"error": err.Error()}) + // If we triggered shutdown by signal, wait for services to drain + if triggeredBySignal { + if err := <-servicesErr; err != nil { + logtrace.Error(ctx, "Service error on shutdown", logtrace.Fields{"error": err.Error()}) + } } return nil @@ -190,7 +231,7 @@ func init() { } // initP2PService initializes the P2P service -func initP2PService(ctx context.Context, config *config.Config, lumeraClient lumera.Client, kr cKeyring.Keyring, rqStore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (*p2p.P2P, error) { +func initP2PService(ctx context.Context, config *config.Config, lumeraClient lumera.Client, kr cKeyring.Keyring, rqStore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (p2p.P2P, error) { // Get the supernode address from the keyring keyInfo, err := kr.Key(config.SupernodeConfig.KeyName) if err != nil { @@ -204,12 +245,51 @@ func initP2PService(ctx context.Context, config *config.Config, lumeraClient lum // Create P2P config using helper function p2pConfig := createP2PConfig(config, address.String()) - logtrace.Info(ctx, "Initializing P2P service", logtrace.Fields{"address": p2pConfig.ListenAddress, "port": p2pConfig.Port, "data_dir": p2pConfig.DataDir, "supernode_id": address.String()}) + logtrace.Debug(ctx, "Initializing P2P service", logtrace.Fields{"address": p2pConfig.ListenAddress, "port": p2pConfig.Port, "data_dir": p2pConfig.DataDir, "supernode_id": address.String()}) p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, kr, rqStore, cloud, mst) if err != nil { return nil, fmt.Errorf("failed to initialize p2p 
service: %w", err) } - return &p2pService, nil + return p2pService, nil +} + +// initLumeraClient initializes the Lumera client based on configuration +func initLumeraClient(ctx context.Context, config *config.Config, kr cKeyring.Keyring) (lumera.Client, error) { + if config == nil { + return nil, fmt.Errorf("config is nil") + } + + lumeraConfig, err := lumera.NewConfig(config.LumeraClientConfig.GRPCAddr, config.LumeraClientConfig.ChainID, config.SupernodeConfig.KeyName, kr) + if err != nil { + return nil, fmt.Errorf("failed to create Lumera config: %w", err) + } + return lumera.NewClient( + ctx, + lumeraConfig, + ) +} + +// initRQStore initializes the RaptorQ store for Cascade processing +func initRQStore(ctx context.Context, config *config.Config) (rqstore.Store, error) { + if config == nil { + return nil, fmt.Errorf("config is nil") + } + + // Create RaptorQ store directory if it doesn't exist + rqDir := config.GetRaptorQFilesDir() + "/rq" + if err := os.MkdirAll(rqDir, 0700); err != nil { + return nil, fmt.Errorf("failed to create RQ store directory: %w", err) + } + + // Create the SQLite file path + rqStoreFile := rqDir + "/rqstore.db" + + logtrace.Debug(ctx, "Initializing RaptorQ store", logtrace.Fields{ + "file_path": rqStoreFile, + }) + + // Initialize RaptorQ store with SQLite + return rqstore.NewSQLiteRQStore(rqStoreFile) } diff --git a/supernode/cmd/supernode.go b/supernode/cmd/supernode.go deleted file mode 100644 index 19a65718..00000000 --- a/supernode/cmd/supernode.go +++ /dev/null @@ -1,140 +0,0 @@ -package cmd - -import ( - "context" - "fmt" - "os" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/cosmos/cosmos-sdk/crypto/keyring" -) - -// Supernode represents a supernode in the Lumera network -type 
Supernode struct { - config *config.Config - lumeraClient lumera.Client - p2pService p2p.P2P - keyring keyring.Keyring - rqStore rqstore.Store - keyName string // String that represents the supernode account in keyring -} - -// NewSupernode creates a new supernode instance -func NewSupernode(ctx context.Context, config *config.Config, kr keyring.Keyring, - p2pClient *p2p.P2P, rqStore rqstore.Store, lumeraClient lumera.Client) (*Supernode, error) { - - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - supernode := &Supernode{ - config: config, - lumeraClient: lumeraClient, - keyring: kr, - rqStore: rqStore, - p2pService: *p2pClient, - keyName: config.SupernodeConfig.KeyName, - } - - return supernode, nil -} - -// Start starts all supernode services -func (s *Supernode) Start(ctx context.Context) error { - // Verify that the key specified in config exists - keyInfo, err := s.keyring.Key(s.config.SupernodeConfig.KeyName) - if err != nil { - logtrace.Error(ctx, "Key not found in keyring", logtrace.Fields{ - "key_name": s.config.SupernodeConfig.KeyName, - "error": err.Error(), - }) - - // Provide helpful guidance - fmt.Printf("\nError: Key '%s' not found in keyring at %s\n", - s.config.SupernodeConfig.KeyName, s.config.GetKeyringDir()) - fmt.Println("\nPlease create the key first with one of these commands:") - fmt.Printf(" supernode keys add %s\n", s.config.SupernodeConfig.KeyName) - fmt.Printf(" supernode keys recover %s\n", s.config.SupernodeConfig.KeyName) - return fmt.Errorf("key not found") - } - - // Get the account address for logging - address, err := keyInfo.GetAddress() - if err != nil { - logtrace.Error(ctx, "Failed to get address from key", logtrace.Fields{ - "error": err.Error(), - }) - return err - } - - logtrace.Info(ctx, "Found valid key in keyring", logtrace.Fields{ - "key_name": s.config.SupernodeConfig.KeyName, - "address": address.String(), - }) - - // Use the P2P service that was passed in via constructor - logtrace.Info(ctx, 
"Starting P2P service", logtrace.Fields{}) - if err := s.p2pService.Run(ctx); err != nil { - return fmt.Errorf("p2p service error: %w", err) - } - - return nil -} - -// Stop stops all supernode services -func (s *Supernode) Stop(ctx context.Context) error { - // Close the Lumera client connection - if s.lumeraClient != nil { - logtrace.Info(ctx, "Closing Lumera client", logtrace.Fields{}) - if err := s.lumeraClient.Close(); err != nil { - logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{ - "error": err.Error(), - }) - } - } - - return nil -} - -// initLumeraClient initializes the Lumera client based on configuration -func initLumeraClient(ctx context.Context, config *config.Config, kr keyring.Keyring) (lumera.Client, error) { - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - lumeraConfig, err := lumera.NewConfig(config.LumeraClientConfig.GRPCAddr, config.LumeraClientConfig.ChainID, config.SupernodeConfig.KeyName, kr) - if err != nil { - return nil, fmt.Errorf("failed to create Lumera config: %w", err) - } - return lumera.NewClient( - ctx, - lumeraConfig, - ) -} - -// initRQStore initializes the RaptorQ store for Cascade processing -func initRQStore(ctx context.Context, config *config.Config) (rqstore.Store, error) { - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - // Create RaptorQ store directory if it doesn't exist - rqDir := config.GetRaptorQFilesDir() + "/rq" - if err := os.MkdirAll(rqDir, 0700); err != nil { - return nil, fmt.Errorf("failed to create RQ store directory: %w", err) - } - - // Create the SQLite file path - rqStoreFile := rqDir + "/rqstore.db" - - logtrace.Info(ctx, "Initializing RaptorQ store", logtrace.Fields{ - "file_path": rqStoreFile, - }) - - // Initialize RaptorQ store with SQLite - return rqstore.NewSQLiteRQStore(rqStoreFile) -} diff --git a/supernode/cmd/version.go b/supernode/cmd/version.go index e6d085d8..9daaabc8 100644 --- a/supernode/cmd/version.go +++ 
b/supernode/cmd/version.go @@ -11,6 +11,8 @@ var ( Version = "dev" GitCommit = "unknown" BuildTime = "unknown" + // Optional: minimum peer version for DHT gating (empty disables gating) + MinVer = "" ) // versionCmd represents the version command diff --git a/supernode/config.yml b/supernode/config.yml index 3bbf8b7e..35d888a3 100644 --- a/supernode/config.yml +++ b/supernode/config.yml @@ -2,7 +2,9 @@ supernode: key_name: "mykey" # Account name for the supernode in keyring identity: "lumera1ccmw5plzuldntum2rz6kq6uq346vtrhrvwfzsa" # Identity of the supernode, lumera address + # You can set either 'host' (preferred) or 'ip_address' (legacy alias). host: "0.0.0.0" + # ip_address: "0.0.0.0" port: 4444 # Keyring Configuration diff --git a/supernode/config/config.go b/supernode/config/config.go index e3910ac2..d655391c 100644 --- a/supernode/config/config.go +++ b/supernode/config/config.go @@ -5,15 +5,18 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "gopkg.in/yaml.v3" ) type SupernodeConfig struct { - KeyName string `yaml:"key_name"` - Identity string `yaml:"identity"` - Host string `yaml:"host"` + KeyName string `yaml:"key_name"` + Identity string `yaml:"identity"` + Host string `yaml:"host"` + // IPAddress is an accepted alias for Host to support older configs + IPAddress string `yaml:"ip_address,omitempty"` Port uint16 `yaml:"port"` GatewayPort uint16 `yaml:"gateway_port,omitempty"` } @@ -127,6 +130,15 @@ func LoadConfig(filename string, baseDir string) (*Config, error) { return nil, fmt.Errorf("error parsing config file: %w", err) } + // Support both 'host' and legacy 'ip_address' fields. If 'host' is empty + // and 'ip_address' is provided, use it as the host value. 
+ if strings.TrimSpace(config.SupernodeConfig.Host) == "" && strings.TrimSpace(config.SupernodeConfig.IPAddress) != "" { + config.SupernodeConfig.Host = strings.TrimSpace(config.SupernodeConfig.IPAddress) + logtrace.Debug(ctx, "Using ip_address as host", logtrace.Fields{ + "ip_address": config.SupernodeConfig.IPAddress, + }) + } + // Set the base directory config.BaseDir = baseDir diff --git a/supernode/config/defaults.go b/supernode/config/defaults.go new file mode 100644 index 00000000..d7915259 --- /dev/null +++ b/supernode/config/defaults.go @@ -0,0 +1,15 @@ +package config + +// Centralized default values for configuration + +const ( + DefaultKeyringBackend = "test" + DefaultKeyringDir = "keys" + DefaultKeyName = "test-key" + DefaultSupernodeHost = "0.0.0.0" + DefaultSupernodePort = 4444 + DefaultP2PPort = 4445 + DefaultLumeraGRPC = "localhost:9090" + DefaultChainID = "testing" + DefaultRaptorQFilesDir = "raptorq_files" +) diff --git a/supernode/config/save.go b/supernode/config/save.go index 5199fb81..d93e6cb8 100644 --- a/supernode/config/save.go +++ b/supernode/config/save.go @@ -32,21 +32,21 @@ func SaveConfig(config *Config, filename string) error { // CreateDefaultConfig creates a default configuration with the specified values func CreateDefaultConfig(keyName, identity, chainID string, keyringBackend, keyringDir string, passPlain, passEnv, passFile string) *Config { - // Set default values if keyringBackend == "" { - keyringBackend = "test" + keyringBackend = DefaultKeyringBackend } if keyringDir == "" { - keyringDir = "keys" + keyringDir = DefaultKeyringDir + } + if keyName == "" { + keyName = DefaultKeyName + } + if chainID == "" { + chainID = DefaultChainID } return &Config{ - SupernodeConfig: SupernodeConfig{ - KeyName: keyName, - Identity: identity, - Host: "0.0.0.0", - Port: 4444, - }, + SupernodeConfig: SupernodeConfig{KeyName: keyName, Identity: identity, Host: DefaultSupernodeHost, Port: DefaultSupernodePort}, KeyringConfig: KeyringConfig{ 
Backend: keyringBackend, Dir: keyringDir, @@ -54,16 +54,8 @@ func CreateDefaultConfig(keyName, identity, chainID string, keyringBackend, keyr PassEnv: passEnv, PassFile: passFile, }, - P2PConfig: P2PConfig{ - Port: 4445, - DataDir: "data/p2p", - }, - LumeraClientConfig: LumeraClientConfig{ - GRPCAddr: "localhost:9090", - ChainID: chainID, - }, - RaptorQConfig: RaptorQConfig{ - FilesDir: "raptorq_files", - }, + P2PConfig: P2PConfig{Port: DefaultP2PPort, DataDir: "data/p2p"}, + LumeraClientConfig: LumeraClientConfig{GRPCAddr: DefaultLumeraGRPC, ChainID: chainID}, + RaptorQConfig: RaptorQConfig{FilesDir: DefaultRaptorQFilesDir}, } } diff --git a/supernode/node/action/server/cascade/cascade_action_server.go b/supernode/node/action/server/cascade/cascade_action_server.go deleted file mode 100644 index a99fbf0a..00000000 --- a/supernode/node/action/server/cascade/cascade_action_server.go +++ /dev/null @@ -1,355 +0,0 @@ -package cascade - -import ( - "encoding/hex" - "fmt" - "io" - "os" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - - "google.golang.org/grpc" -) - -type ActionServer struct { - pb.UnimplementedCascadeServiceServer - factory cascadeService.CascadeServiceFactory -} - -// NewCascadeActionServer creates a new CascadeActionServer with injected service -func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory) *ActionServer { - return &ActionServer{factory: factory} -} - -// calculateOptimalChunkSize returns an optimal chunk size based on file size -// to balance throughput and memory usage -func calculateOptimalChunkSize(fileSize int64) int { - const ( - minChunkSize = 64 * 1024 // 64 KB minimum - maxChunkSize = 4 * 1024 * 1024 // 4 MB maximum for 1GB+ files - smallFileThreshold = 1024 * 1024 // 1 MB - mediumFileThreshold 
= 50 * 1024 * 1024 // 50 MB - largeFileThreshold = 500 * 1024 * 1024 // 500 MB - ) - - var chunkSize int - - switch { - case fileSize <= smallFileThreshold: - // For small files (up to 1MB), use 64KB chunks - chunkSize = minChunkSize - case fileSize <= mediumFileThreshold: - // For medium files (1MB-50MB), use 256KB chunks - chunkSize = 256 * 1024 - case fileSize <= largeFileThreshold: - // For large files (50MB-500MB), use 1MB chunks - chunkSize = 1024 * 1024 - default: - // For very large files (500MB+), use 4MB chunks for optimal throughput - chunkSize = maxChunkSize - } - - // Ensure chunk size is within bounds - if chunkSize < minChunkSize { - chunkSize = minChunkSize - } - if chunkSize > maxChunkSize { - chunkSize = maxChunkSize - } - - return chunkSize -} - -func (server *ActionServer) Desc() *grpc.ServiceDesc { - return &pb.CascadeService_ServiceDesc -} - -func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) error { - fields := logtrace.Fields{ - logtrace.FieldMethod: "Register", - logtrace.FieldModule: "CascadeActionServer", - } - - ctx := stream.Context() - logtrace.Info(ctx, "client streaming request to upload cascade input data received", fields) - - const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit - - var ( - metadata *pb.Metadata - totalSize int - ) - - hasher, tempFile, tempFilePath, err := initializeHasherAndTempFile() - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to initialize hasher and temp file", fields) - return fmt.Errorf("initializing hasher and temp file: %w", err) - } - defer func(tempFile *os.File) { - err := tempFile.Close() - if err != nil && !errors.Is(err, os.ErrClosed) { - fields[logtrace.FieldError] = err.Error() - logtrace.Warn(ctx, "error closing temp file", fields) - } - }(tempFile) - - // Process incoming stream - for { - req, err := stream.Recv() - if err == io.EOF { - // End of stream - break - } - if err != nil { - fields[logtrace.FieldError] = 
err.Error() - logtrace.Error(ctx, "error receiving stream data", fields) - return fmt.Errorf("failed to receive stream data: %w", err) - } - - // Check which type of message we received - switch x := req.RequestType.(type) { - case *pb.RegisterRequest_Chunk: - if x.Chunk != nil { - - // hash the chunks - _, err := hasher.Write(x.Chunk.Data) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to write chunk to hasher", fields) - return fmt.Errorf("hashing error: %w", err) - } - - // write chunks to the file - _, err = tempFile.Write(x.Chunk.Data) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to write chunk to file", fields) - return fmt.Errorf("file write error: %w", err) - } - totalSize += len(x.Chunk.Data) - - // Validate total size doesn't exceed limit - if totalSize > maxFileSize { - fields[logtrace.FieldError] = "file size exceeds 1GB limit" - fields["total_size"] = totalSize - logtrace.Error(ctx, "upload rejected: file too large", fields) - return fmt.Errorf("file size %d exceeds maximum allowed size of 1GB", totalSize) - } - - logtrace.Info(ctx, "received data chunk", logtrace.Fields{ - "chunk_size": len(x.Chunk.Data), - "total_size_so_far": totalSize, - }) - } - case *pb.RegisterRequest_Metadata: - // Store metadata - this should be the final message - metadata = x.Metadata - logtrace.Info(ctx, "received metadata", logtrace.Fields{ - "task_id": metadata.TaskId, - "action_id": metadata.ActionId, - }) - } - } - - // Verify we received metadata - if metadata == nil { - logtrace.Error(ctx, "no metadata received in stream", fields) - return fmt.Errorf("no metadata received") - } - fields[logtrace.FieldTaskID] = metadata.GetTaskId() - fields[logtrace.FieldActionID] = metadata.GetActionId() - logtrace.Info(ctx, "metadata received from action-sdk", fields) - - // Ensure all data is written to disk before calculating hash - if err := tempFile.Sync(); err != nil { - 
fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to sync temp file", fields) - return fmt.Errorf("failed to sync temp file: %w", err) - } - - hash := hasher.Sum(nil) - hashHex := hex.EncodeToString(hash) - fields[logtrace.FieldHashHex] = hashHex - logtrace.Info(ctx, "final BLAKE3 hash generated", fields) - - targetPath, err := replaceTempDirWithTaskDir(metadata.GetTaskId(), tempFilePath, tempFile) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to replace temp dir with task dir", fields) - return fmt.Errorf("failed to replace temp dir with task dir: %w", err) - } - - // Process the complete data - task := server.factory.NewCascadeRegistrationTask() - err = task.Register(ctx, &cascadeService.RegisterRequest{ - TaskID: metadata.TaskId, - ActionID: metadata.ActionId, - DataHash: hash, - DataSize: totalSize, - FilePath: targetPath, - }, func(resp *cascadeService.RegisterResponse) error { - grpcResp := &pb.RegisterResponse{ - EventType: pb.SupernodeEventType(resp.EventType), - Message: resp.Message, - TxHash: resp.TxHash, - } - if err := stream.Send(grpcResp); err != nil { - logtrace.Error(ctx, "failed to send response to client", logtrace.Fields{ - logtrace.FieldError: err.Error(), - }) - return err - } - return nil - }) - - if err != nil { - logtrace.Error(ctx, "registration task failed", logtrace.Fields{ - logtrace.FieldError: err.Error(), - }) - return fmt.Errorf("registration failed: %w", err) - } - - logtrace.Info(ctx, "cascade registration completed successfully", fields) - return nil -} - -func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeService_DownloadServer) error { - fields := logtrace.Fields{ - logtrace.FieldMethod: "Download", - logtrace.FieldModule: "CascadeActionServer", - logtrace.FieldActionID: req.GetActionId(), - } - - ctx := stream.Context() - logtrace.Info(ctx, "download request received from client", fields) - - task := 
server.factory.NewCascadeRegistrationTask() - - // Verify signature if provided - if req.GetSignature() != "" { - // Cast to concrete type to access helper method - if cascadeTask, ok := task.(*cascadeService.CascadeRegistrationTask); ok { - err := cascadeTask.VerifyDownloadSignature(ctx, req.GetActionId(), req.GetSignature()) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "signature verification failed", fields) - return fmt.Errorf("signature verification failed: %w", err) - } - } else { - logtrace.Error(ctx, "unable to cast task to CascadeRegistrationTask", fields) - return fmt.Errorf("unable to verify signature: task type assertion failed") - } - } - - var restoredFilePath string - var tmpDir string - - // Ensure tmpDir is cleaned up even if errors occur after retrieval - defer func() { - if tmpDir != "" { - if err := task.CleanupDownload(ctx, tmpDir); err != nil { - logtrace.Error(ctx, "error cleaning up the tmp dir", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - logtrace.Info(ctx, "tmp dir has been cleaned up", logtrace.Fields{"tmp_dir": tmpDir}) - } - } - }() - - err := task.Download(ctx, &cascadeService.DownloadRequest{ - ActionID: req.GetActionId(), - }, func(resp *cascadeService.DownloadResponse) error { - grpcResp := &pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Event{ - Event: &pb.DownloadEvent{ - EventType: pb.SupernodeEventType(resp.EventType), - Message: resp.Message, - }, - }, - } - - if resp.FilePath != "" { - restoredFilePath = resp.FilePath - tmpDir = resp.DownloadedDir - } - - return stream.Send(grpcResp) - }) - - if err != nil { - logtrace.Error(ctx, "error occurred during download process", logtrace.Fields{ - logtrace.FieldError: err.Error(), - }) - return err - } - - if restoredFilePath == "" { - logtrace.Error(ctx, "no artefact file retrieved", fields) - return fmt.Errorf("no artefact to stream") - } - logtrace.Info(ctx, "streaming artefact file in chunks", fields) - - // 
Open the restored file and stream directly from disk to avoid buffering entire file in memory - f, err := os.Open(restoredFilePath) - if err != nil { - logtrace.Error(ctx, "failed to open restored file", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - logtrace.Error(ctx, "failed to stat restored file", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - - // Calculate optimal chunk size based on file size - chunkSize := calculateOptimalChunkSize(fi.Size()) - logtrace.Info(ctx, "calculated optimal chunk size for download", logtrace.Fields{ - "file_size": fi.Size(), - "chunk_size": chunkSize, - }) - - // Announce: file is ready to be served to the client - if err := stream.Send(&pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Event{ - Event: &pb.DownloadEvent{ - EventType: pb.SupernodeEventType_SERVE_READY, - Message: "File available for download", - }, - }, - }); err != nil { - logtrace.Error(ctx, "failed to send serve-ready event", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - - // Stream the file in fixed-size chunks - buf := make([]byte, chunkSize) - for { - n, readErr := f.Read(buf) - if n > 0 { - if err := stream.Send(&pb.DownloadResponse{ - ResponseType: &pb.DownloadResponse_Chunk{ - Chunk: &pb.DataChunk{Data: buf[:n]}, - }, - }); err != nil { - logtrace.Error(ctx, "failed to stream chunk", logtrace.Fields{logtrace.FieldError: err.Error()}) - return err - } - } - if readErr == io.EOF { - break - } - if readErr != nil { - return fmt.Errorf("chunked read failed: %w", readErr) - } - } - - // Cleanup is handled in deferred block above - - logtrace.Info(ctx, "completed streaming all chunks", fields) - return nil -} diff --git a/supernode/node/action/server/cascade/cascade_action_server_mock.go b/supernode/node/action/server/cascade/cascade_action_server_mock.go deleted file mode 100644 index 3113dcb3..00000000 --- 
a/supernode/node/action/server/cascade/cascade_action_server_mock.go +++ /dev/null @@ -1,41 +0,0 @@ -package cascade - -import ( - "context" - "io" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" - "google.golang.org/grpc/metadata" -) - -// mockStream simulates pb.CascadeService_RegisterServer -type mockStream struct { - ctx context.Context - request []*pb.RegisterRequest - sent []*pb.RegisterResponse - pos int -} - -func (m *mockStream) Context() context.Context { - return m.ctx -} - -func (m *mockStream) Send(resp *pb.RegisterResponse) error { - m.sent = append(m.sent, resp) - return nil -} - -func (m *mockStream) Recv() (*pb.RegisterRequest, error) { - if m.pos >= len(m.request) { - return nil, io.EOF - } - req := m.request[m.pos] - m.pos++ - return req, nil -} - -func (m *mockStream) SetHeader(md metadata.MD) error { return nil } -func (m *mockStream) SendHeader(md metadata.MD) error { return nil } -func (m *mockStream) SetTrailer(md metadata.MD) {} -func (m *mockStream) SendMsg(_ any) error { return nil } -func (m *mockStream) RecvMsg(_ any) error { return nil } diff --git a/supernode/node/action/server/cascade/cascade_action_server_test.go b/supernode/node/action/server/cascade/cascade_action_server_test.go deleted file mode 100644 index ff2738b3..00000000 --- a/supernode/node/action/server/cascade/cascade_action_server_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package cascade - -import ( - "context" - "errors" - "testing" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - cascademocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/mocks" - - "github.com/stretchr/testify/assert" - "github.com/golang/mock/gomock" -) - -func TestRegister_Success(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockTask := cascademocks.NewMockCascadeTask(ctrl) - mockFactory := 
cascademocks.NewMockCascadeServiceFactory(ctrl) - - // Expect Register to be called with any input, respond via callback - mockTask.EXPECT().Register(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, req *cascade.RegisterRequest, send func(*cascade.RegisterResponse) error) error { - return send(&cascade.RegisterResponse{ - EventType: 1, - Message: "registration successful", - TxHash: "tx123", - }) - }, - ).Times(1) - - mockFactory.EXPECT().NewCascadeRegistrationTask().Return(mockTask).Times(1) - - server := NewCascadeActionServer(mockFactory) - - stream := &mockStream{ - ctx: context.Background(), - request: []*pb.RegisterRequest{ - {RequestType: &pb.RegisterRequest_Chunk{Chunk: &pb.DataChunk{Data: []byte("abc123")}}}, - {RequestType: &pb.RegisterRequest_Metadata{ - Metadata: &pb.Metadata{TaskId: "t1", ActionId: "a1"}, - }}, - }, - } - - err := server.Register(stream) - assert.NoError(t, err) - assert.Len(t, stream.sent, 1) - assert.Equal(t, "registration successful", stream.sent[0].Message) - assert.Equal(t, "tx123", stream.sent[0].TxHash) -} - -func TestRegister_Error_NoMetadata(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockFactory := cascademocks.NewMockCascadeServiceFactory(ctrl) - server := NewCascadeActionServer(mockFactory) - - stream := &mockStream{ - ctx: context.Background(), - request: []*pb.RegisterRequest{ - {RequestType: &pb.RegisterRequest_Chunk{Chunk: &pb.DataChunk{Data: []byte("abc123")}}}, - }, - } - - err := server.Register(stream) - assert.EqualError(t, err, "no metadata received") -} - -func TestRegister_Error_TaskFails(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockTask := cascademocks.NewMockCascadeTask(ctrl) - mockFactory := cascademocks.NewMockCascadeServiceFactory(ctrl) - - mockTask.EXPECT().Register(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("task failed")).Times(1) - 
mockFactory.EXPECT().NewCascadeRegistrationTask().Return(mockTask).Times(1) - - server := NewCascadeActionServer(mockFactory) - - stream := &mockStream{ - ctx: context.Background(), - request: []*pb.RegisterRequest{ - {RequestType: &pb.RegisterRequest_Chunk{Chunk: &pb.DataChunk{Data: []byte("abc123")}}}, - {RequestType: &pb.RegisterRequest_Metadata{ - Metadata: &pb.Metadata{TaskId: "t1", ActionId: "a1"}, - }}, - }, - } - - err := server.Register(stream) - assert.EqualError(t, err, "registration failed: task failed") -} diff --git a/supernode/node/action/server/cascade/helper.go b/supernode/node/action/server/cascade/helper.go deleted file mode 100644 index ec005707..00000000 --- a/supernode/node/action/server/cascade/helper.go +++ /dev/null @@ -1,39 +0,0 @@ -package cascade - -import ( - "fmt" - "lukechampine.com/blake3" - "os" - "path/filepath" - - "github.com/LumeraProtocol/supernode/v2/pkg/errors" -) - -func initializeHasherAndTempFile() (*blake3.Hasher, *os.File, string, error) { - hasher := blake3.New(32, nil) - - // Create a unique temp file to avoid collisions across concurrent calls - tempFile, err := os.CreateTemp("", "cascade-upload-*") - if err != nil { - return nil, nil, "", fmt.Errorf("could not create temp file: %w", err) - } - - return hasher, tempFile, tempFile.Name(), nil -} - -func replaceTempDirWithTaskDir(taskID, tempFilePath string, tempFile *os.File) (targetPath string, err error) { - if err := tempFile.Close(); err != nil && !errors.Is(err, os.ErrClosed) { - return "", fmt.Errorf("failed to close temp file: %w", err) - } - - targetDir := filepath.Join(os.TempDir(), taskID) - if err := os.MkdirAll(targetDir, 0755); err != nil { - return "", fmt.Errorf("could not create task directory: %w", err) - } - targetPath = filepath.Join(targetDir, fmt.Sprintf("uploaded-%s.dat", taskID)) - if err := os.Rename(tempFilePath, targetPath); err != nil { - return "", fmt.Errorf("could not move file to final location: %w", err) - } - - return targetPath, nil -} 
diff --git a/supernode/node/supernode/gateway/server.go b/supernode/node/supernode/gateway/server.go deleted file mode 100644 index 5440a7f4..00000000 --- a/supernode/node/supernode/gateway/server.go +++ /dev/null @@ -1,126 +0,0 @@ -package gateway - -import ( - "context" - "fmt" - "net" - "net/http" - "strconv" - "time" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" -) - -// DefaultGatewayPort is an uncommon port for internal gateway use -const DefaultGatewayPort = 8002 - -// Server represents the HTTP gateway server -type Server struct { - ipAddress string - port int - server *http.Server - supernodeServer pb.SupernodeServiceServer -} - -// NewServer creates a new HTTP gateway server that directly calls the service -// If port is 0, it will use the default port -func NewServer(ipAddress string, port int, supernodeServer pb.SupernodeServiceServer) (*Server, error) { - if supernodeServer == nil { - return nil, fmt.Errorf("supernode server is required") - } - - // Use default port if not specified - if port == 0 { - port = DefaultGatewayPort - } - - return &Server{ - ipAddress: ipAddress, - port: port, - supernodeServer: supernodeServer, - }, nil -} - -// Run starts the HTTP gateway server (implements service interface) -func (s *Server) Run(ctx context.Context) error { - // Create gRPC-Gateway mux with custom JSON marshaler options - mux := runtime.NewServeMux( - runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{ - EmitDefaults: true, // This ensures zero values are included - OrigName: true, // Use original proto field names - }), - ) - - // Register the service handler directly - err := pb.RegisterSupernodeServiceHandlerServer(ctx, mux, s.supernodeServer) - if err != nil { - return fmt.Errorf("failed to register gateway handler: %w", err) - } - - // Create HTTP mux for custom endpoints - httpMux := http.NewServeMux() - - // 
Register gRPC-Gateway endpoints - httpMux.Handle("/api/", mux) - - // Register Swagger endpoints - httpMux.HandleFunc("/swagger.json", s.serveSwaggerJSON) - httpMux.HandleFunc("/swagger-ui/", s.serveSwaggerUI) - httpMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/" { - http.Redirect(w, r, "/swagger-ui/", http.StatusFound) - } else { - http.NotFound(w, r) - } - }) - - // Create HTTP server - s.server = &http.Server{ - Addr: net.JoinHostPort(s.ipAddress, strconv.Itoa(s.port)), - Handler: s.corsMiddleware(httpMux), - ReadTimeout: 15 * time.Second, - WriteTimeout: 15 * time.Second, - IdleTimeout: 60 * time.Second, - } - - logtrace.Info(ctx, "Starting HTTP gateway server", logtrace.Fields{ - "address": s.ipAddress, - "port": s.port, - }) - - // Start server - if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { - return fmt.Errorf("gateway server failed: %w", err) - } - - return nil -} - -// Stop gracefully stops the HTTP gateway server (implements service interface) -func (s *Server) Stop(ctx context.Context) error { - if s.server == nil { - return nil - } - - logtrace.Info(ctx, "Shutting down HTTP gateway server", nil) - return s.server.Shutdown(ctx) -} - -// corsMiddleware adds CORS headers for web access -func (s *Server) corsMiddleware(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS") - w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, Authorization") - - if r.Method == "OPTIONS" { - w.WriteHeader(http.StatusOK) - return - } - - h.ServeHTTP(w, r) - }) -} diff --git a/supernode/node/supernode/gateway/swagger.json b/supernode/node/supernode/gateway/swagger.json deleted file mode 100644 index af023816..00000000 --- a/supernode/node/supernode/gateway/swagger.json 
+++ /dev/null @@ -1,296 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "supernode/supernode.proto", - "version": "version not set" - }, - "tags": [ - { - "name": "SupernodeService" - } - ], - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/api/v1/services": { - "get": { - "operationId": "SupernodeService_ListServices", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/supernodeListServicesResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/rpcStatus" - } - } - }, - "tags": [ - "SupernodeService" - ] - } - }, - "/api/v1/status": { - "get": { - "operationId": "SupernodeService_GetStatus", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/supernodeStatusResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/rpcStatus" - } - } - }, - "tags": [ - "SupernodeService" - ] - } - } - }, - "definitions": { - "ResourcesCPU": { - "type": "object", - "properties": { - "usagePercent": { - "type": "number", - "format": "double", - "title": "CPU usage percentage (0-100)" - }, - "cores": { - "type": "integer", - "format": "int32", - "title": "Number of CPU cores" - } - } - }, - "ResourcesMemory": { - "type": "object", - "properties": { - "totalGb": { - "type": "number", - "format": "double", - "title": "Total memory in GB" - }, - "usedGb": { - "type": "number", - "format": "double", - "title": "Used memory in GB" - }, - "availableGb": { - "type": "number", - "format": "double", - "title": "Available memory in GB" - }, - "usagePercent": { - "type": "number", - "format": "double", - "title": "Memory usage percentage (0-100)" - } - } - }, - "ResourcesStorage": { - "type": "object", - "properties": { - "path": { - "type": "string", - "title": "Storage path being monitored" - 
}, - "totalBytes": { - "type": "string", - "format": "uint64" - }, - "usedBytes": { - "type": "string", - "format": "uint64" - }, - "availableBytes": { - "type": "string", - "format": "uint64" - }, - "usagePercent": { - "type": "number", - "format": "double", - "title": "Storage usage percentage (0-100)" - } - } - }, - "StatusResponseNetwork": { - "type": "object", - "properties": { - "peersCount": { - "type": "integer", - "format": "int32", - "title": "Number of connected peers in P2P network" - }, - "peerAddresses": { - "type": "array", - "items": { - "type": "string" - }, - "title": "List of connected peer addresses (optional, may be empty for privacy)" - } - }, - "title": "Network information" - }, - "StatusResponseResources": { - "type": "object", - "properties": { - "cpu": { - "$ref": "#/definitions/ResourcesCPU" - }, - "memory": { - "$ref": "#/definitions/ResourcesMemory" - }, - "storageVolumes": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/ResourcesStorage" - } - }, - "hardwareSummary": { - "type": "string", - "title": "Formatted hardware summary (e.g., \"8 cores / 32GB RAM\")" - } - }, - "title": "System resource information" - }, - "StatusResponseServiceTasks": { - "type": "object", - "properties": { - "serviceName": { - "type": "string" - }, - "taskIds": { - "type": "array", - "items": { - "type": "string" - } - }, - "taskCount": { - "type": "integer", - "format": "int32" - } - }, - "title": "ServiceTasks contains task information for a specific service" - }, - "protobufAny": { - "type": "object", - "properties": { - "@type": { - "type": "string" - } - }, - "additionalProperties": {} - }, - "rpcStatus": { - "type": "object", - "properties": { - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "supernodeListServicesResponse": { - "type": "object", - 
"properties": { - "services": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/supernodeServiceInfo" - } - }, - "count": { - "type": "integer", - "format": "int32" - } - } - }, - "supernodeServiceInfo": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "methods": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "supernodeStatusResponse": { - "type": "object", - "properties": { - "version": { - "type": "string", - "title": "Supernode version" - }, - "uptimeSeconds": { - "type": "string", - "format": "uint64", - "title": "Uptime in seconds" - }, - "resources": { - "$ref": "#/definitions/StatusResponseResources" - }, - "runningTasks": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/StatusResponseServiceTasks" - }, - "title": "Services with currently running tasks" - }, - "registeredServices": { - "type": "array", - "items": { - "type": "string" - }, - "title": "All registered/available services" - }, - "network": { - "$ref": "#/definitions/StatusResponseNetwork", - "title": "P2P network information" - }, - "rank": { - "type": "integer", - "format": "int32", - "title": "Rank in the top supernodes list (0 if not in top list)" - }, - "ipAddress": { - "type": "string", - "title": "Supernode IP address with port (e.g., \"192.168.1.1:4445\")" - } - }, - "title": "The StatusResponse represents system status with clear organization" - } - } -} diff --git a/supernode/node/supernode/server/config.go b/supernode/node/supernode/server/config.go deleted file mode 100644 index 4e9d0f23..00000000 --- a/supernode/node/supernode/server/config.go +++ /dev/null @@ -1,19 +0,0 @@ -package server - -const ( - defaultPort = 4444 -) - -// Config contains settings of the supernode server. -type Config struct { - Identity string - ListenAddresses string - Port int -} - -// NewConfig returns a new Config instance. 
-func NewConfig() *Config { - return &Config{ - Port: defaultPort, - } -} diff --git a/supernode/node/supernode/server/config_test.go b/supernode/node/supernode/server/config_test.go deleted file mode 100644 index 33e06f68..00000000 --- a/supernode/node/supernode/server/config_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package server - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewConfig_Defaults(t *testing.T) { - cfg := NewConfig() - - assert.NotNil(t, cfg) - assert.Equal(t, "", cfg.ListenAddresses, "default listen address should be empty") - assert.Equal(t, 4444, cfg.Port, "default port should be 4444") - assert.Equal(t, "", cfg.Identity, "default identity should be empty") -} diff --git a/supernode/node/supernode/server/mock_keyring.go b/supernode/node/supernode/server/mock_keyring.go deleted file mode 100644 index 85cb9910..00000000 --- a/supernode/node/supernode/server/mock_keyring.go +++ /dev/null @@ -1,379 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/cosmos/cosmos-sdk/crypto/keyring (interfaces: Keyring) - -// Package mock_keyring is a generated GoMock package. -package server - -import ( - reflect "reflect" - - keyring "github.com/cosmos/cosmos-sdk/crypto/keyring" - types "github.com/cosmos/cosmos-sdk/crypto/types" - types0 "github.com/cosmos/cosmos-sdk/types" - signing "github.com/cosmos/cosmos-sdk/types/tx/signing" - gomock "go.uber.org/mock/gomock" -) - -// MockKeyring is a mock of Keyring interface. -type MockKeyring struct { - ctrl *gomock.Controller - recorder *MockKeyringMockRecorder -} - -// MockKeyringMockRecorder is the mock recorder for MockKeyring. -type MockKeyringMockRecorder struct { - mock *MockKeyring -} - -// NewMockKeyring creates a new mock instance. 
-func NewMockKeyring(ctrl *gomock.Controller) *MockKeyring { - mock := &MockKeyring{ctrl: ctrl} - mock.recorder = &MockKeyringMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockKeyring) EXPECT() *MockKeyringMockRecorder { - return m.recorder -} - -// Backend mocks base method. -func (m *MockKeyring) Backend() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Backend") - ret0, _ := ret[0].(string) - return ret0 -} - -// Backend indicates an expected call of Backend. -func (mr *MockKeyringMockRecorder) Backend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Backend", reflect.TypeOf((*MockKeyring)(nil).Backend)) -} - -// Delete mocks base method. -func (m *MockKeyring) Delete(arg0 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockKeyringMockRecorder) Delete(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKeyring)(nil).Delete), arg0) -} - -// DeleteByAddress mocks base method. -func (m *MockKeyring) DeleteByAddress(arg0 types0.Address) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteByAddress", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteByAddress indicates an expected call of DeleteByAddress. -func (mr *MockKeyringMockRecorder) DeleteByAddress(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteByAddress", reflect.TypeOf((*MockKeyring)(nil).DeleteByAddress), arg0) -} - -// ExportPrivKeyArmor mocks base method. 
-func (m *MockKeyring) ExportPrivKeyArmor(arg0, arg1 string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPrivKeyArmor", arg0, arg1) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPrivKeyArmor indicates an expected call of ExportPrivKeyArmor. -func (mr *MockKeyringMockRecorder) ExportPrivKeyArmor(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPrivKeyArmor", reflect.TypeOf((*MockKeyring)(nil).ExportPrivKeyArmor), arg0, arg1) -} - -// ExportPrivKeyArmorByAddress mocks base method. -func (m *MockKeyring) ExportPrivKeyArmorByAddress(arg0 types0.Address, arg1 string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPrivKeyArmorByAddress", arg0, arg1) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPrivKeyArmorByAddress indicates an expected call of ExportPrivKeyArmorByAddress. -func (mr *MockKeyringMockRecorder) ExportPrivKeyArmorByAddress(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPrivKeyArmorByAddress", reflect.TypeOf((*MockKeyring)(nil).ExportPrivKeyArmorByAddress), arg0, arg1) -} - -// ExportPubKeyArmor mocks base method. -func (m *MockKeyring) ExportPubKeyArmor(arg0 string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPubKeyArmor", arg0) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPubKeyArmor indicates an expected call of ExportPubKeyArmor. -func (mr *MockKeyringMockRecorder) ExportPubKeyArmor(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPubKeyArmor", reflect.TypeOf((*MockKeyring)(nil).ExportPubKeyArmor), arg0) -} - -// ExportPubKeyArmorByAddress mocks base method. 
-func (m *MockKeyring) ExportPubKeyArmorByAddress(arg0 types0.Address) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportPubKeyArmorByAddress", arg0) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportPubKeyArmorByAddress indicates an expected call of ExportPubKeyArmorByAddress. -func (mr *MockKeyringMockRecorder) ExportPubKeyArmorByAddress(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportPubKeyArmorByAddress", reflect.TypeOf((*MockKeyring)(nil).ExportPubKeyArmorByAddress), arg0) -} - -// ImportPrivKey mocks base method. -func (m *MockKeyring) ImportPrivKey(arg0, arg1, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImportPrivKey", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ImportPrivKey indicates an expected call of ImportPrivKey. -func (mr *MockKeyringMockRecorder) ImportPrivKey(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportPrivKey", reflect.TypeOf((*MockKeyring)(nil).ImportPrivKey), arg0, arg1, arg2) -} - -// ImportPrivKeyHex mocks base method. -func (m *MockKeyring) ImportPrivKeyHex(arg0, arg1, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImportPrivKeyHex", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ImportPrivKeyHex indicates an expected call of ImportPrivKeyHex. -func (mr *MockKeyringMockRecorder) ImportPrivKeyHex(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportPrivKeyHex", reflect.TypeOf((*MockKeyring)(nil).ImportPrivKeyHex), arg0, arg1, arg2) -} - -// ImportPubKey mocks base method. 
-func (m *MockKeyring) ImportPubKey(arg0, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImportPubKey", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ImportPubKey indicates an expected call of ImportPubKey. -func (mr *MockKeyringMockRecorder) ImportPubKey(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportPubKey", reflect.TypeOf((*MockKeyring)(nil).ImportPubKey), arg0, arg1) -} - -// Key mocks base method. -func (m *MockKeyring) Key(arg0 string) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Key", arg0) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Key indicates an expected call of Key. -func (mr *MockKeyringMockRecorder) Key(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Key", reflect.TypeOf((*MockKeyring)(nil).Key), arg0) -} - -// KeyByAddress mocks base method. -func (m *MockKeyring) KeyByAddress(arg0 types0.Address) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "KeyByAddress", arg0) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// KeyByAddress indicates an expected call of KeyByAddress. -func (mr *MockKeyringMockRecorder) KeyByAddress(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeyByAddress", reflect.TypeOf((*MockKeyring)(nil).KeyByAddress), arg0) -} - -// List mocks base method. -func (m *MockKeyring) List() ([]*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "List") - ret0, _ := ret[0].([]*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// List indicates an expected call of List. 
-func (mr *MockKeyringMockRecorder) List() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKeyring)(nil).List)) -} - -// MigrateAll mocks base method. -func (m *MockKeyring) MigrateAll() ([]*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MigrateAll") - ret0, _ := ret[0].([]*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// MigrateAll indicates an expected call of MigrateAll. -func (mr *MockKeyringMockRecorder) MigrateAll() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MigrateAll", reflect.TypeOf((*MockKeyring)(nil).MigrateAll)) -} - -// NewAccount mocks base method. -func (m *MockKeyring) NewAccount(arg0, arg1, arg2, arg3 string, arg4 keyring.SignatureAlgo) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAccount", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewAccount indicates an expected call of NewAccount. -func (mr *MockKeyringMockRecorder) NewAccount(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAccount", reflect.TypeOf((*MockKeyring)(nil).NewAccount), arg0, arg1, arg2, arg3, arg4) -} - -// NewMnemonic mocks base method. -func (m *MockKeyring) NewMnemonic(arg0 string, arg1 keyring.Language, arg2, arg3 string, arg4 keyring.SignatureAlgo) (*keyring.Record, string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewMnemonic", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(string) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// NewMnemonic indicates an expected call of NewMnemonic. 
-func (mr *MockKeyringMockRecorder) NewMnemonic(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewMnemonic", reflect.TypeOf((*MockKeyring)(nil).NewMnemonic), arg0, arg1, arg2, arg3, arg4) -} - -// Rename mocks base method. -func (m *MockKeyring) Rename(arg0, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Rename", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Rename indicates an expected call of Rename. -func (mr *MockKeyringMockRecorder) Rename(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Rename", reflect.TypeOf((*MockKeyring)(nil).Rename), arg0, arg1) -} - -// SaveLedgerKey mocks base method. -func (m *MockKeyring) SaveLedgerKey(arg0 string, arg1 keyring.SignatureAlgo, arg2 string, arg3, arg4, arg5 uint32) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SaveLedgerKey", arg0, arg1, arg2, arg3, arg4, arg5) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SaveLedgerKey indicates an expected call of SaveLedgerKey. -func (mr *MockKeyringMockRecorder) SaveLedgerKey(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveLedgerKey", reflect.TypeOf((*MockKeyring)(nil).SaveLedgerKey), arg0, arg1, arg2, arg3, arg4, arg5) -} - -// SaveMultisig mocks base method. -func (m *MockKeyring) SaveMultisig(arg0 string, arg1 types.PubKey) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SaveMultisig", arg0, arg1) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SaveMultisig indicates an expected call of SaveMultisig. 
-func (mr *MockKeyringMockRecorder) SaveMultisig(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveMultisig", reflect.TypeOf((*MockKeyring)(nil).SaveMultisig), arg0, arg1) -} - -// SaveOfflineKey mocks base method. -func (m *MockKeyring) SaveOfflineKey(arg0 string, arg1 types.PubKey) (*keyring.Record, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SaveOfflineKey", arg0, arg1) - ret0, _ := ret[0].(*keyring.Record) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SaveOfflineKey indicates an expected call of SaveOfflineKey. -func (mr *MockKeyringMockRecorder) SaveOfflineKey(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveOfflineKey", reflect.TypeOf((*MockKeyring)(nil).SaveOfflineKey), arg0, arg1) -} - -// Sign mocks base method. -func (m *MockKeyring) Sign(arg0 string, arg1 []byte, arg2 signing.SignMode) ([]byte, types.PubKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Sign", arg0, arg1, arg2) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(types.PubKey) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// Sign indicates an expected call of Sign. -func (mr *MockKeyringMockRecorder) Sign(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sign", reflect.TypeOf((*MockKeyring)(nil).Sign), arg0, arg1, arg2) -} - -// SignByAddress mocks base method. -func (m *MockKeyring) SignByAddress(arg0 types0.Address, arg1 []byte, arg2 signing.SignMode) ([]byte, types.PubKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SignByAddress", arg0, arg1, arg2) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(types.PubKey) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// SignByAddress indicates an expected call of SignByAddress. 
-func (mr *MockKeyringMockRecorder) SignByAddress(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignByAddress", reflect.TypeOf((*MockKeyring)(nil).SignByAddress), arg0, arg1, arg2) -} - -// SupportedAlgorithms mocks base method. -func (m *MockKeyring) SupportedAlgorithms() (keyring.SigningAlgoList, keyring.SigningAlgoList) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SupportedAlgorithms") - ret0, _ := ret[0].(keyring.SigningAlgoList) - ret1, _ := ret[1].(keyring.SigningAlgoList) - return ret0, ret1 -} - -// SupportedAlgorithms indicates an expected call of SupportedAlgorithms. -func (mr *MockKeyringMockRecorder) SupportedAlgorithms() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SupportedAlgorithms", reflect.TypeOf((*MockKeyring)(nil).SupportedAlgorithms)) -} diff --git a/supernode/node/supernode/server/server_test.go b/supernode/node/supernode/server/server_test.go deleted file mode 100644 index 7803bcce..00000000 --- a/supernode/node/supernode/server/server_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package server - -import ( - "testing" - - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/stretchr/testify/assert" - gomock "go.uber.org/mock/gomock" - - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" -) - -// --- Mock service implementing server.service --- -type mockService struct{} - -func (m *mockService) Desc() *grpc.ServiceDesc { - return &grpc.ServiceDesc{ - ServiceName: "test.Service", - HandlerType: (*interface{})(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{}, - } -} - -func TestNewServer_WithValidConfig(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockKeyring := NewMockKeyring(ctl) - mockLumeraClient := lumera.NewMockClient(ctl) - - cfg := NewConfig() - cfg.ListenAddresses = "127.0.0.1" - s, err := New(cfg, "supernode-test", 
mockKeyring, mockLumeraClient, &mockService{}) - assert.NoError(t, err) - assert.NotNil(t, s) -} - -func TestNewServer_WithNilConfig(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockKeyring := NewMockKeyring(ctl) - mockLumeraClient := lumera.NewMockClient(ctl) - - s, err := New(nil, "supernode-test", mockKeyring, mockLumeraClient) - assert.Nil(t, s) - assert.EqualError(t, err, "config is nil") -} - -func TestSetServiceStatusAndClose(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockKeyring := NewMockKeyring(ctl) - mockLumeraClient := lumera.NewMockClient(ctl) - - cfg := NewConfig() - cfg.ListenAddresses = "127.0.0.1" - s, _ := New(cfg, "test", mockKeyring, mockLumeraClient, &mockService{}) - _ = s.setupGRPCServer() - - s.SetServiceStatus("test.Service", grpc_health_v1.HealthCheckResponse_SERVING) - s.Close() - - // No assertion — success is no panic / crash on shutdown -} diff --git a/supernode/node/supernode/server/status_server.go b/supernode/node/supernode/server/status_server.go deleted file mode 100644 index d90b1e3e..00000000 --- a/supernode/node/supernode/server/status_server.go +++ /dev/null @@ -1,268 +0,0 @@ -package server - -import ( - "context" - - "google.golang.org/grpc" - - pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" -) - -// SupernodeServer implements the SupernodeService gRPC service -type SupernodeServer struct { - pb.UnimplementedSupernodeServiceServer - statusService *supernode.SupernodeStatusService - services []ServiceInfo // Store service descriptors -} - -// ServiceInfo holds information about a registered service -type ServiceInfo struct { - Name string - Methods []string -} - -// NewSupernodeServer creates a new SupernodeServer -func NewSupernodeServer(statusService *supernode.SupernodeStatusService) *SupernodeServer { - return &SupernodeServer{ - statusService: statusService, - services: 
[]ServiceInfo{}, - } -} - -// RegisterService adds a service to the known services list -func (s *SupernodeServer) RegisterService(serviceName string, desc *grpc.ServiceDesc) { - methods := make([]string, 0, len(desc.Methods)+len(desc.Streams)) - - // Add unary methods - for _, method := range desc.Methods { - methods = append(methods, method.MethodName) - } - - // Add streaming methods - for _, stream := range desc.Streams { - methods = append(methods, stream.StreamName) - } - - s.services = append(s.services, ServiceInfo{ - Name: serviceName, - Methods: methods, - }) -} - -// GetStatus implements SupernodeService.GetStatus -func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) { - // Get status from the common service; gate P2P metrics by request flag - status, err := s.statusService.GetStatus(ctx, req.GetIncludeP2PMetrics()) - if err != nil { - return nil, err - } - - // Convert to protobuf response - response := &pb.StatusResponse{ - Version: status.Version, - UptimeSeconds: status.UptimeSeconds, - Resources: &pb.StatusResponse_Resources{ - Cpu: &pb.StatusResponse_Resources_CPU{ - UsagePercent: status.Resources.CPU.UsagePercent, - Cores: status.Resources.CPU.Cores, - }, - Memory: &pb.StatusResponse_Resources_Memory{ - TotalGb: status.Resources.Memory.TotalGB, - UsedGb: status.Resources.Memory.UsedGB, - AvailableGb: status.Resources.Memory.AvailableGB, - UsagePercent: status.Resources.Memory.UsagePercent, - }, - StorageVolumes: make([]*pb.StatusResponse_Resources_Storage, 0, len(status.Resources.Storage)), - HardwareSummary: status.Resources.HardwareSummary, - }, - RunningTasks: make([]*pb.StatusResponse_ServiceTasks, 0, len(status.RunningTasks)), - RegisteredServices: status.RegisteredServices, - Network: &pb.StatusResponse_Network{ - PeersCount: status.Network.PeersCount, - PeerAddresses: status.Network.PeerAddresses, - }, - Rank: status.Rank, - IpAddress: status.IPAddress, - } - - // Convert storage 
information - for _, storage := range status.Resources.Storage { - storageInfo := &pb.StatusResponse_Resources_Storage{ - Path: storage.Path, - TotalBytes: storage.TotalBytes, - UsedBytes: storage.UsedBytes, - AvailableBytes: storage.AvailableBytes, - UsagePercent: storage.UsagePercent, - } - response.Resources.StorageVolumes = append(response.Resources.StorageVolumes, storageInfo) - } - - // Convert service tasks - for _, service := range status.RunningTasks { - serviceTask := &pb.StatusResponse_ServiceTasks{ - ServiceName: service.ServiceName, - TaskIds: service.TaskIDs, - TaskCount: service.TaskCount, - } - response.RunningTasks = append(response.RunningTasks, serviceTask) - } - - // Map optional P2P metrics - if req.GetIncludeP2PMetrics() { - pm := status.P2PMetrics - pbdht := &pb.StatusResponse_P2PMetrics_DhtMetrics{} - for _, p := range pm.DhtMetrics.StoreSuccessRecent { - pbdht.StoreSuccessRecent = append(pbdht.StoreSuccessRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{ - TimeUnix: p.TimeUnix, - Requests: p.Requests, - Successful: p.Successful, - SuccessRate: p.SuccessRate, - }) - } - for _, p := range pm.DhtMetrics.BatchRetrieveRecent { - pbdht.BatchRetrieveRecent = append(pbdht.BatchRetrieveRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{ - TimeUnix: p.TimeUnix, - Keys: p.Keys, - Required: p.Required, - FoundLocal: p.FoundLocal, - FoundNetwork: p.FoundNetwork, - DurationMs: p.DurationMS, - }) - } - pbdht.HotPathBannedSkips = pm.DhtMetrics.HotPathBannedSkips - pbdht.HotPathBanIncrements = pm.DhtMetrics.HotPathBanIncrements - - pbpm := &pb.StatusResponse_P2PMetrics{ - DhtMetrics: pbdht, - NetworkHandleMetrics: map[string]*pb.StatusResponse_P2PMetrics_HandleCounters{}, - ConnPoolMetrics: map[string]int64{}, - BanList: []*pb.StatusResponse_P2PMetrics_BanEntry{}, - Database: &pb.StatusResponse_P2PMetrics_DatabaseStats{}, - Disk: &pb.StatusResponse_P2PMetrics_DiskStatus{}, - } - - // Network handle metrics - for k, v := 
range pm.NetworkHandleMetrics { - pbpm.NetworkHandleMetrics[k] = &pb.StatusResponse_P2PMetrics_HandleCounters{ - Total: v.Total, - Success: v.Success, - Failure: v.Failure, - Timeout: v.Timeout, - } - } - // Conn pool metrics - for k, v := range pm.ConnPoolMetrics { - pbpm.ConnPoolMetrics[k] = v - } - // Ban list - for _, b := range pm.BanList { - pbpm.BanList = append(pbpm.BanList, &pb.StatusResponse_P2PMetrics_BanEntry{ - Id: b.ID, - Ip: b.IP, - Port: b.Port, - Count: b.Count, - CreatedAtUnix: b.CreatedAtUnix, - AgeSeconds: b.AgeSeconds, - }) - } - // Database - pbpm.Database.P2PDbSizeMb = pm.Database.P2PDBSizeMB - pbpm.Database.P2PDbRecordsCount = pm.Database.P2PDBRecordsCount - // Disk - pbpm.Disk.AllMb = pm.Disk.AllMB - pbpm.Disk.UsedMb = pm.Disk.UsedMB - pbpm.Disk.FreeMb = pm.Disk.FreeMB - - // Recent batch store - for _, e := range pm.RecentBatchStore { - pbpm.RecentBatchStore = append(pbpm.RecentBatchStore, &pb.StatusResponse_P2PMetrics_RecentBatchStoreEntry{ - TimeUnix: e.TimeUnix, - SenderId: e.SenderID, - SenderIp: e.SenderIP, - Keys: int32(e.Keys), - DurationMs: e.DurationMS, - Ok: e.OK, - Error: e.Error, - }) - } - // Recent batch retrieve - for _, e := range pm.RecentBatchRetrieve { - pbpm.RecentBatchRetrieve = append(pbpm.RecentBatchRetrieve, &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{ - TimeUnix: e.TimeUnix, - SenderId: e.SenderID, - SenderIp: e.SenderIP, - Requested: int32(e.Requested), - Found: int32(e.Found), - DurationMs: e.DurationMS, - Error: e.Error, - }) - } - - // Per-IP buckets - if pm.RecentBatchStoreByIP != nil { - pbpm.RecentBatchStoreByIp = map[string]*pb.StatusResponse_P2PMetrics_RecentBatchStoreList{} - for ip, list := range pm.RecentBatchStoreByIP { - pbList := &pb.StatusResponse_P2PMetrics_RecentBatchStoreList{} - for _, e := range list { - pbList.Entries = append(pbList.Entries, &pb.StatusResponse_P2PMetrics_RecentBatchStoreEntry{ - TimeUnix: e.TimeUnix, - SenderId: e.SenderID, - SenderIp: e.SenderIP, - Keys: 
int32(e.Keys), - DurationMs: e.DurationMS, - Ok: e.OK, - Error: e.Error, - }) - } - pbpm.RecentBatchStoreByIp[ip] = pbList - } - } - if pm.RecentBatchRetrieveByIP != nil { - pbpm.RecentBatchRetrieveByIp = map[string]*pb.StatusResponse_P2PMetrics_RecentBatchRetrieveList{} - for ip, list := range pm.RecentBatchRetrieveByIP { - pbList := &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveList{} - for _, e := range list { - pbList.Entries = append(pbList.Entries, &pb.StatusResponse_P2PMetrics_RecentBatchRetrieveEntry{ - TimeUnix: e.TimeUnix, - SenderId: e.SenderID, - SenderIp: e.SenderIP, - Requested: int32(e.Requested), - Found: int32(e.Found), - DurationMs: e.DurationMS, - Error: e.Error, - }) - } - pbpm.RecentBatchRetrieveByIp[ip] = pbList - } - } - - response.P2PMetrics = pbpm - } - - // Codec configuration removed - - return response, nil -} - -// ListServices implements SupernodeService.ListServices -func (s *SupernodeServer) ListServices(ctx context.Context, req *pb.ListServicesRequest) (*pb.ListServicesResponse, error) { - // Convert internal ServiceInfo to protobuf ServiceInfo - services := make([]*pb.ServiceInfo, 0, len(s.services)) - for _, svc := range s.services { - services = append(services, &pb.ServiceInfo{ - Name: svc.Name, - Methods: svc.Methods, - }) - } - - return &pb.ListServicesResponse{ - Services: services, - Count: int32(len(services)), - }, nil -} - -// Desc implements the service interface for gRPC service registration -func (s *SupernodeServer) Desc() *grpc.ServiceDesc { - return &pb.SupernodeService_ServiceDesc -} diff --git a/supernode/node/supernode/server/status_server_test.go b/supernode/node/supernode/server/status_server_test.go deleted file mode 100644 index 7b2808d7..00000000 --- a/supernode/node/supernode/server/status_server_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package server - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - pb 
"github.com/LumeraProtocol/supernode/v2/gen/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" -) - -func TestSupernodeServer_GetStatus(t *testing.T) { - ctx := context.Background() - - // Create status service - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - - // Create server - server := NewSupernodeServer(statusService) - - // Test with empty service - resp, err := server.GetStatus(ctx, &pb.StatusRequest{}) - require.NoError(t, err) - assert.NotNil(t, resp) - - // Check basic structure - assert.NotNil(t, resp.Resources) - assert.NotNil(t, resp.Resources.Cpu) - assert.NotNil(t, resp.Resources.Memory) - assert.NotNil(t, resp.RunningTasks) - assert.NotNil(t, resp.RegisteredServices) - - // Check version field - assert.NotEmpty(t, resp.Version) - - // Check uptime field - assert.True(t, resp.UptimeSeconds >= 0) - - // Check CPU metrics - assert.True(t, resp.Resources.Cpu.UsagePercent >= 0) - assert.True(t, resp.Resources.Cpu.UsagePercent <= 100) - assert.True(t, resp.Resources.Cpu.Cores >= 0) - - // Check Memory metrics (now in GB) - assert.True(t, resp.Resources.Memory.TotalGb > 0) - assert.True(t, resp.Resources.Memory.UsagePercent >= 0) - assert.True(t, resp.Resources.Memory.UsagePercent <= 100) - - // Check hardware summary - if resp.Resources.Cpu.Cores > 0 && resp.Resources.Memory.TotalGb > 0 { - assert.NotEmpty(t, resp.Resources.HardwareSummary) - } - - // Check Storage (should have default root filesystem) - assert.NotEmpty(t, resp.Resources.StorageVolumes) - assert.Equal(t, "/", resp.Resources.StorageVolumes[0].Path) - - // Should have no services initially - assert.Empty(t, resp.RunningTasks) - assert.Empty(t, resp.RegisteredServices) - - // Check new fields have default values - assert.NotNil(t, resp.Network) - assert.Equal(t, int32(0), resp.Network.PeersCount) - assert.Empty(t, resp.Network.PeerAddresses) - assert.Equal(t, 
int32(0), resp.Rank) - assert.Empty(t, resp.IpAddress) -} - -func TestSupernodeServer_GetStatusWithService(t *testing.T) { - ctx := context.Background() - - // Create status service - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - - // Add a mock task provider - mockProvider := &common.MockTaskProvider{ - ServiceName: "test-service", - TaskIDs: []string{"task1", "task2"}, - } - statusService.RegisterTaskProvider(mockProvider) - - // Create server - server := NewSupernodeServer(statusService) - - // Test with service - resp, err := server.GetStatus(ctx, &pb.StatusRequest{}) - require.NoError(t, err) - assert.NotNil(t, resp) - - // Should have one service - assert.Len(t, resp.RunningTasks, 1) - assert.Len(t, resp.RegisteredServices, 1) - assert.Equal(t, []string{"test-service"}, resp.RegisteredServices) - - // Check service details - service := resp.RunningTasks[0] - assert.Equal(t, "test-service", service.ServiceName) - assert.Equal(t, int32(2), service.TaskCount) - assert.Equal(t, []string{"task1", "task2"}, service.TaskIds) -} - -func TestSupernodeServer_Desc(t *testing.T) { - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - server := NewSupernodeServer(statusService) - - desc := server.Desc() - assert.NotNil(t, desc) - assert.Equal(t, "supernode.SupernodeService", desc.ServiceName) -} diff --git a/supernode/services/cascade/adaptors/lumera.go b/supernode/services/cascade/adaptors/lumera.go deleted file mode 100644 index f5e3b52e..00000000 --- a/supernode/services/cascade/adaptors/lumera.go +++ /dev/null @@ -1,81 +0,0 @@ -package adaptors - -import ( - "context" - "fmt" - - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - sdktx "github.com/cosmos/cosmos-sdk/types/tx" -) - -//go:generate mockgen -destination=mocks/lumera_mock.go -package=cascadeadaptormocks -source=lumera.go - -// 
LumeraClient defines the interface for interacting with Lumera chain data during cascade registration. -type LumeraClient interface { - // SupernodeModule - GetTopSupernodes(ctx context.Context, height uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) - - // Action Module - GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) - FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) - SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) - GetActionFee(ctx context.Context, dataSize string) (*actiontypes.QueryGetActionFeeResponse, error) - // Auth - Verify(ctx context.Context, creator string, file []byte, sigBytes []byte) error -} - -// Client is the concrete implementation used in production. -type Client struct { - lc lumera.Client -} - -func NewLumeraClient(client lumera.Client) LumeraClient { - return &Client{ - lc: client, - } -} - -func (c *Client) GetAction(ctx context.Context, actionID string) (*actiontypes.QueryGetActionResponse, error) { - return c.lc.Action().GetAction(ctx, actionID) -} - -func (c *Client) GetActionFee(ctx context.Context, dataSize string) (*actiontypes.QueryGetActionFeeResponse, error) { - return c.lc.Action().GetActionFee(ctx, dataSize) -} - -func (c *Client) FinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.BroadcastTxResponse, error) { - resp, err := c.lc.ActionMsg().FinalizeCascadeAction(ctx, actionID, rqids) - if err != nil { - // Preserve underlying gRPC status/details - return nil, fmt.Errorf("finalize cascade action broadcast failed: %w", err) - } - - // Surface chain-level failures (non-zero code) with rich context - if resp != nil && resp.TxResponse != nil && resp.TxResponse.Code != 0 { - return nil, fmt.Errorf( - "tx failed: code=%d codespace=%s height=%d gas_wanted=%d gas_used=%d raw_log=%s", - resp.TxResponse.Code, - 
resp.TxResponse.Codespace, - resp.TxResponse.Height, - resp.TxResponse.GasWanted, - resp.TxResponse.GasUsed, - resp.TxResponse.RawLog, - ) - } - - return resp, nil -} - -func (c *Client) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*sdktx.SimulateResponse, error) { - return c.lc.ActionMsg().SimulateFinalizeCascadeAction(ctx, actionID, rqids) -} - -func (c *Client) GetTopSupernodes(ctx context.Context, height uint64) (*sntypes.QueryGetTopSuperNodesForBlockResponse, error) { - return c.lc.SuperNode().GetTopSuperNodesForBlock(ctx, height) -} - -func (c *Client) Verify(ctx context.Context, creator string, file []byte, sigBytes []byte) error { - return c.lc.Auth().Verify(ctx, creator, file, sigBytes) -} diff --git a/supernode/services/cascade/adaptors/mocks/lumera_mock.go b/supernode/services/cascade/adaptors/mocks/lumera_mock.go deleted file mode 100644 index 29cdd48f..00000000 --- a/supernode/services/cascade/adaptors/mocks/lumera_mock.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: lumera.go - -// Package cascadeadaptormocks is a generated GoMock package. -package cascadeadaptormocks - -import ( - context "context" - reflect "reflect" - - types "github.com/LumeraProtocol/lumera/x/action/v1/types" - types0 "github.com/LumeraProtocol/lumera/x/supernode/v1/types" - tx "github.com/cosmos/cosmos-sdk/types/tx" - gomock "github.com/golang/mock/gomock" -) - -// MockLumeraClient is a mock of LumeraClient interface. -type MockLumeraClient struct { - ctrl *gomock.Controller - recorder *MockLumeraClientMockRecorder -} - -// MockLumeraClientMockRecorder is the mock recorder for MockLumeraClient. -type MockLumeraClientMockRecorder struct { - mock *MockLumeraClient -} - -// NewMockLumeraClient creates a new mock instance. 
-func NewMockLumeraClient(ctrl *gomock.Controller) *MockLumeraClient { - mock := &MockLumeraClient{ctrl: ctrl} - mock.recorder = &MockLumeraClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockLumeraClient) EXPECT() *MockLumeraClientMockRecorder { - return m.recorder -} - -// FinalizeAction mocks base method. -func (m *MockLumeraClient) FinalizeAction(ctx context.Context, actionID string, rqids []string) (*tx.BroadcastTxResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FinalizeAction", ctx, actionID, rqids) - ret0, _ := ret[0].(*tx.BroadcastTxResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FinalizeAction indicates an expected call of FinalizeAction. -func (mr *MockLumeraClientMockRecorder) FinalizeAction(ctx, actionID, rqids interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).FinalizeAction), ctx, actionID, rqids) -} - -// GetAction mocks base method. -func (m *MockLumeraClient) GetAction(ctx context.Context, actionID string) (*types.QueryGetActionResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAction", ctx, actionID) - ret0, _ := ret[0].(*types.QueryGetActionResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAction indicates an expected call of GetAction. -func (mr *MockLumeraClientMockRecorder) GetAction(ctx, actionID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAction", reflect.TypeOf((*MockLumeraClient)(nil).GetAction), ctx, actionID) -} - -// GetActionFee mocks base method. 
-func (m *MockLumeraClient) GetActionFee(ctx context.Context, dataSize string) (*types.QueryGetActionFeeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetActionFee", ctx, dataSize) - ret0, _ := ret[0].(*types.QueryGetActionFeeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetActionFee indicates an expected call of GetActionFee. -func (mr *MockLumeraClientMockRecorder) GetActionFee(ctx, dataSize interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActionFee", reflect.TypeOf((*MockLumeraClient)(nil).GetActionFee), ctx, dataSize) -} - -// GetTopSupernodes mocks base method. -func (m *MockLumeraClient) GetTopSupernodes(ctx context.Context, height uint64) (*types0.QueryGetTopSuperNodesForBlockResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTopSupernodes", ctx, height) - ret0, _ := ret[0].(*types0.QueryGetTopSuperNodesForBlockResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetTopSupernodes indicates an expected call of GetTopSupernodes. -func (mr *MockLumeraClientMockRecorder) GetTopSupernodes(ctx, height interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopSupernodes", reflect.TypeOf((*MockLumeraClient)(nil).GetTopSupernodes), ctx, height) -} - -// SimulateFinalizeAction mocks base method. -func (m *MockLumeraClient) SimulateFinalizeAction(ctx context.Context, actionID string, rqids []string) (*tx.SimulateResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SimulateFinalizeAction", ctx, actionID, rqids) - ret0, _ := ret[0].(*tx.SimulateResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SimulateFinalizeAction indicates an expected call of SimulateFinalizeAction. 
-func (mr *MockLumeraClientMockRecorder) SimulateFinalizeAction(ctx, actionID, rqids interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimulateFinalizeAction", reflect.TypeOf((*MockLumeraClient)(nil).SimulateFinalizeAction), ctx, actionID, rqids) -} - -// Verify mocks base method. -func (m *MockLumeraClient) Verify(ctx context.Context, creator string, file, sigBytes []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Verify", ctx, creator, file, sigBytes) - ret0, _ := ret[0].(error) - return ret0 -} - -// Verify indicates an expected call of Verify. -func (mr *MockLumeraClientMockRecorder) Verify(ctx, creator, file, sigBytes interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockLumeraClient)(nil).Verify), ctx, creator, file, sigBytes) -} diff --git a/supernode/services/cascade/adaptors/mocks/p2p_mock.go b/supernode/services/cascade/adaptors/mocks/p2p_mock.go deleted file mode 100644 index ec99d92a..00000000 --- a/supernode/services/cascade/adaptors/mocks/p2p_mock.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: p2p.go - -// Package cascadeadaptormocks is a generated GoMock package. -package cascadeadaptormocks - -import ( - context "context" - reflect "reflect" - - logtrace "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - adaptors "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - gomock "github.com/golang/mock/gomock" -) - -// MockP2PService is a mock of P2PService interface. -type MockP2PService struct { - ctrl *gomock.Controller - recorder *MockP2PServiceMockRecorder -} - -// MockP2PServiceMockRecorder is the mock recorder for MockP2PService. -type MockP2PServiceMockRecorder struct { - mock *MockP2PService -} - -// NewMockP2PService creates a new mock instance. 
-func NewMockP2PService(ctrl *gomock.Controller) *MockP2PService { - mock := &MockP2PService{ctrl: ctrl} - mock.recorder = &MockP2PServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockP2PService) EXPECT() *MockP2PServiceMockRecorder { - return m.recorder -} - -// StoreArtefacts mocks base method. -func (m *MockP2PService) StoreArtefacts(ctx context.Context, req adaptors.StoreArtefactsRequest, f logtrace.Fields) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StoreArtefacts", ctx, req, f) - ret0, _ := ret[0].(error) - return ret0 -} - -// StoreArtefacts indicates an expected call of StoreArtefacts. -func (mr *MockP2PServiceMockRecorder) StoreArtefacts(ctx, req, f interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreArtefacts", reflect.TypeOf((*MockP2PService)(nil).StoreArtefacts), ctx, req, f) -} diff --git a/supernode/services/cascade/adaptors/mocks/rq_mock.go b/supernode/services/cascade/adaptors/mocks/rq_mock.go deleted file mode 100644 index f45f2eb5..00000000 --- a/supernode/services/cascade/adaptors/mocks/rq_mock.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: rq.go - -// Package cascadeadaptormocks is a generated GoMock package. -package cascadeadaptormocks - -import ( - context "context" - reflect "reflect" - - codec "github.com/LumeraProtocol/supernode/v2/pkg/codec" - adaptors "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - gomock "github.com/golang/mock/gomock" -) - -// MockCodecService is a mock of CodecService interface. -type MockCodecService struct { - ctrl *gomock.Controller - recorder *MockCodecServiceMockRecorder -} - -// MockCodecServiceMockRecorder is the mock recorder for MockCodecService. -type MockCodecServiceMockRecorder struct { - mock *MockCodecService -} - -// NewMockCodecService creates a new mock instance. 
-func NewMockCodecService(ctrl *gomock.Controller) *MockCodecService { - mock := &MockCodecService{ctrl: ctrl} - mock.recorder = &MockCodecServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCodecService) EXPECT() *MockCodecServiceMockRecorder { - return m.recorder -} - -// Decode mocks base method. -func (m *MockCodecService) Decode(ctx context.Context, req adaptors.DecodeRequest) (adaptors.DecodeResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Decode", ctx, req) - ret0, _ := ret[0].(adaptors.DecodeResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Decode indicates an expected call of Decode. -func (mr *MockCodecServiceMockRecorder) Decode(ctx, req interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decode", reflect.TypeOf((*MockCodecService)(nil).Decode), ctx, req) -} - -// EncodeInput mocks base method. -func (m *MockCodecService) EncodeInput(ctx context.Context, taskID, path string, dataSize int) (adaptors.EncodeResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EncodeInput", ctx, taskID, path, dataSize) - ret0, _ := ret[0].(adaptors.EncodeResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// EncodeInput indicates an expected call of EncodeInput. -func (mr *MockCodecServiceMockRecorder) EncodeInput(ctx, taskID, path, dataSize interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EncodeInput", reflect.TypeOf((*MockCodecService)(nil).EncodeInput), ctx, taskID, path, dataSize) -} - -// PrepareDecode mocks base method. 
-func (m *MockCodecService) PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) ([]string, func(int, string, []byte) (string, error), func() error, *codec.Workspace, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrepareDecode", ctx, actionID, layout) - ret0, _ := ret[0].([]string) - ret1, _ := ret[1].(func(int, string, []byte) (string, error)) - ret2, _ := ret[2].(func() error) - ret3, _ := ret[3].(*codec.Workspace) - ret4, _ := ret[4].(error) - return ret0, ret1, ret2, ret3, ret4 -} - -// PrepareDecode indicates an expected call of PrepareDecode. -func (mr *MockCodecServiceMockRecorder) PrepareDecode(ctx, actionID, layout interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareDecode", reflect.TypeOf((*MockCodecService)(nil).PrepareDecode), ctx, actionID, layout) -} diff --git a/supernode/services/cascade/adaptors/rq.go b/supernode/services/cascade/adaptors/rq.go deleted file mode 100644 index 92e89819..00000000 --- a/supernode/services/cascade/adaptors/rq.go +++ /dev/null @@ -1,81 +0,0 @@ -package adaptors - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" -) - -// CodecService defines the interface for RaptorQ encoding of input data. -// -//go:generate mockgen -destination=mocks/rq_mock.go -package=cascadeadaptormocks -source=rq.go -type CodecService interface { - EncodeInput(ctx context.Context, taskID string, path string, dataSize int) (EncodeResult, error) - PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error) - Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) -} - -// EncodeResult represents the outcome of encoding the input data. 
-type EncodeResult struct { - SymbolsDir string - Metadata codec.Layout -} - -// codecImpl is the default implementation using the real codec service. -type codecImpl struct { - codec codec.Codec -} - -// NewCodecService creates a new production instance of CodecService. -func NewCodecService(codec codec.Codec) CodecService { - return &codecImpl{codec: codec} -} - -// EncodeInput encodes the provided data and returns symbols and metadata. -func (c *codecImpl) EncodeInput(ctx context.Context, taskID string, path string, dataSize int) (EncodeResult, error) { - resp, err := c.codec.Encode(ctx, codec.EncodeRequest{ - TaskID: taskID, - Path: path, - DataSize: dataSize, - }) - if err != nil { - return EncodeResult{}, err - } - - return EncodeResult{ - SymbolsDir: resp.SymbolsDir, - Metadata: resp.Metadata, - }, nil -} - -type DecodeRequest struct { - Symbols map[string][]byte - Layout codec.Layout - ActionID string -} - -type DecodeResponse struct { - DecodeTmpDir string - FilePath string -} - -// Decode decodes the provided symbols and returns the original file -func (c *codecImpl) Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) { - resp, err := c.codec.Decode(ctx, codec.DecodeRequest{ - Symbols: req.Symbols, - Layout: req.Layout, - ActionID: req.ActionID, - }) - if err != nil { - return DecodeResponse{}, err - } - - return DecodeResponse{ - FilePath: resp.FilePath, - DecodeTmpDir: resp.DecodeTmpDir, - }, nil -} - -func (c *codecImpl) PrepareDecode(ctx context.Context, actionID string, layout codec.Layout) (blockPaths []string, Write func(block int, symbolID string, data []byte) (string, error), Cleanup func() error, ws *codec.Workspace, err error) { - return -} diff --git a/supernode/services/cascade/config.go b/supernode/services/cascade/config.go deleted file mode 100644 index 7a0f1ef2..00000000 --- a/supernode/services/cascade/config.go +++ /dev/null @@ -1,13 +0,0 @@ -package cascade - -import ( - 
"github.com/LumeraProtocol/supernode/v2/supernode/services/common" -) - -// Config contains settings for the cascade service -type Config struct { - common.Config `mapstructure:",squash" json:"-"` - - RaptorQServiceAddress string `mapstructure:"-" json:"-"` - RqFilesDir string `mapstructure:"rq_files_dir" json:"rq_files_dir,omitempty"` -} diff --git a/supernode/services/cascade/download.go b/supernode/services/cascade/download.go deleted file mode 100644 index 363834bc..00000000 --- a/supernode/services/cascade/download.go +++ /dev/null @@ -1,308 +0,0 @@ -package cascade - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "os" - "sort" - "time" - - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/crypto" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" -) - -const targetRequiredPercent = 17 - -type DownloadRequest struct { - ActionID string -} - -type DownloadResponse struct { - EventType SupernodeEventType - Message string - FilePath string - DownloadedDir string -} - -func (task *CascadeRegistrationTask) Download( - ctx context.Context, - req *DownloadRequest, - send func(resp *DownloadResponse) error, -) (err error) { - fields := logtrace.Fields{logtrace.FieldMethod: "Download", logtrace.FieldRequest: req} - logtrace.Info(ctx, "Cascade download request received", fields) - - // Ensure task status is finalized regardless of outcome - defer func() { - if err != nil { - task.UpdateStatus(common.StatusTaskCanceled) - } else { - task.UpdateStatus(common.StatusTaskCompleted) - } - task.Cancel() - }() - - actionDetails, err := 
task.LumeraClient.GetAction(ctx, req.ActionID) - if err != nil { - fields[logtrace.FieldError] = err - return task.wrapErr(ctx, "failed to get action", err, fields) - } - logtrace.Info(ctx, "Action retrieved", fields) - task.streamDownloadEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", "", send) - - if actionDetails.GetAction().State != actiontypes.ActionStateDone { - err = errors.New("action is not in a valid state") - fields[logtrace.FieldError] = "action state is not done yet" - fields[logtrace.FieldActionState] = actionDetails.GetAction().State - return task.wrapErr(ctx, "action not found", err, fields) - } - logtrace.Info(ctx, "Action state validated", fields) - - metadata, err := task.decodeCascadeMetadata(ctx, actionDetails.GetAction().Metadata, fields) - if err != nil { - fields[logtrace.FieldError] = err.Error() - return task.wrapErr(ctx, "error decoding cascade metadata", err, fields) - } - logtrace.Info(ctx, "Cascade metadata decoded", fields) - task.streamDownloadEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", "", send) - - // Notify: network retrieval phase begins - task.streamDownloadEvent(SupernodeEventTypeNetworkRetrieveStarted, "Network retrieval started", "", "", send) - - filePath, tmpDir, err := task.downloadArtifacts(ctx, actionDetails.GetAction().ActionID, metadata, fields, send) - if err != nil { - fields[logtrace.FieldError] = err.Error() - return task.wrapErr(ctx, "failed to download artifacts", err, fields) - } - logtrace.Info(ctx, "File reconstructed and hash verified", fields) - // Notify: decode completed, file ready on disk - task.streamDownloadEvent(SupernodeEventTypeDecodeCompleted, "Decode completed", filePath, tmpDir, send) - - return nil -} - -func (task *CascadeRegistrationTask) downloadArtifacts(ctx context.Context, actionID string, metadata actiontypes.CascadeMetadata, fields logtrace.Fields, send func(resp *DownloadResponse) error) (string, string, error) { - logtrace.Info(ctx, 
"started downloading the artifacts", fields) - - var ( - layout codec.Layout - layoutFetchMS int64 - layoutDecodeMS int64 - layoutAttempts int - ) - - for _, indexID := range metadata.RqIdsIds { - indexFile, err := task.P2PClient.Retrieve(ctx, indexID) - if err != nil || len(indexFile) == 0 { - continue - } - - // Parse index file to get layout IDs - indexData, err := task.parseIndexFile(indexFile) - if err != nil { - logtrace.Info(ctx, "failed to parse index file", fields) - continue - } - - // Try to retrieve layout files using layout IDs from index file - var netMS, decMS int64 - layout, netMS, decMS, layoutAttempts, err = task.retrieveLayoutFromIndex(ctx, indexData, fields) - if err != nil { - logtrace.Info(ctx, "failed to retrieve layout from index", fields) - continue - } - layoutFetchMS = netMS - layoutDecodeMS = decMS - - if len(layout.Blocks) > 0 { - logtrace.Info(ctx, "layout file retrieved via index", fields) - break - } - } - - if len(layout.Blocks) == 0 { - return "", "", errors.New("no symbols found in RQ metadata") - } - // Persist layout timing in fields for downstream metrics - fields["layout_fetch_ms"] = layoutFetchMS - fields["layout_decode_ms"] = layoutDecodeMS - fields["layout_attempts"] = layoutAttempts - return task.restoreFileFromLayout(ctx, layout, metadata.DataHash, actionID, send) -} - -func (task *CascadeRegistrationTask) restoreFileFromLayout( - ctx context.Context, - layout codec.Layout, - dataHash string, - actionID string, - send func(resp *DownloadResponse) error, -) (string, string, error) { - - fields := logtrace.Fields{ - logtrace.FieldActionID: actionID, - } - var allSymbols []string - for _, block := range layout.Blocks { - allSymbols = append(allSymbols, block.Symbols...) 
- } - sort.Strings(allSymbols) - - totalSymbols := len(allSymbols) - fields["totalSymbols"] = totalSymbols - // Compute target requirement (reporting only; does not change behavior) - targetRequiredCount := (totalSymbols*targetRequiredPercent + 99) / 100 - if targetRequiredCount < 1 && totalSymbols > 0 { - targetRequiredCount = 1 - } - logtrace.Info(ctx, "Retrieving target-required symbols for decode", fields) - - // Enable retrieve metrics capture for this action - cm.StartRetrieveCapture(actionID) - defer cm.StopRetrieveCapture(actionID) - - // Measure symbols batch retrieve duration - retrieveStart := time.Now() - // Tag context with metrics task ID (actionID) - ctxRetrieve := cm.WithTaskID(ctx, actionID) - // Retrieve only a fraction of symbols (targetRequiredCount) based on redundancy - // The DHT will short-circuit once it finds the required number across the provided keys - reqCount := targetRequiredCount - if reqCount > totalSymbols { - reqCount = totalSymbols - } - symbols, err := task.P2PClient.BatchRetrieve(ctxRetrieve, allSymbols, reqCount, actionID) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "batch retrieve failed", fields) - return "", "", fmt.Errorf("batch retrieve symbols: %w", err) - } - retrieveMS := time.Since(retrieveStart).Milliseconds() - - // Measure decode duration - decodeStart := time.Now() - decodeInfo, err := task.RQ.Decode(ctx, adaptors.DecodeRequest{ - ActionID: actionID, - Symbols: symbols, - Layout: layout, - }) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "decode failed", fields) - return "", "", fmt.Errorf("decode symbols using RaptorQ: %w", err) - } - decodeMS := time.Since(decodeStart).Milliseconds() - - // Set minimal retrieve summary and emit event strictly from internal collector - cm.SetRetrieveSummary(actionID, retrieveMS, decodeMS) - payload := cm.BuildDownloadEventPayloadFromCollector(actionID) - if retrieve, ok := 
payload["retrieve"].(map[string]any); ok { - retrieve["target_required_percent"] = targetRequiredPercent - retrieve["target_required_count"] = targetRequiredCount - retrieve["total_symbols"] = totalSymbols - } - if b, err := json.MarshalIndent(payload, "", " "); err == nil { - task.streamDownloadEvent(SupernodeEventTypeArtefactsDownloaded, string(b), "", "", send) - } - - fileHash, err := crypto.HashFileIncrementally(decodeInfo.FilePath, 0) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "failed to hash file", fields) - return "", "", fmt.Errorf("hash file: %w", err) - } - if fileHash == nil { - fields[logtrace.FieldError] = "file hash is nil" - logtrace.Error(ctx, "failed to hash file", fields) - return "", "", errors.New("file hash is nil") - } - - err = task.verifyDataHash(ctx, fileHash, dataHash, fields) - if err != nil { - logtrace.Error(ctx, "failed to verify hash", fields) - fields[logtrace.FieldError] = err.Error() - return "", decodeInfo.DecodeTmpDir, err - } - logtrace.Info(ctx, "File successfully restored and hash verified", fields) - - return decodeInfo.FilePath, decodeInfo.DecodeTmpDir, nil -} - -func (task *CascadeRegistrationTask) streamDownloadEvent(eventType SupernodeEventType, msg string, filePath string, tmpDir string, send func(resp *DownloadResponse) error) { - _ = send(&DownloadResponse{ - EventType: eventType, - Message: msg, - FilePath: filePath, - DownloadedDir: tmpDir, - }) -} - -// parseIndexFile parses compressed index file to extract IndexFile structure -func (task *CascadeRegistrationTask) parseIndexFile(data []byte) (IndexFile, error) { - decompressed, err := utils.ZstdDecompress(data) - if err != nil { - return IndexFile{}, errors.Errorf("decompress index file: %w", err) - } - - // Parse decompressed data: base64IndexFile.signature.counter - parts := bytes.Split(decompressed, []byte{SeparatorByte}) - if len(parts) < 2 { - return IndexFile{}, errors.New("invalid index file format") - } - - // Decode 
the base64 index file - return decodeIndexFile(string(parts[0])) -} - -// retrieveLayoutFromIndex retrieves layout file using layout IDs from index file -func (task *CascadeRegistrationTask) retrieveLayoutFromIndex(ctx context.Context, indexData IndexFile, fields logtrace.Fields) (codec.Layout, int64, int64, int, error) { - // Try to retrieve layout files using layout IDs from index file - var ( - totalFetchMS int64 - totalDecodeMS int64 - attempts int - ) - for _, layoutID := range indexData.LayoutIDs { - attempts++ - t0 := time.Now() - layoutFile, err := task.P2PClient.Retrieve(ctx, layoutID) - totalFetchMS += time.Since(t0).Milliseconds() - if err != nil || len(layoutFile) == 0 { - continue - } - - t1 := time.Now() - layout, _, _, err := parseRQMetadataFile(layoutFile) - totalDecodeMS += time.Since(t1).Milliseconds() - if err != nil { - continue - } - - if len(layout.Blocks) > 0 { - return layout, totalFetchMS, totalDecodeMS, attempts, nil - } - } - - return codec.Layout{}, totalFetchMS, totalDecodeMS, attempts, errors.New("no valid layout found in index") -} - -func (task *CascadeRegistrationTask) CleanupDownload(ctx context.Context, actionID string) error { - if actionID == "" { - return errors.New("actionID is empty") - } - - // For now, we use actionID as the directory path to maintain compatibility - if err := os.RemoveAll(actionID); err != nil { - return errors.Errorf("failed to delete download directory: %s, :%s", actionID, err.Error()) - } - - return nil -} diff --git a/supernode/services/cascade/events.go b/supernode/services/cascade/events.go deleted file mode 100644 index 0b25d3b8..00000000 --- a/supernode/services/cascade/events.go +++ /dev/null @@ -1,25 +0,0 @@ -package cascade - -type SupernodeEventType int - -const ( - SupernodeEventTypeUNKNOWN SupernodeEventType = 0 - SupernodeEventTypeActionRetrieved SupernodeEventType = 1 - SupernodeEventTypeActionFeeVerified SupernodeEventType = 2 - SupernodeEventTypeTopSupernodeCheckPassed SupernodeEventType 
= 3 - SupernodeEventTypeMetadataDecoded SupernodeEventType = 4 - SupernodeEventTypeDataHashVerified SupernodeEventType = 5 - SupernodeEventTypeInputEncoded SupernodeEventType = 6 - SupernodeEventTypeSignatureVerified SupernodeEventType = 7 - SupernodeEventTypeRQIDsGenerated SupernodeEventType = 8 - SupernodeEventTypeRqIDsVerified SupernodeEventType = 9 - SupernodeEventTypeFinalizeSimulated SupernodeEventType = 10 - SupernodeEventTypeArtefactsStored SupernodeEventType = 11 - SupernodeEventTypeActionFinalized SupernodeEventType = 12 - SupernodeEventTypeArtefactsDownloaded SupernodeEventType = 13 - SupernodeEventTypeFinalizeSimulationFailed SupernodeEventType = 14 - // Download phase markers - SupernodeEventTypeNetworkRetrieveStarted SupernodeEventType = 15 - SupernodeEventTypeDecodeCompleted SupernodeEventType = 16 - SupernodeEventTypeServeReady SupernodeEventType = 17 -) diff --git a/supernode/services/cascade/events_test.go b/supernode/services/cascade/events_test.go deleted file mode 100644 index ddf98871..00000000 --- a/supernode/services/cascade/events_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package cascade - -import ( - "testing" -) - -func TestSupernodeEventTypeValues(t *testing.T) { - tests := []struct { - name string - value SupernodeEventType - expected int - }{ - {"UNKNOWN", SupernodeEventTypeUNKNOWN, 0}, - {"ActionRetrieved", SupernodeEventTypeActionRetrieved, 1}, - {"ActionFeeVerified", SupernodeEventTypeActionFeeVerified, 2}, - {"TopSupernodeCheckPassed", SupernodeEventTypeTopSupernodeCheckPassed, 3}, - {"MetadataDecoded", SupernodeEventTypeMetadataDecoded, 4}, - {"DataHashVerified", SupernodeEventTypeDataHashVerified, 5}, - {"InputEncoded", SupernodeEventTypeInputEncoded, 6}, - {"SignatureVerified", SupernodeEventTypeSignatureVerified, 7}, - {"RQIDsGenerated", SupernodeEventTypeRQIDsGenerated, 8}, - {"RqIDsVerified", SupernodeEventTypeRqIDsVerified, 9}, - {"FinalizeSimulated", SupernodeEventTypeFinalizeSimulated, 10}, - {"ArtefactsStored", 
SupernodeEventTypeArtefactsStored, 11}, - {"ActionFinalized", SupernodeEventTypeActionFinalized, 12}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if int(tt.value) != tt.expected { - t.Errorf("Expected %s to be %d, got %d", tt.name, tt.expected, tt.value) - } - }) - } -} diff --git a/supernode/services/cascade/helper.go b/supernode/services/cascade/helper.go deleted file mode 100644 index fb8c7ef5..00000000 --- a/supernode/services/cascade/helper.go +++ /dev/null @@ -1,382 +0,0 @@ -package cascade - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "strconv" - "strings" - - "cosmossdk.io/math" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - cm "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/golang/protobuf/proto" - json "github.com/json-iterator/go" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// layout stats helpers removed to keep download metrics minimal. 
- -func (task *CascadeRegistrationTask) fetchAction(ctx context.Context, actionID string, f logtrace.Fields) (*actiontypes.Action, error) { - res, err := task.LumeraClient.GetAction(ctx, actionID) - if err != nil { - return nil, task.wrapErr(ctx, "failed to get action", err, f) - } - - if res.GetAction().ActionID == "" { - return nil, task.wrapErr(ctx, "action not found", errors.New(""), f) - } - logtrace.Info(ctx, "action has been retrieved", f) - - return res.GetAction(), nil -} - -func (task *CascadeRegistrationTask) ensureIsTopSupernode(ctx context.Context, blockHeight uint64, f logtrace.Fields) error { - top, err := task.LumeraClient.GetTopSupernodes(ctx, blockHeight) - if err != nil { - return task.wrapErr(ctx, "failed to get top SNs", err, f) - } - logtrace.Info(ctx, "Fetched Top Supernodes", f) - - if !supernode.Exists(top.Supernodes, task.config.SupernodeAccountAddress) { - // Build information about supernodes for better error context - addresses := make([]string, len(top.Supernodes)) - for i, sn := range top.Supernodes { - addresses[i] = sn.SupernodeAccount - } - logtrace.Info(ctx, "Supernode not in top list", logtrace.Fields{ - "currentAddress": task.config.SupernodeAccountAddress, - "topSupernodes": addresses, - }) - return task.wrapErr(ctx, "current supernode does not exist in the top SNs list", - errors.Errorf("current address: %s, top supernodes: %v", task.config.SupernodeAccountAddress, addresses), f) - } - - return nil -} - -func (task *CascadeRegistrationTask) decodeCascadeMetadata(ctx context.Context, raw []byte, f logtrace.Fields) (actiontypes.CascadeMetadata, error) { - var meta actiontypes.CascadeMetadata - if err := proto.Unmarshal(raw, &meta); err != nil { - return meta, task.wrapErr(ctx, "failed to unmarshal cascade metadata", err, f) - } - return meta, nil -} - -func (task *CascadeRegistrationTask) verifyDataHash(ctx context.Context, dh []byte, expected string, f logtrace.Fields) error { - b64 := utils.B64Encode(dh) - if string(b64) != 
expected { - return task.wrapErr(ctx, "data hash doesn't match", errors.New(""), f) - } - logtrace.Info(ctx, "request data-hash has been matched with the action data-hash", f) - - return nil -} - -func (task *CascadeRegistrationTask) encodeInput(ctx context.Context, actionID string, path string, dataSize int, f logtrace.Fields) (*adaptors.EncodeResult, error) { - resp, err := task.RQ.EncodeInput(ctx, actionID, path, dataSize) - if err != nil { - return nil, task.wrapErr(ctx, "failed to encode data", err, f) - } - return &resp, nil -} - -func (task *CascadeRegistrationTask) verifySignatureAndDecodeLayout(ctx context.Context, encoded string, creator string, - encodedMeta codec.Layout, f logtrace.Fields) (codec.Layout, string, error) { - - // Extract index file and creator signature from encoded data - // The signatures field contains: Base64(index_file).creators_signature - indexFileB64, creatorSig, err := extractIndexFileAndSignature(encoded) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to extract index file and creator signature", err, f) - } - - // Verify creator signature on index file - creatorSigBytes, err := base64.StdEncoding.DecodeString(creatorSig) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode creator signature from base64", err, f) - } - - if err := task.LumeraClient.Verify(ctx, creator, []byte(indexFileB64), creatorSigBytes); err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify creator signature", err, f) - } - logtrace.Info(ctx, "creator signature successfully verified", f) - - // Decode index file to get the layout signature - indexFile, err := decodeIndexFile(indexFileB64) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to decode index file", err, f) - } - - // Verify layout signature on the actual layout - layoutSigBytes, err := base64.StdEncoding.DecodeString(indexFile.LayoutSignature) - if err != nil { - return codec.Layout{}, "", 
task.wrapErr(ctx, "failed to decode layout signature from base64", err, f) - } - - layoutJSON, err := json.Marshal(encodedMeta) - if err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to marshal layout", err, f) - } - layoutB64 := utils.B64Encode(layoutJSON) - if err := task.LumeraClient.Verify(ctx, creator, layoutB64, layoutSigBytes); err != nil { - return codec.Layout{}, "", task.wrapErr(ctx, "failed to verify layout signature", err, f) - } - logtrace.Info(ctx, "layout signature successfully verified", f) - - return encodedMeta, indexFile.LayoutSignature, nil -} - -func (task *CascadeRegistrationTask) generateRQIDFiles(ctx context.Context, meta actiontypes.CascadeMetadata, - sig, creator string, encodedMeta codec.Layout, f logtrace.Fields) (GenRQIdentifiersFilesResponse, error) { - // The signatures field contains: Base64(index_file).creators_signature - // This full format will be used for ID generation to match chain expectations - - // Generate layout files - layoutRes, err := GenRQIdentifiersFiles(ctx, GenRQIdentifiersFilesRequest{ - Metadata: encodedMeta, - CreatorSNAddress: creator, - RqMax: uint32(meta.RqIdsMax), - Signature: sig, - IC: uint32(meta.RqIdsIc), - }) - if err != nil { - return GenRQIdentifiersFilesResponse{}, - task.wrapErr(ctx, "failed to generate layout files", err, f) - } - - // Generate index files using full signatures format for ID generation (matches chain expectation) - indexIDs, indexFiles, err := GenIndexFiles(ctx, layoutRes.RedundantMetadataFiles, sig, meta.Signatures, uint32(meta.RqIdsIc), uint32(meta.RqIdsMax)) - if err != nil { - return GenRQIdentifiersFilesResponse{}, - task.wrapErr(ctx, "failed to generate index files", err, f) - } - - // Store layout files and index files separately in P2P - allFiles := append(layoutRes.RedundantMetadataFiles, indexFiles...) 
- - // Return index IDs (sent to chain) and all files (stored in P2P) - return GenRQIdentifiersFilesResponse{ - RQIDs: indexIDs, - RedundantMetadataFiles: allFiles, - }, nil -} - -// storeArtefacts persists cascade artefacts (ID files + RaptorQ symbols) via the -// P2P adaptor. P2P does not return metrics; cascade summarizes and emits them. -func (task *CascadeRegistrationTask) storeArtefacts(ctx context.Context, actionID string, idFiles [][]byte, symbolsDir string, f logtrace.Fields) error { - return task.P2P.StoreArtefacts(ctx, adaptors.StoreArtefactsRequest{ - IDFiles: idFiles, - SymbolsDir: symbolsDir, - TaskID: task.ID(), - ActionID: actionID, - }, f) -} - -func (task *CascadeRegistrationTask) wrapErr(ctx context.Context, msg string, err error, f logtrace.Fields) error { - if err != nil { - f[logtrace.FieldError] = err.Error() - } - logtrace.Error(ctx, msg, f) - - // Preserve the root cause in the gRPC error description so callers receive full context. - if err != nil { - return status.Errorf(codes.Internal, "%s: %v", msg, err) - } - return status.Errorf(codes.Internal, "%s", msg) -} - -// emitArtefactsStored builds a single-line metrics summary and emits the -// SupernodeEventTypeArtefactsStored event while logging the metrics line. 
-func (task *CascadeRegistrationTask) emitArtefactsStored( - ctx context.Context, - fields logtrace.Fields, - _ codec.Layout, - send func(resp *RegisterResponse) error, -) { - if fields == nil { - fields = logtrace.Fields{} - } - - // Build payload strictly from internal collector (no P2P snapshots) - payload := cm.BuildStoreEventPayloadFromCollector(task.ID()) - - b, _ := json.MarshalIndent(payload, "", " ") - msg := string(b) - fields["metrics_json"] = msg - logtrace.Info(ctx, "artefacts have been stored", fields) - task.streamEvent(SupernodeEventTypeArtefactsStored, msg, "", send) - // No central state to clear; adaptor returns calls inline -} - -// extractSignatureAndFirstPart extracts the signature and first part from the encoded data -// data is expected to be in format: b64(JSON(Layout)).Signature -func extractSignatureAndFirstPart(data string) (encodedMetadata string, signature string, err error) { - parts := strings.Split(data, ".") - if len(parts) < 2 { - return "", "", errors.New("invalid data format") - } - - // The first part is the base64 encoded data - return parts[0], parts[1], nil -} - -func decodeMetadataFile(data string) (layout codec.Layout, err error) { - // Decode the base64 encoded data - decodedData, err := utils.B64Decode([]byte(data)) - if err != nil { - return layout, errors.Errorf("failed to decode data: %w", err) - } - - // Unmarshal the decoded data into a layout - if err := json.Unmarshal(decodedData, &layout); err != nil { - return layout, errors.Errorf("failed to unmarshal data: %w", err) - } - - return layout, nil -} - -func verifyIDs(ticketMetadata, metadata codec.Layout) error { - // Verify that the symbol identifiers match between versions - if err := utils.EqualStrList(ticketMetadata.Blocks[0].Symbols, metadata.Blocks[0].Symbols); err != nil { - return errors.Errorf("symbol identifiers don't match: %w", err) - } - - // Verify that the block hashes match - if ticketMetadata.Blocks[0].Hash != metadata.Blocks[0].Hash { - return 
errors.New("block hashes don't match") - } - - return nil -} - -// verifyActionFee checks if the action fee is sufficient for the given data size -// It fetches action parameters, calculates the required fee, and compares it with the action price -func (task *CascadeRegistrationTask) verifyActionFee(ctx context.Context, action *actiontypes.Action, dataSize int, fields logtrace.Fields) error { - dataSizeInKBs := dataSize / 1024 - fee, err := task.LumeraClient.GetActionFee(ctx, strconv.Itoa(dataSizeInKBs)) - if err != nil { - return task.wrapErr(ctx, "failed to get action fee", err, fields) - } - - // Parse fee amount from string to int64 - amount, err := strconv.ParseInt(fee.Amount, 10, 64) - if err != nil { - return task.wrapErr(ctx, "failed to parse fee amount", err, fields) - } - - // Calculate per-byte fee based on data size - requiredFee := sdk.NewCoin("ulume", math.NewInt(amount)) - - // Log the calculated fee - logtrace.Info(ctx, "calculated required fee", logtrace.Fields{ - "fee": requiredFee.String(), - "dataBytes": dataSize, - }) - // Check if action price is less than required fee - if action.Price.IsLT(requiredFee) { - return task.wrapErr( - ctx, - "insufficient fee", - fmt.Errorf("expected at least %s, got %s", requiredFee.String(), action.Price.String()), - fields, - ) - } - - return nil -} - -func parseRQMetadataFile(data []byte) (layout codec.Layout, signature string, counter string, err error) { - decompressed, err := utils.ZstdDecompress(data) - if err != nil { - return layout, "", "", errors.Errorf("decompress rq metadata file: %w", err) - } - - // base64EncodeMetadata.Signature.Counter - parts := bytes.Split(decompressed, []byte{SeparatorByte}) - if len(parts) != 3 { - return layout, "", "", errors.New("invalid rq metadata format: expecting 3 parts (layout, signature, counter)") - } - - layoutJson, err := utils.B64Decode(parts[0]) - if err != nil { - return layout, "", "", errors.Errorf("base64 decode failed: %w", err) - } - - if err := 
json.Unmarshal(layoutJson, &layout); err != nil { - return layout, "", "", errors.Errorf("unmarshal layout: %w", err) - } - - signature = string(parts[1]) - counter = string(parts[2]) - - return layout, signature, counter, nil -} - -// extractIndexFileAndSignature extracts index file and creator signature from signatures field -// data is expected to be in format: Base64(index_file).creators_signature -func extractIndexFileAndSignature(data string) (indexFileB64 string, creatorSignature string, err error) { - parts := strings.Split(data, ".") - if len(parts) < 2 { - return "", "", errors.New("invalid signatures format") - } - return parts[0], parts[1], nil -} - -// decodeIndexFile decodes base64 encoded index file -func decodeIndexFile(data string) (IndexFile, error) { - var indexFile IndexFile - decodedData, err := utils.B64Decode([]byte(data)) - if err != nil { - return indexFile, errors.Errorf("failed to decode index file: %w", err) - } - if err := json.Unmarshal(decodedData, &indexFile); err != nil { - return indexFile, errors.Errorf("failed to unmarshal index file: %w", err) - } - return indexFile, nil -} - -// VerifyDownloadSignature verifies the download signature for actionID.creatorAddress -func (task *CascadeRegistrationTask) VerifyDownloadSignature(ctx context.Context, actionID, signature string) error { - fields := logtrace.Fields{ - logtrace.FieldActionID: actionID, - logtrace.FieldMethod: "VerifyDownloadSignature", - } - - // Get action details to extract creator address - actionDetails, err := task.LumeraClient.GetAction(ctx, actionID) - if err != nil { - return task.wrapErr(ctx, "failed to get action", err, fields) - } - - creatorAddress := actionDetails.GetAction().Creator - fields["creator_address"] = creatorAddress - - // Create the expected signature data: actionID.creatorAddress - signatureData := fmt.Sprintf("%s.%s", actionID, creatorAddress) - fields["signature_data"] = signatureData - - // Decode the base64 signature - signatureBytes, err := 
base64.StdEncoding.DecodeString(signature) - if err != nil { - return task.wrapErr(ctx, "failed to decode signature from base64", err, fields) - } - - // Verify the signature using Lumera client - if err := task.LumeraClient.Verify(ctx, creatorAddress, []byte(signatureData), signatureBytes); err != nil { - return task.wrapErr(ctx, "failed to verify download signature", err, fields) - } - - logtrace.Info(ctx, "download signature successfully verified", fields) - return nil -} diff --git a/supernode/services/cascade/helper_test.go b/supernode/services/cascade/helper_test.go deleted file mode 100644 index b22f5436..00000000 --- a/supernode/services/cascade/helper_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package cascade - -import ( - "encoding/json" - "testing" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/stretchr/testify/assert" -) - -func Test_extractSignatureAndFirstPart(t *testing.T) { - tests := []struct { - name string - input string - expected string - sig string - hasErr bool - }{ - {"valid format", "data.sig", "data", "sig", false}, - {"no dot", "nodelimiter", "", "", true}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - data, sig, err := extractSignatureAndFirstPart(tt.input) - if tt.hasErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tt.expected, data) - assert.Equal(t, tt.sig, sig) - } - }) - } -} - -func Test_decodeMetadataFile(t *testing.T) { - layout := codec.Layout{ - Blocks: []codec.Block{{BlockID: 1, Hash: "abc", Symbols: []string{"s"}}}, - } - jsonBytes, _ := json.Marshal(layout) - encoded := utils.B64Encode(jsonBytes) - - tests := []struct { - name string - input string - expectErr bool - wantHash string - }{ - {"valid base64+json", string(encoded), false, "abc"}, - {"invalid base64", "!@#$%", true, ""}, - {"bad json", string(utils.B64Encode([]byte("{broken"))), true, ""}, - } - - for _, tt := range tests { - 
t.Run(tt.name, func(t *testing.T) { - out, err := decodeMetadataFile(tt.input) - if tt.expectErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tt.wantHash, out.Blocks[0].Hash) - } - }) - } -} - -func Test_verifyIDs(t *testing.T) { - tests := []struct { - name string - ticket codec.Layout - metadata codec.Layout - expectErr string - }{ - { - name: "success match", - ticket: codec.Layout{Blocks: []codec.Block{ - {Symbols: []string{"A"}, Hash: "abc"}, - }}, - metadata: codec.Layout{Blocks: []codec.Block{ - {Symbols: []string{"A"}, Hash: "abc"}, - }}, - }, - { - name: "symbol mismatch", - ticket: codec.Layout{Blocks: []codec.Block{ - {Symbols: []string{"A"}}, - }}, - metadata: codec.Layout{Blocks: []codec.Block{ - {Symbols: []string{"B"}}, - }}, - expectErr: "symbol identifiers don't match", - }, - { - name: "hash mismatch", - ticket: codec.Layout{Blocks: []codec.Block{ - {Symbols: []string{"A"}, Hash: "a"}, - }}, - metadata: codec.Layout{Blocks: []codec.Block{ - {Symbols: []string{"A"}, Hash: "b"}, - }}, - expectErr: "block hashes don't match", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := verifyIDs(tt.ticket, tt.metadata) - if tt.expectErr != "" { - assert.ErrorContains(t, err, tt.expectErr) - } else { - assert.NoError(t, err) - } - }) - } -} diff --git a/supernode/services/cascade/metadata.go b/supernode/services/cascade/metadata.go deleted file mode 100644 index 5ae67c07..00000000 --- a/supernode/services/cascade/metadata.go +++ /dev/null @@ -1,127 +0,0 @@ -package cascade - -import ( - "context" - - "bytes" - - "strconv" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/cosmos/btcutil/base58" - json "github.com/json-iterator/go" -) - -const ( - SeparatorByte byte = 46 // separator in dd_and_fingerprints.signature i.e. '.' 
-) - -// IndexFile represents the structure of the index file -type IndexFile struct { - Version int `json:"version"` - LayoutIDs []string `json:"layout_ids"` - LayoutSignature string `json:"layout_signature"` -} - -type GenRQIdentifiersFilesRequest struct { - Metadata codec.Layout - RqMax uint32 - CreatorSNAddress string - Signature string - IC uint32 -} - -type GenRQIdentifiersFilesResponse struct { - // IDs of the Redundant Metadata Files -- len(RQIDs) == len(RedundantMetadataFiles) - RQIDs []string - // RedundantMetadataFiles is a list of redundant files that are generated from the Metadata file - RedundantMetadataFiles [][]byte -} - -// GenRQIdentifiersFiles generates Redundant Metadata Files and IDs -func GenRQIdentifiersFiles(ctx context.Context, req GenRQIdentifiersFilesRequest) (resp GenRQIdentifiersFilesResponse, err error) { - metadataFile, err := json.Marshal(req.Metadata) - if err != nil { - return resp, errors.Errorf("marshal rqID file: %w", err) - } - b64EncodedMetadataFile := utils.B64Encode(metadataFile) - - // Create the RQID file by combining the encoded file with the signature - var buffer bytes.Buffer - buffer.Write(b64EncodedMetadataFile) - buffer.WriteByte(SeparatorByte) - buffer.Write([]byte(req.Signature)) - encMetadataFileWithSignature := buffer.Bytes() - - // Generate the specified number of variant IDs - rqIdIds, rqIDsFiles, err := GetIDFiles(ctx, encMetadataFileWithSignature, req.IC, req.RqMax) - if err != nil { - return resp, errors.Errorf("get ID Files: %w", err) - } - - return GenRQIdentifiersFilesResponse{ - RedundantMetadataFiles: rqIDsFiles, - RQIDs: rqIdIds, - }, nil -} - -// GetIDFiles generates Redundant Files for dd_and_fingerprints files and rq_id files -// encMetadataFileWithSignature is b64 encoded layout file appended with signatures and compressed, ic is the initial counter -// and max is the number of ids to generate -func GetIDFiles(ctx context.Context, encMetadataFileWithSignature []byte, ic uint32, max uint32) (ids 
[]string, files [][]byte, err error) { - idFiles := make([][]byte, 0, max) - ids = make([]string, 0, max) - var buffer bytes.Buffer - - for i := uint32(0); i < max; i++ { - buffer.Reset() - counter := ic + i - - buffer.Write(encMetadataFileWithSignature) - buffer.WriteByte(SeparatorByte) - buffer.WriteString(strconv.Itoa(int(counter))) // Using the string representation to maintain backward compatibility - - compressedData, err := utils.ZstdCompress(buffer.Bytes()) - if err != nil { - return ids, idFiles, errors.Errorf("compress identifiers file: %w", err) - } - - idFiles = append(idFiles, compressedData) - - hash, err := utils.Blake3Hash(compressedData) - if err != nil { - return ids, idFiles, errors.Errorf("sha3-256-hash error getting an id file: %w", err) - } - - ids = append(ids, base58.Encode(hash)) - } - - return ids, idFiles, nil -} - -// GenIndexFiles generates index files and their IDs from layout files using full signatures format -func GenIndexFiles(ctx context.Context, layoutFiles [][]byte, layoutSignature string, signaturesFormat string, ic uint32, max uint32) (indexIDs []string, indexFiles [][]byte, err error) { - // Create layout IDs from layout files - layoutIDs := make([]string, len(layoutFiles)) - for i, layoutFile := range layoutFiles { - hash, err := utils.Blake3Hash(layoutFile) - if err != nil { - return nil, nil, errors.Errorf("hash layout file: %w", err) - } - layoutIDs[i] = base58.Encode(hash) - } - - // Use the full signatures format that matches what was sent during RequestAction - // The chain expects this exact format for ID generation - indexFileWithSignatures := []byte(signaturesFormat) - - // Generate index file IDs using full signatures format - indexIDs, indexFiles, err = GetIDFiles(ctx, indexFileWithSignatures, ic, max) - if err != nil { - return nil, nil, errors.Errorf("get index ID files: %w", err) - } - - return indexIDs, indexFiles, nil -} diff --git a/supernode/services/cascade/metadata_test.go 
b/supernode/services/cascade/metadata_test.go deleted file mode 100644 index 48110d61..00000000 --- a/supernode/services/cascade/metadata_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package cascade - -import ( - "context" - "encoding/json" - "fmt" - "testing" - - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/cosmos/btcutil/base58" - "github.com/stretchr/testify/assert" -) - -func TestGenRQIdentifiersFiles(t *testing.T) { - tests := []struct { - name string - req GenRQIdentifiersFilesRequest - expectedCount int - }{ - { - name: "basic valid request", - req: GenRQIdentifiersFilesRequest{ - Metadata: codec.Layout{ - Blocks: []codec.Block{ - { - BlockID: 1, - EncoderParameters: []int{1, 2}, - OriginalOffset: 0, - Size: 10, - Symbols: []string{"s1", "s2"}, - Hash: "abcd1234", - }, - }, - }, - Signature: "sig", - RqMax: 2, - IC: 1, - }, - expectedCount: 2, - }, - { - name: "different IC value", - req: GenRQIdentifiersFilesRequest{ - Metadata: codec.Layout{ - Blocks: []codec.Block{ - { - BlockID: 5, - EncoderParameters: []int{9}, - OriginalOffset: 99, - Size: 42, - Symbols: []string{"x"}, - Hash: "z", - }, - }, - }, - Signature: "mysig", - RqMax: 1, - IC: 5, - }, - expectedCount: 1, - }, - } - - ctx := context.Background() - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - resp, err := GenRQIdentifiersFiles(ctx, tt.req) - assert.NoError(t, err) - assert.Len(t, resp.RQIDs, tt.expectedCount) - assert.Len(t, resp.RedundantMetadataFiles, tt.expectedCount) - - // independently compute expected response - metadataBytes, err := json.Marshal(tt.req.Metadata) - assert.NoError(t, err) - - base64Meta := utils.B64Encode(metadataBytes) - - for i := 0; i < tt.expectedCount; i++ { - composite := append(base64Meta, []byte(fmt.Sprintf(".%s.%d", tt.req.Signature, tt.req.IC+uint32(i)))...) 
- compressed, err := utils.ZstdCompress(composite) - assert.NoError(t, err) - - hash, err := utils.Blake3Hash(compressed) - assert.NoError(t, err) - - expectedRQID := base58.Encode(hash) - - assert.Equal(t, expectedRQID, resp.RQIDs[i]) - assert.Equal(t, compressed, resp.RedundantMetadataFiles[i]) - } - }) - } -} diff --git a/supernode/services/cascade/mocks/cascade_interfaces_mock.go b/supernode/services/cascade/mocks/cascade_interfaces_mock.go deleted file mode 100644 index 44d3189c..00000000 --- a/supernode/services/cascade/mocks/cascade_interfaces_mock.go +++ /dev/null @@ -1,115 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: interfaces.go - -// Package cascademocks is a generated GoMock package. -package cascademocks - -import ( - context "context" - reflect "reflect" - - cascade "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - gomock "github.com/golang/mock/gomock" -) - -// MockCascadeServiceFactory is a mock of CascadeServiceFactory interface. -type MockCascadeServiceFactory struct { - ctrl *gomock.Controller - recorder *MockCascadeServiceFactoryMockRecorder -} - -// MockCascadeServiceFactoryMockRecorder is the mock recorder for MockCascadeServiceFactory. -type MockCascadeServiceFactoryMockRecorder struct { - mock *MockCascadeServiceFactory -} - -// NewMockCascadeServiceFactory creates a new mock instance. -func NewMockCascadeServiceFactory(ctrl *gomock.Controller) *MockCascadeServiceFactory { - mock := &MockCascadeServiceFactory{ctrl: ctrl} - mock.recorder = &MockCascadeServiceFactoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCascadeServiceFactory) EXPECT() *MockCascadeServiceFactoryMockRecorder { - return m.recorder -} - -// NewCascadeRegistrationTask mocks base method. 
-func (m *MockCascadeServiceFactory) NewCascadeRegistrationTask() cascade.CascadeTask { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewCascadeRegistrationTask") - ret0, _ := ret[0].(cascade.CascadeTask) - return ret0 -} - -// NewCascadeRegistrationTask indicates an expected call of NewCascadeRegistrationTask. -func (mr *MockCascadeServiceFactoryMockRecorder) NewCascadeRegistrationTask() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewCascadeRegistrationTask", reflect.TypeOf((*MockCascadeServiceFactory)(nil).NewCascadeRegistrationTask)) -} - -// MockCascadeTask is a mock of CascadeTask interface. -type MockCascadeTask struct { - ctrl *gomock.Controller - recorder *MockCascadeTaskMockRecorder -} - -// MockCascadeTaskMockRecorder is the mock recorder for MockCascadeTask. -type MockCascadeTaskMockRecorder struct { - mock *MockCascadeTask -} - -// NewMockCascadeTask creates a new mock instance. -func NewMockCascadeTask(ctrl *gomock.Controller) *MockCascadeTask { - mock := &MockCascadeTask{ctrl: ctrl} - mock.recorder = &MockCascadeTaskMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCascadeTask) EXPECT() *MockCascadeTaskMockRecorder { - return m.recorder -} - -// CleanupDownload mocks base method. -func (m *MockCascadeTask) CleanupDownload(ctx context.Context, actionID string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CleanupDownload", ctx, actionID) - ret0, _ := ret[0].(error) - return ret0 -} - -// CleanupDownload indicates an expected call of CleanupDownload. -func (mr *MockCascadeTaskMockRecorder) CleanupDownload(ctx, actionID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupDownload", reflect.TypeOf((*MockCascadeTask)(nil).CleanupDownload), ctx, actionID) -} - -// Download mocks base method. 
-func (m *MockCascadeTask) Download(ctx context.Context, req *cascade.DownloadRequest, send func(*cascade.DownloadResponse) error) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Download", ctx, req, send) - ret0, _ := ret[0].(error) - return ret0 -} - -// Download indicates an expected call of Download. -func (mr *MockCascadeTaskMockRecorder) Download(ctx, req, send interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockCascadeTask)(nil).Download), ctx, req, send) -} - -// Register mocks base method. -func (m *MockCascadeTask) Register(ctx context.Context, req *cascade.RegisterRequest, send func(*cascade.RegisterResponse) error) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Register", ctx, req, send) - ret0, _ := ret[0].(error) - return ret0 -} - -// Register indicates an expected call of Register. -func (mr *MockCascadeTaskMockRecorder) Register(ctx, req, send interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockCascadeTask)(nil).Register), ctx, req, send) -} diff --git a/supernode/services/cascade/register.go b/supernode/services/cascade/register.go deleted file mode 100644 index dd6e1e77..00000000 --- a/supernode/services/cascade/register.go +++ /dev/null @@ -1,180 +0,0 @@ -package cascade - -import ( - "context" - "os" - - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" -) - -// RegisterRequest contains parameters for upload request -type RegisterRequest struct { - TaskID string - ActionID string - DataHash []byte - DataSize int - FilePath string -} - -// RegisterResponse contains the result of upload -type RegisterResponse struct { - EventType SupernodeEventType - Message string - TxHash string -} - -// Register processes the upload request for cascade input data. 
-// 1- Fetch & validate action (it should be a cascade action registered on the chain) -// 2- Ensure this super-node is eligible to process the action (should be in the top supernodes list for the action block height) -// 3- Get the cascade metadata from the action: it contains the data hash and the signatures -// -// Assuming data hash is a base64 encoded string of blake3 hash of the data -// The signatures field is: b64(JSON(Layout)).Signature where Layout is codec.Layout -// The layout is a JSON object that contains the metadata of the data -// -// 4- Verify the data hash (the data hash should match the one in the action ticket) - again, hash function should be blake3 -// 5- Generate Symbols with codec (RQ-Go Library) (the data should be encoded using the codec) -// 6- Extract the layout and the signature from Step 3. Verify the signature using the creator's public key (creator address is in the action) -// 7- Generate RQ-ID files from the layout that we generated locally and then match those with the ones in the action -// 8- Verify the IDs in the layout and the metadata (the IDs should match the ones in the action) -// 9- Store the artefacts in P2P Storage (the redundant metadata files and the symbols from the symbols dir) -func (task *CascadeRegistrationTask) Register( - ctx context.Context, - req *RegisterRequest, - send func(resp *RegisterResponse) error, -) (err error) { - - fields := logtrace.Fields{logtrace.FieldMethod: "Register", logtrace.FieldRequest: req} - logtrace.Info(ctx, "Cascade registration request received", fields) - - // Ensure task status and resources are finalized regardless of outcome - defer func() { - if err != nil { - task.UpdateStatus(common.StatusTaskCanceled) - } else { - task.UpdateStatus(common.StatusTaskCompleted) - } - task.Cancel() - }() - - // Always attempt to remove the uploaded file path - defer func() { - if req != nil && req.FilePath != "" { - if remErr := os.RemoveAll(req.FilePath); remErr != nil { - logtrace.Warn(ctx, 
"Failed to remove uploaded file", fields) - } else { - logtrace.Info(ctx, "Uploaded file cleaned up", fields) - } - } - }() - - /* 1. Fetch & validate action -------------------------------------------------- */ - action, err := task.fetchAction(ctx, req.ActionID, fields) - if err != nil { - return err - } - fields[logtrace.FieldBlockHeight] = action.BlockHeight - fields[logtrace.FieldCreator] = action.Creator - fields[logtrace.FieldStatus] = action.State - fields[logtrace.FieldPrice] = action.Price - logtrace.Info(ctx, "Action retrieved", fields) - task.streamEvent(SupernodeEventTypeActionRetrieved, "Action retrieved", "", send) - - /* 2. Verify action fee -------------------------------------------------------- */ - if err := task.verifyActionFee(ctx, action, req.DataSize, fields); err != nil { - return err - } - logtrace.Info(ctx, "Action fee verified", fields) - task.streamEvent(SupernodeEventTypeActionFeeVerified, "Action fee verified", "", send) - - /* 3. Ensure this super-node is eligible -------------------------------------- */ - fields[logtrace.FieldSupernodeState] = task.config.SupernodeAccountAddress - if err := task.ensureIsTopSupernode(ctx, uint64(action.BlockHeight), fields); err != nil { - return err - } - logtrace.Info(ctx, "Top supernode eligibility confirmed", fields) - task.streamEvent(SupernodeEventTypeTopSupernodeCheckPassed, "Top supernode eligibility confirmed", "", send) - - /* 4. Decode cascade metadata -------------------------------------------------- */ - cascadeMeta, err := task.decodeCascadeMetadata(ctx, action.Metadata, fields) - if err != nil { - return err - } - logtrace.Info(ctx, "Cascade metadata decoded", fields) - task.streamEvent(SupernodeEventTypeMetadataDecoded, "Cascade metadata decoded", "", send) - - /* 5. 
Verify data hash --------------------------------------------------------- */ - if err := task.verifyDataHash(ctx, req.DataHash, cascadeMeta.DataHash, fields); err != nil { - return err - } - logtrace.Info(ctx, "Data hash verified", fields) - task.streamEvent(SupernodeEventTypeDataHashVerified, "Data hash verified", "", send) - - /* 6. Encode the raw data ------------------------------------------------------ */ - encResp, err := task.encodeInput(ctx, req.ActionID, req.FilePath, req.DataSize, fields) - if err != nil { - return err - } - logtrace.Info(ctx, "Input encoded", fields) - task.streamEvent(SupernodeEventTypeInputEncoded, "Input encoded", "", send) - - /* 7. Signature verification + layout decode ---------------------------------- */ - layout, signature, err := task.verifySignatureAndDecodeLayout( - ctx, cascadeMeta.Signatures, action.Creator, encResp.Metadata, fields, - ) - if err != nil { - return err - } - logtrace.Info(ctx, "Signature verified", fields) - task.streamEvent(SupernodeEventTypeSignatureVerified, "Signature verified", "", send) - - /* 8. Generate RQ-ID files ----------------------------------------------------- */ - rqidResp, err := task.generateRQIDFiles(ctx, cascadeMeta, signature, action.Creator, encResp.Metadata, fields) - if err != nil { - return err - } - logtrace.Info(ctx, "RQID files generated", fields) - task.streamEvent(SupernodeEventTypeRQIDsGenerated, "RQID files generated", "", send) - - /* 9. Consistency checks ------------------------------------------------------- */ - if err := verifyIDs(layout, encResp.Metadata); err != nil { - return task.wrapErr(ctx, "failed to verify IDs", err, fields) - } - logtrace.Info(ctx, "RQIDs verified", fields) - task.streamEvent(SupernodeEventTypeRqIDsVerified, "RQIDs verified", "", send) - - /* 10. 
Simulate finalize to avoid storing artefacts if it would fail ---------- */ - if _, err := task.LumeraClient.SimulateFinalizeAction(ctx, action.ActionID, rqidResp.RQIDs); err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "Finalize simulation failed", fields) - // Emit explicit simulation failure event for client visibility - task.streamEvent(SupernodeEventTypeFinalizeSimulationFailed, "Finalize simulation failed", "", send) - return task.wrapErr(ctx, "finalize action simulation failed", err, fields) - } - logtrace.Info(ctx, "Finalize simulation passed", fields) - // Transmit as a standard event so SDK can propagate it (dedicated type) - task.streamEvent(SupernodeEventTypeFinalizeSimulated, "Finalize simulation passed", "", send) - - /* 11. Persist artefacts -------------------------------------------------------- */ - // Persist artefacts to the P2P network. P2P interfaces return error only; - // metrics are summarized at the cascade layer and emitted via event. 
- if err := task.storeArtefacts(ctx, action.ActionID, rqidResp.RedundantMetadataFiles, encResp.SymbolsDir, fields); err != nil { - return err - } - // Emit compact analytics payload from centralized metrics collector - task.emitArtefactsStored(ctx, fields, encResp.Metadata, send) - - resp, err := task.LumeraClient.FinalizeAction(ctx, action.ActionID, rqidResp.RQIDs) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Info(ctx, "Finalize action error", fields) - return task.wrapErr(ctx, "failed to finalize action", err, fields) - } - txHash := resp.TxResponse.TxHash - fields[logtrace.FieldTxHash] = txHash - logtrace.Info(ctx, "Action finalized", fields) - task.streamEvent(SupernodeEventTypeActionFinalized, "Action finalized", txHash, send) - - return nil -} diff --git a/supernode/services/cascade/register_test.go b/supernode/services/cascade/register_test.go deleted file mode 100644 index 6f56791a..00000000 --- a/supernode/services/cascade/register_test.go +++ /dev/null @@ -1,315 +0,0 @@ -package cascade_test - -import ( - "context" - "encoding/base64" - "encoding/hex" - "encoding/json" - "os" - "testing" - - sdkmath "cosmossdk.io/math" - actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" - sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" - codecpkg "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - cascadeadaptormocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors/mocks" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - sdk "github.com/cosmos/cosmos-sdk/types" - sdktx "github.com/cosmos/cosmos-sdk/types/tx" - "github.com/cosmos/gogoproto/proto" - "lukechampine.com/blake3" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func TestCascadeRegistrationTask_Register(t *testing.T) { - ctrl := 
gomock.NewController(t) - defer ctrl.Finish() - - // Setup input file - tmpFile, err := os.CreateTemp("", "cascade-test-input") - assert.NoError(t, err) - - _, _ = tmpFile.WriteString("mock data") - - err = tmpFile.Close() // ✅ ensure it's flushed to disk - assert.NoError(t, err) - - rawHash, b64Hash := blake3HashRawAndBase64(t, tmpFile.Name()) - - tests := []struct { - name string - setupMocks func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) - expectedError string - expectedEvents int - }{ - { - name: "happy path", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata(b64Hash, t), - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(1000), - }, - }, - }, nil) - - // 2. Top SNs - lc.EXPECT(). - GetTopSupernodes(gomock.Any(), uint64(100)). - Return(&sntypes.QueryGetTopSuperNodesForBlockResponse{ - Supernodes: []*sntypes.SuperNode{ - { - SupernodeAccount: "lumera1abcxyz", // must match task.config.SupernodeAccountAddress - }, - }, - }, nil) - - // 3. Signature verification - layout signature on layout file - // Expect two verification calls: creator signature and layout signature - lc.EXPECT(). - Verify(gomock.Any(), "creator1", gomock.Any(), gomock.Any()). - Return(nil). - Times(2) - - // 4. Simulate finalize should pass - lc.EXPECT(). - SimulateFinalizeAction(gomock.Any(), "action123", gomock.Any()). - Return(&sdktx.SimulateResponse{}, nil) - - // 5. Finalize - lc.EXPECT(). - FinalizeAction(gomock.Any(), "action123", gomock.Any()). - Return(&sdktx.BroadcastTxResponse{TxResponse: &sdk.TxResponse{TxHash: "tx123"}}, nil) - - // 6. 
Params (if used in fee check) - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "1000"}, nil) - - // 7. Encode input - codec.EXPECT(). - EncodeInput(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Return(adaptors.EncodeResult{ - SymbolsDir: "/tmp", - Metadata: codecpkg.Layout{Blocks: []codecpkg.Block{{BlockID: 1, Hash: "abc"}}}, - }, nil) - - // 8. Store artefacts (no metrics returned; recorded centrally) - p2p.EXPECT(). - StoreArtefacts(gomock.Any(), gomock.Any(), gomock.Any()). - Return(nil) - }, - expectedError: "", - expectedEvents: 12, - }, - { - name: "get-action fails", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, _ *cascadeadaptormocks.MockCodecService, _ *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(nil, assert.AnError) - }, - expectedError: "assert.AnError general error", - expectedEvents: 0, - }, - { - name: "invalid data hash mismatch", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata("some-other-hash", t), // ⛔ incorrect hash - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(1000), - }, - }, - }, nil) - - lc.EXPECT(). - GetTopSupernodes(gomock.Any(), uint64(100)). 
- Return(&sntypes.QueryGetTopSuperNodesForBlockResponse{ - Supernodes: []*sntypes.SuperNode{ - {SupernodeAccount: "lumera1abcxyz"}, - }, - }, nil) - - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "1000"}, nil) - }, - expectedError: "data hash doesn't match", - expectedEvents: 5, // up to metadata decoded - }, - { - name: "fee too low", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata(b64Hash, t), - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(50), - }, - }, - }, nil) - - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "100"}, nil) - - }, - expectedError: "action fee is too low", - expectedEvents: 2, // until fee check - }, - { - name: "supernode not in top list", - setupMocks: func(lc *cascadeadaptormocks.MockLumeraClient, codec *cascadeadaptormocks.MockCodecService, p2p *cascadeadaptormocks.MockP2PService) { - lc.EXPECT(). - GetAction(gomock.Any(), "action123"). - Return(&actiontypes.QueryGetActionResponse{ - Action: &actiontypes.Action{ - ActionID: "action123", - Creator: "creator1", - BlockHeight: 100, - Metadata: encodedCascadeMetadata(b64Hash, t), - Price: &sdk.Coin{ - Denom: "ulume", - Amount: sdkmath.NewInt(1000), - }, - }, - }, nil) - - lc.EXPECT().GetActionFee(gomock.Any(), "10").Return(&actiontypes.QueryGetActionFeeResponse{Amount: "1000"}, nil) - - lc.EXPECT(). - GetTopSupernodes(gomock.Any(), uint64(100)). 
- Return(&sntypes.QueryGetTopSuperNodesForBlockResponse{ - Supernodes: []*sntypes.SuperNode{ - {SupernodeAccount: "other-supernode"}, - }, - }, nil) - }, - expectedError: "not eligible supernode", - expectedEvents: 2, // fails after fee verified - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockLumera := cascadeadaptormocks.NewMockLumeraClient(ctrl) - mockCodec := cascadeadaptormocks.NewMockCodecService(ctrl) - mockP2P := cascadeadaptormocks.NewMockP2PService(ctrl) - - tt.setupMocks(mockLumera, mockCodec, mockP2P) - - config := &cascade.Config{Config: common.Config{ - SupernodeAccountAddress: "lumera1abcxyz", - }, - } - - service := cascade.NewCascadeService( - config, - nil, nil, nil, nil, - ) - - service.LumeraClient = mockLumera - service.P2P = mockP2P - service.RQ = mockCodec - // Inject mocks for adaptors - task := cascade.NewCascadeRegistrationTask(service) - - req := &cascade.RegisterRequest{ - TaskID: "task1", - ActionID: "action123", - DataHash: rawHash, - DataSize: 10240, - FilePath: tmpFile.Name(), - } - - var events []cascade.RegisterResponse - err := task.Register(context.Background(), req, func(resp *cascade.RegisterResponse) error { - events = append(events, *resp) - return nil - }) - - if tt.expectedError != "" { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Len(t, events, tt.expectedEvents) - } - }) - } -} - -func encodedCascadeMetadata(hash string, t *testing.T) []byte { - t.Helper() - - // Fake layout signature for new index file format - fakeLayoutSig := base64.StdEncoding.EncodeToString([]byte("fakelayoutsignature")) - - // Create index file structure - indexFile := map[string]any{ - "layout_ids": []string{"layout_id_1", "layout_id_2"}, - "layout_signature": fakeLayoutSig, - } - indexFileJSON, _ := json.Marshal(indexFile) - fakeIndexFile := base64.StdEncoding.EncodeToString(indexFileJSON) - - // Fake creators signature - this is what the chain uses for index ID generation - fakeCreatorsSig 
:= base64.StdEncoding.EncodeToString([]byte("fakecreatorssignature")) - - metadata := &actiontypes.CascadeMetadata{ - DataHash: hash, - FileName: "file.txt", - RqIdsIc: 2, - RqIdsMax: 4, - RqIdsIds: []string{"id1", "id2"}, - Signatures: fakeIndexFile + "." + fakeCreatorsSig, - } - - bytes, err := proto.Marshal(metadata) - if err != nil { - t.Fatalf("failed to marshal CascadeMetadata: %v", err) - } - - return bytes -} - -func blake3HashRawAndBase64(t *testing.T, path string) ([]byte, string) { - t.Helper() - - data, err := os.ReadFile(path) - if err != nil { - t.Fatal(err) - } - - hash := blake3.Sum256(data) - raw := hash[:] - b64 := base64.StdEncoding.EncodeToString(raw) - return raw, b64 -} - -func decodeHexOrDie(hexStr string) []byte { - bz, err := hex.DecodeString(hexStr) - if err != nil { - panic(err) - } - return bz -} diff --git a/supernode/services/cascade/service.go b/supernode/services/cascade/service.go deleted file mode 100644 index a1d9898b..00000000 --- a/supernode/services/cascade/service.go +++ /dev/null @@ -1,66 +0,0 @@ -package cascade - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" -) - -type CascadeService struct { - *base.SuperNodeService - config *Config - - LumeraClient adaptors.LumeraClient - P2P adaptors.P2PService - RQ adaptors.CodecService -} - -// Compile-time checks to ensure CascadeService implements required interfaces -var _ supernode.TaskProvider = (*CascadeService)(nil) -var _ CascadeServiceFactory = (*CascadeService)(nil) - -// NewCascadeRegistrationTask creates a new task for cascade registration -func (service 
*CascadeService) NewCascadeRegistrationTask() CascadeTask { - task := NewCascadeRegistrationTask(service) - service.Worker.AddTask(task) - return task -} - -// Run starts the service -func (service *CascadeService) Run(ctx context.Context) error { - return service.RunHelper(ctx, service.config.SupernodeAccountAddress, logPrefix) -} - -// GetServiceName returns the name of the cascade service -func (service *CascadeService) GetServiceName() string { - return "cascade" -} - -// GetRunningTasks returns a list of currently running task IDs -func (service *CascadeService) GetRunningTasks() []string { - var taskIDs []string - for _, t := range service.Worker.Tasks() { - // Include only tasks that are not in a final state - if st := t.Status(); st != nil && st.SubStatus != nil && !st.SubStatus.IsFinal() { - taskIDs = append(taskIDs, t.ID()) - } - } - return taskIDs -} - -// NewCascadeService returns a new CascadeService instance -func NewCascadeService(config *Config, lumera lumera.Client, p2pClient p2p.Client, codec codec.Codec, rqstore rqstore.Store) *CascadeService { - return &CascadeService{ - config: config, - SuperNodeService: base.NewSuperNodeService(p2pClient), - LumeraClient: adaptors.NewLumeraClient(lumera), - P2P: adaptors.NewP2PService(p2pClient, rqstore), - RQ: adaptors.NewCodecService(codec), - } -} diff --git a/supernode/services/cascade/service_test.go b/supernode/services/cascade/service_test.go deleted file mode 100644 index bc2998ad..00000000 --- a/supernode/services/cascade/service_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package cascade_test - -import ( - "context" - "testing" - "time" - - "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade" - cascadeadaptormocks "github.com/LumeraProtocol/supernode/v2/supernode/services/cascade/adaptors/mocks" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func TestNewCascadeService(t *testing.T) { - 
ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockLumera := cascadeadaptormocks.NewMockLumeraClient(ctrl) - mockP2P := cascadeadaptormocks.NewMockP2PService(ctrl) - mockCodec := cascadeadaptormocks.NewMockCodecService(ctrl) - - config := &cascade.Config{ - Config: common.Config{ - SupernodeAccountAddress: "lumera1abcxyz", - }, - } - - service := cascade.NewCascadeService(config, nil, nil, nil, nil) - service.LumeraClient = mockLumera - service.RQ = mockCodec - service.P2P = mockP2P - - assert.NotNil(t, service) - assert.NotNil(t, service.LumeraClient) - assert.NotNil(t, service.P2P) - assert.NotNil(t, service.RQ) -} - -func TestNewCascadeRegistrationTask(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockLumera := cascadeadaptormocks.NewMockLumeraClient(ctrl) - mockP2P := cascadeadaptormocks.NewMockP2PService(ctrl) - mockCodec := cascadeadaptormocks.NewMockCodecService(ctrl) - - config := &cascade.Config{ - Config: common.Config{ - SupernodeAccountAddress: "lumera1abcxyz", - }, - } - - service := cascade.NewCascadeService(config, nil, nil, nil, nil) - service.LumeraClient = mockLumera - service.RQ = mockCodec - service.P2P = mockP2P - - task := cascade.NewCascadeRegistrationTask(service) - assert.NotNil(t, task) - - go func() { - service.Worker.AddTask(task) - }() - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - defer cancel() - - err := service.RunHelper(ctx, "node-id", "prefix") - assert.NoError(t, err) -} diff --git a/supernode/services/cascade/status.go b/supernode/services/cascade/status.go deleted file mode 100644 index b5633a45..00000000 --- a/supernode/services/cascade/status.go +++ /dev/null @@ -1,22 +0,0 @@ -package cascade - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" -) - -// StatusResponse represents the status response for cascade service -type StatusResponse = supernode.StatusResponse - -// GetStatus delegates to the 
common supernode status service -func (service *CascadeService) GetStatus(ctx context.Context) (StatusResponse, error) { - // Create a status service and register the cascade service as a task provider - // Pass nil for optional dependencies (P2P, lumera client, and config) - // as cascade service doesn't have access to them in this context - statusService := supernode.NewSupernodeStatusService(nil, nil, nil) - statusService.RegisterTaskProvider(service) - - // Get the status from the common service - return statusService.GetStatus(ctx, false) -} diff --git a/supernode/services/cascade/status_test.go b/supernode/services/cascade/status_test.go deleted file mode 100644 index d85f9f8f..00000000 --- a/supernode/services/cascade/status_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package cascade - -import ( - "context" - "testing" - - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/supernode" - "github.com/stretchr/testify/assert" -) - -func TestGetStatus(t *testing.T) { - ctx := context.Background() - - tests := []struct { - name string - taskCount int - expectErr bool - expectTasks int - }{ - { - name: "no tasks", - taskCount: 0, - expectErr: false, - expectTasks: 0, - }, - { - name: "one task", - taskCount: 1, - expectErr: false, - expectTasks: 1, - }, - { - name: "multiple tasks", - taskCount: 3, - expectErr: false, - expectTasks: 3, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Setup service and worker - service := &CascadeService{ - SuperNodeService: base.NewSuperNodeService(nil), - } - - go func() { - service.RunHelper(ctx, "node-id", "prefix") - }() - - // Register tasks - for i := 0; i < tt.taskCount; i++ { - task := NewCascadeRegistrationTask(service) - service.Worker.AddTask(task) - } - - // Call GetStatus from service - resp, err := service.GetStatus(ctx) - if tt.expectErr { - assert.Error(t, err) - return - } - - assert.NoError(t, 
err) - - // Version check - assert.NotEmpty(t, resp.Version) - - // Uptime check - assert.True(t, resp.UptimeSeconds >= 0) - - // CPU checks - assert.True(t, resp.Resources.CPU.UsagePercent >= 0) - assert.True(t, resp.Resources.CPU.UsagePercent <= 100) - assert.True(t, resp.Resources.CPU.Cores >= 0) - - // Memory checks (now in GB) - assert.True(t, resp.Resources.Memory.TotalGB > 0) - assert.True(t, resp.Resources.Memory.UsedGB <= resp.Resources.Memory.TotalGB) - assert.True(t, resp.Resources.Memory.UsagePercent >= 0 && resp.Resources.Memory.UsagePercent <= 100) - - // Hardware summary check - if resp.Resources.CPU.Cores > 0 && resp.Resources.Memory.TotalGB > 0 { - assert.NotEmpty(t, resp.Resources.HardwareSummary) - } - - // Storage checks - should have default root filesystem - assert.NotEmpty(t, resp.Resources.Storage) - assert.Equal(t, "/", resp.Resources.Storage[0].Path) - - // Registered services check - assert.Contains(t, resp.RegisteredServices, "cascade") - - // Check new fields have default values (since service doesn't have access to P2P/lumera/config) - assert.Equal(t, int32(0), resp.Network.PeersCount) - assert.Empty(t, resp.Network.PeerAddresses) - assert.Equal(t, int32(0), resp.Rank) - assert.Empty(t, resp.IPAddress) - - // Task count check - look for cascade service in the running tasks list - var cascadeService *supernode.ServiceTasks - for _, service := range resp.RunningTasks { - if service.ServiceName == "cascade" { - cascadeService = &service - break - } - } - - if tt.expectTasks > 0 { - assert.NotNil(t, cascadeService, "cascade service should be present") - assert.Equal(t, tt.expectTasks, int(cascadeService.TaskCount)) - assert.Equal(t, tt.expectTasks, len(cascadeService.TaskIDs)) - } else { - // If no tasks expected, either no cascade service or empty task count - if cascadeService != nil { - assert.Equal(t, 0, int(cascadeService.TaskCount)) - } - } - }) - } -} diff --git a/supernode/services/cascade/task.go 
b/supernode/services/cascade/task.go deleted file mode 100644 index 5dcffa34..00000000 --- a/supernode/services/cascade/task.go +++ /dev/null @@ -1,58 +0,0 @@ -package cascade - -import ( - "context" - - "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/base" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common/storage" -) - -// CascadeRegistrationTask is the task for cascade registration -type CascadeRegistrationTask struct { - *CascadeService - - *base.SuperNodeTask - storage *storage.StorageHandler - - Asset *files.File - dataHash string - creatorSignature []byte -} - -const ( - logPrefix = "cascade" -) - -// Compile-time check to ensure CascadeRegistrationTask implements CascadeTask interface -var _ CascadeTask = (*CascadeRegistrationTask)(nil) - -// Run starts the task -func (task *CascadeRegistrationTask) Run(ctx context.Context) error { - return task.RunHelper(ctx, task.removeArtifacts) -} - -// removeArtifacts cleans up any files created during processing -func (task *CascadeRegistrationTask) removeArtifacts() { - task.RemoveFile(task.Asset) -} - -// NewCascadeRegistrationTask returns a new Task instance -func NewCascadeRegistrationTask(service *CascadeService) *CascadeRegistrationTask { - task := &CascadeRegistrationTask{ - SuperNodeTask: base.NewSuperNodeTask(logPrefix), - CascadeService: service, - } - - return task -} - -func (task *CascadeRegistrationTask) streamEvent(eventType SupernodeEventType, msg, txHash string, send func(resp *RegisterResponse) error) { - _ = send(&RegisterResponse{ - EventType: eventType, - Message: msg, - TxHash: txHash, - }) - - return -} diff --git a/supernode/services/common/base/supernode_service.go b/supernode/services/common/base/supernode_service.go deleted file mode 100644 index 1d41715b..00000000 --- a/supernode/services/common/base/supernode_service.go +++ /dev/null @@ -1,70 +0,0 @@ -package base - -import ( - "context" - 
"time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/common/task" - "github.com/LumeraProtocol/supernode/v2/pkg/errgroup" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" -) - -// SuperNodeServiceInterface common interface for Services -type SuperNodeServiceInterface interface { - RunHelper(ctx context.Context) error - NewTask() task.Task - Task(id string) task.Task -} - -// SuperNodeService common "class" for Services -type SuperNodeService struct { - *task.Worker - P2PClient p2p.Client -} - -// run starts task -func (service *SuperNodeService) run(ctx context.Context, nodeID string, prefix string) error { - ctx = logtrace.CtxWithCorrelationID(ctx, prefix) - - if nodeID == "" { - return errors.New("PastelID is not specified in the config file") - } - - group, ctx := errgroup.WithContext(ctx) - group.Go(func() error { - return service.Worker.Run(ctx) - }) - - return group.Wait() -} - -// RunHelper common code for Service runner -func (service *SuperNodeService) RunHelper(ctx context.Context, nodeID string, prefix string) error { - for { - select { - case <-ctx.Done(): - logtrace.Error(ctx, "context done - closing sn services", logtrace.Fields{logtrace.FieldModule: "supernode"}) - return nil - case <-time.After(5 * time.Second): - if err := service.run(ctx, nodeID, prefix); err != nil { - service.Worker = task.NewWorker() - logtrace.Error(ctx, "Service run failed, retrying", logtrace.Fields{logtrace.FieldModule: "supernode", logtrace.FieldError: err.Error()}) - } else { - logtrace.Info(ctx, "Service run completed successfully - closing sn services", logtrace.Fields{logtrace.FieldModule: "supernode"}) - return nil - } - } - } -} - -// NewSuperNodeService creates SuperNodeService -func NewSuperNodeService( - p2pClient p2p.Client, -) *SuperNodeService { - return &SuperNodeService{ - Worker: task.NewWorker(), - P2PClient: p2pClient, - } -} diff --git 
a/supernode/services/common/base/supernode_task.go b/supernode/services/common/base/supernode_task.go deleted file mode 100644 index 937e6013..00000000 --- a/supernode/services/common/base/supernode_task.go +++ /dev/null @@ -1,71 +0,0 @@ -package base - -import ( - "context" - "fmt" - - "github.com/LumeraProtocol/supernode/v2/pkg/common/task" - "github.com/LumeraProtocol/supernode/v2/pkg/common/task/state" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" -) - -// TaskCleanerFunc pointer to func that removes artefacts -type TaskCleanerFunc func() - -// SuperNodeTask base "class" for Task -type SuperNodeTask struct { - task.Task - - LogPrefix string -} - -// RunHelper common code for Task runner -func (task *SuperNodeTask) RunHelper(ctx context.Context, clean TaskCleanerFunc) error { - ctx = task.context(ctx) - logtrace.Debug(ctx, "Start task", logtrace.Fields{}) - defer logtrace.Info(ctx, "Task canceled", logtrace.Fields{}) - defer task.Cancel() - - task.SetStatusNotifyFunc(func(status *state.Status) { - logtrace.Debug(ctx, "States updated", logtrace.Fields{"status": status.String()}) - }) - - defer clean() - - err := task.RunAction(ctx) - - // Update task status based on completion result - if err != nil { - task.UpdateStatus(common.StatusTaskCanceled) - } else { - task.UpdateStatus(common.StatusTaskCompleted) - } - - return err -} - -func (task *SuperNodeTask) context(ctx context.Context) context.Context { - return logtrace.CtxWithCorrelationID(ctx, fmt.Sprintf("%s-%s", task.LogPrefix, task.ID())) -} - -// RemoveFile removes file from FS (TODO: move to gonode.common) -func (task *SuperNodeTask) RemoveFile(file *files.File) { - if file != nil { - logtrace.Debug(context.Background(), "remove file", logtrace.Fields{"filename": file.Name()}) - if err := file.Remove(); err != nil { - logtrace.Debug(context.Background(), "remove file 
failed", logtrace.Fields{logtrace.FieldError: err.Error()}) - } - } -} - -// NewSuperNodeTask returns a new Task instance. -func NewSuperNodeTask(logPrefix string) *SuperNodeTask { - snt := &SuperNodeTask{ - Task: task.New(common.StatusTaskStarted), - LogPrefix: logPrefix, - } - - return snt -} diff --git a/supernode/services/common/base/supernode_task_test.go b/supernode/services/common/base/supernode_task_test.go deleted file mode 100644 index 9e108f59..00000000 --- a/supernode/services/common/base/supernode_task_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package base - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestNewSuperNodeTask(t *testing.T) { - task := NewSuperNodeTask("testprefix") - assert.NotNil(t, task) - assert.Equal(t, "testprefix", task.LogPrefix) -} - -func TestSuperNodeTask_RunHelper(t *testing.T) { - called := false - cleaner := func() { - called = true - } - - snt := NewSuperNodeTask("log") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Run the helper in a goroutine - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - err := snt.RunHelper(ctx, cleaner) - assert.NoError(t, err) - }() - - // Give the RunHelper some time to start and block on actionCh - time.Sleep(10 * time.Millisecond) - - // Submit dummy action to allow RunAction to proceed - done := snt.NewAction(func(ctx context.Context) error { - return nil - }) - - <-done // wait for action to complete - - snt.CloseActionCh() // close to allow RunAction to return - wg.Wait() // wait for RunHelper to exit - - assert.True(t, called) -} - -func TestSuperNodeTask_RunHelper_WithError(t *testing.T) { - snt := NewSuperNodeTask("log") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var wg sync.WaitGroup - wg.Add(1) - - var runErr error - go func() { - defer wg.Done() - runErr = snt.RunHelper(ctx, func() {}) - }() - - // Give RunHelper time to start - 
time.Sleep(10 * time.Millisecond) - - done := snt.NewAction(func(ctx context.Context) error { - return fmt.Errorf("fail") - }) - - <-done // wait for the action to complete - snt.CloseActionCh() // allow RunAction to exit - wg.Wait() // wait for RunHelper to return - - assert.EqualError(t, runErr, "fail") -} diff --git a/supernode/services/common/config.go b/supernode/services/common/config.go deleted file mode 100644 index 684d1fd1..00000000 --- a/supernode/services/common/config.go +++ /dev/null @@ -1,19 +0,0 @@ -package common - -const ( - defaultNumberSuperNodes = 10 -) - -// Config contains common configuration of the services. -type Config struct { - SupernodeAccountAddress string - SupernodeIPAddress string - NumberSuperNodes int -} - -// NewConfig returns a new Config instance -func NewConfig() *Config { - return &Config{ - NumberSuperNodes: defaultNumberSuperNodes, - } -} diff --git a/supernode/services/common/storage/handler.go b/supernode/services/common/storage/handler.go deleted file mode 100644 index 210dab0f..00000000 --- a/supernode/services/common/storage/handler.go +++ /dev/null @@ -1,185 +0,0 @@ -package storage - -import ( - "context" - "fmt" - "io/fs" - "math" - "math/rand/v2" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/pkg/errors" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/p2pmetrics" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/files" - "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" -) - -const ( - loadSymbolsBatchSize = 2500 - storeSymbolsPercent = 10 - concurrency = 1 - - UnknownDataType = iota // 1 - P2PDataRaptorQSymbol // 1 - P2PDataCascadeMetadata // 2 -) - -// StorageHandler provides common logic for RQ and P2P operations -type StorageHandler struct { - P2PClient p2p.Client - rqDir string - - TaskID string - 
TxID string - - store rqstore.Store - semaphore chan struct{} -} - -// NewStorageHandler creates instance of StorageHandler -func NewStorageHandler(p2p p2p.Client, rqDir string, store rqstore.Store) *StorageHandler { - return &StorageHandler{ - P2PClient: p2p, - rqDir: rqDir, - store: store, - semaphore: make(chan struct{}, concurrency), - } -} - -// StoreFileIntoP2P stores file into P2P -func (h *StorageHandler) StoreFileIntoP2P(ctx context.Context, file *files.File, typ int) (string, error) { - data, err := file.Bytes() - if err != nil { - return "", errors.Errorf("store file %s into p2p", file.Name()) - } - return h.StoreBytesIntoP2P(ctx, data, typ) -} - -// StoreBytesIntoP2P into P2P actual data -func (h *StorageHandler) StoreBytesIntoP2P(ctx context.Context, data []byte, typ int) (string, error) { - return h.P2PClient.Store(ctx, data, typ) -} - -// StoreBatch stores into P2P an array of byte slices. -func (h *StorageHandler) StoreBatch(ctx context.Context, list [][]byte, typ int) error { - val := ctx.Value(logtrace.CorrelationIDKey) - taskID := "" - if val != nil { - taskID = fmt.Sprintf("%v", val) - } - - logtrace.Info(ctx, "task_id in storeList", logtrace.Fields{logtrace.FieldTaskID: taskID}) - // Add taskID to context for metrics - ctx = p2pmetrics.WithTaskID(ctx, taskID) - return h.P2PClient.StoreBatch(ctx, list, typ, taskID) -} - -// StoreRaptorQSymbolsIntoP2P stores RaptorQ symbols into P2P -// It first records the directory in the database, then gathers all symbol paths -// under the specified directory. If the number of keys exceeds a certain threshold, -// it randomly samples a percentage of them. Finally, it streams the symbols in -// fixed-size batches to the P2P network. -// -// Note: P2P client returns (ratePct, requests, err) for each batch; we ignore -// the metrics here and only validate error semantics. 
-func (h *StorageHandler) StoreRaptorQSymbolsIntoP2P(ctx context.Context, taskID, symbolsDir string) error { - /* record directory in DB */ - if err := h.store.StoreSymbolDirectory(taskID, symbolsDir); err != nil { - return fmt.Errorf("store symbol dir: %w", err) - } - - /* gather every symbol path under symbolsDir ------------------------- */ - keys, err := walkSymbolTree(symbolsDir) - if err != nil { - return err - } - - /* down-sample if we exceed the "big directory" threshold ------------- */ - if len(keys) > loadSymbolsBatchSize { - want := int(math.Ceil(float64(len(keys)) * storeSymbolsPercent / 100)) - if want < len(keys) { - rand.Shuffle(len(keys), func(i, j int) { keys[i], keys[j] = keys[j], keys[i] }) - keys = keys[:want] - } - sort.Strings(keys) // deterministic order inside the sample - } - - logtrace.Info(ctx, "storing RaptorQ symbols", logtrace.Fields{"count": len(keys)}) - - /* stream in fixed-size batches -------------------------------------- */ - for start := 0; start < len(keys); { - end := start + loadSymbolsBatchSize - if end > len(keys) { - end = len(keys) - } - if err := h.storeSymbolsInP2P(ctx, taskID, symbolsDir, keys[start:end]); err != nil { - return err - } - start = end - } - - if err := h.store.UpdateIsFirstBatchStored(h.TxID); err != nil { - return fmt.Errorf("update first-batch flag: %w", err) - } - - logtrace.Info(ctx, "finished storing RaptorQ symbols", logtrace.Fields{"curr-time": time.Now().UTC(), "count": len(keys)}) - - return nil -} - -func walkSymbolTree(root string) ([]string, error) { - var keys []string - err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err // propagate I/O errors - } - if d.IsDir() { - return nil // skip directory nodes - } - // ignore layout json if present - if strings.EqualFold(filepath.Ext(d.Name()), ".json") { - return nil - } - rel, err := filepath.Rel(root, path) - if err != nil { - return err - } - keys = append(keys, rel) // store as 
"block_0/filename" - return nil - }) - if err != nil { - return nil, fmt.Errorf("walk symbol tree: %w", err) - } - return keys, nil -} - -func (h *StorageHandler) storeSymbolsInP2P(ctx context.Context, taskID, root string, fileKeys []string) error { - logtrace.Info(ctx, "loading batch symbols", logtrace.Fields{"count": len(fileKeys)}) - - symbols, err := utils.LoadSymbols(root, fileKeys) - if err != nil { - return fmt.Errorf("load symbols: %w", err) - } - - // Add taskID to context for metrics - ctx = p2pmetrics.WithTaskID(ctx, taskID) - if err := h.P2PClient.StoreBatch(ctx, symbols, P2PDataRaptorQSymbol, taskID); err != nil { - return fmt.Errorf("p2p store batch: %w", err) - } - - logtrace.Info(ctx, "stored batch symbols", logtrace.Fields{"count": len(symbols)}) - - if err := utils.DeleteSymbols(ctx, root, fileKeys); err != nil { - return fmt.Errorf("delete symbols: %w", err) - } - - logtrace.Info(ctx, "deleted batch symbols", logtrace.Fields{"count": len(symbols)}) - - return nil -} diff --git a/supernode/services/common/storage/handler_test.go b/supernode/services/common/storage/handler_test.go deleted file mode 100644 index fd4e0d8e..00000000 --- a/supernode/services/common/storage/handler_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package storage - -import ( - "context" - "testing" - - "github.com/LumeraProtocol/supernode/v2/p2p/mocks" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -// --- Mocks --- - -type mockP2PClient struct { - mocks.Client -} - -type mockStore struct { - mock.Mock -} - -func (m *mockStore) StoreSymbolDirectory(taskID, dir string) error { - args := m.Called(taskID, dir) - return args.Error(0) -} - -func (m *mockStore) UpdateIsFirstBatchStored(txID string) error { - args := m.Called(txID) - return args.Error(0) -} - -func TestStoreBytesIntoP2P(t *testing.T) { - p2pClient := new(mockP2PClient) - handler := NewStorageHandler(p2pClient, "", nil) - - data := []byte("hello") - p2pClient.On("Store", mock.Anything, 
data, 1).Return("some-id", nil) - - id, err := handler.StoreBytesIntoP2P(context.Background(), data, 1) - assert.NoError(t, err) - assert.Equal(t, "some-id", id) - p2pClient.AssertExpectations(t) -} - -func TestStoreBatch(t *testing.T) { - p2pClient := new(mockP2PClient) - handler := NewStorageHandler(p2pClient, "", nil) - - ctx := context.WithValue(context.Background(), "task_id", "123") - list := [][]byte{[]byte("a"), []byte("b")} - // StoreBatch now returns error only - p2pClient.On("StoreBatch", mock.Anything, list, 3, "").Return(nil) - - err := handler.StoreBatch(ctx, list, 3) - assert.NoError(t, err) -} diff --git a/supernode/services/common/supernode/service.go b/supernode/services/common/supernode/service.go deleted file mode 100644 index 13d5efe4..00000000 --- a/supernode/services/common/supernode/service.go +++ /dev/null @@ -1,391 +0,0 @@ -package supernode - -import ( - "context" - "fmt" - "time" - - "github.com/LumeraProtocol/supernode/v2/p2p" - "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - "github.com/LumeraProtocol/supernode/v2/pkg/utils" - "github.com/LumeraProtocol/supernode/v2/supernode/config" -) - -// Version is the supernode version, set by the main application -var Version = "dev" - -// SupernodeStatusService provides centralized status information -// by collecting system metrics and aggregating task information from registered services -type SupernodeStatusService struct { - taskProviders []TaskProvider // List of registered services that provide task information - metrics *MetricsCollector // System metrics collector for CPU and memory stats - storagePaths []string // Paths to monitor for storage metrics - startTime time.Time // Service start time for uptime calculation - p2pService p2p.Client // P2P service for network information - lumeraClient lumera.Client // Lumera client for blockchain queries - config *config.Config 
// Supernode configuration -} - -// NewSupernodeStatusService creates a new supernode status service instance -func NewSupernodeStatusService(p2pService p2p.Client, lumeraClient lumera.Client, cfg *config.Config) *SupernodeStatusService { - return &SupernodeStatusService{ - taskProviders: make([]TaskProvider, 0), - metrics: NewMetricsCollector(), - storagePaths: []string{"/"}, // Default to monitoring root filesystem - startTime: time.Now(), - p2pService: p2pService, - lumeraClient: lumeraClient, - config: cfg, - } -} - -// RegisterTaskProvider registers a service as a task provider -// This allows the service to report its running tasks in status responses -func (s *SupernodeStatusService) RegisterTaskProvider(provider TaskProvider) { - s.taskProviders = append(s.taskProviders, provider) -} - -// GetStatus returns the current system status including all registered services -// This method collects CPU metrics, memory usage, and task information from all providers -func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetrics bool) (StatusResponse, error) { - fields := logtrace.Fields{ - logtrace.FieldMethod: "GetStatus", - logtrace.FieldModule: "SupernodeStatusService", - } - logtrace.Info(ctx, "status request received", fields) - - var resp StatusResponse - resp.Version = Version - - // Calculate uptime - resp.UptimeSeconds = uint64(time.Since(s.startTime).Seconds()) - - // Collect CPU metrics - cpuUsage, err := s.metrics.CollectCPUMetrics(ctx) - if err != nil { - return resp, err - } - resp.Resources.CPU.UsagePercent = cpuUsage - - // Get CPU cores - cpuCores, err := s.metrics.GetCPUCores(ctx) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get cpu cores", logtrace.Fields{logtrace.FieldError: err.Error()}) - cpuCores = 0 - } - resp.Resources.CPU.Cores = cpuCores - - // Collect memory metrics - memTotal, memUsed, memAvailable, memUsedPerc, err := s.metrics.CollectMemoryMetrics(ctx) - if err != nil 
{ - return resp, err - } - - // Convert to GB - const bytesToGB = 1024 * 1024 * 1024 - resp.Resources.Memory.TotalGB = float64(memTotal) / bytesToGB - resp.Resources.Memory.UsedGB = float64(memUsed) / bytesToGB - resp.Resources.Memory.AvailableGB = float64(memAvailable) / bytesToGB - resp.Resources.Memory.UsagePercent = memUsedPerc - - // Generate hardware summary - if cpuCores > 0 && resp.Resources.Memory.TotalGB > 0 { - resp.Resources.HardwareSummary = fmt.Sprintf("%d cores / %.0fGB RAM", cpuCores, resp.Resources.Memory.TotalGB) - } - - // Collect storage metrics - resp.Resources.Storage = s.metrics.CollectStorageMetrics(ctx, s.storagePaths) - - // Collect service information from all registered providers - resp.RunningTasks = make([]ServiceTasks, 0, len(s.taskProviders)) - resp.RegisteredServices = make([]string, 0, len(s.taskProviders)) - - for _, provider := range s.taskProviders { - serviceName := provider.GetServiceName() - tasks := provider.GetRunningTasks() - - // Add to registered services list - resp.RegisteredServices = append(resp.RegisteredServices, serviceName) - - // Add all services to running tasks (even with 0 tasks) - serviceTask := ServiceTasks{ - ServiceName: serviceName, - TaskIDs: tasks, - TaskCount: int32(len(tasks)), - } - resp.RunningTasks = append(resp.RunningTasks, serviceTask) - } - - // Initialize network info - resp.Network = NetworkInfo{ - PeersCount: 0, - PeerAddresses: []string{}, - } - - // Prepare P2P metrics container (always present in response) - metrics := P2PMetrics{ - NetworkHandleMetrics: map[string]HandleCounters{}, - ConnPoolMetrics: map[string]int64{}, - BanList: []BanEntry{}, - } - - // Collect P2P network information and metrics (fill when available and requested) - if includeP2PMetrics && s.p2pService != nil { - p2pStats, err := s.p2pService.Stats(ctx) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get p2p stats", logtrace.Fields{logtrace.FieldError: err.Error()}) - } 
else { - if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { - if peersCount, ok := dhtStats["peers_count"].(int); ok { - resp.Network.PeersCount = int32(peersCount) - } - - // Extract peer addresses - if peers, ok := dhtStats["peers"].([]*kademlia.Node); ok { - resp.Network.PeerAddresses = make([]string, 0, len(peers)) - for _, peer := range peers { - // Format peer address as "ID@IP:Port" - peerAddr := fmt.Sprintf("%s@%s:%d", string(peer.ID), peer.IP, peer.Port) - resp.Network.PeerAddresses = append(resp.Network.PeerAddresses, peerAddr) - } - } else { - resp.Network.PeerAddresses = []string{} - } - } - - // Disk info - if du, ok := p2pStats["disk-info"].(utils.DiskStatus); ok { - metrics.Disk = DiskStatus{AllMB: du.All, UsedMB: du.Used, FreeMB: du.Free} - } else if duPtr, ok := p2pStats["disk-info"].(*utils.DiskStatus); ok && duPtr != nil { - metrics.Disk = DiskStatus{AllMB: duPtr.All, UsedMB: duPtr.Used, FreeMB: duPtr.Free} - } - - // Ban list - if bans, ok := p2pStats["ban-list"].([]kademlia.BanSnapshot); ok { - for _, b := range bans { - metrics.BanList = append(metrics.BanList, BanEntry{ - ID: b.ID, - IP: b.IP, - Port: uint32(b.Port), - Count: int32(b.Count), - CreatedAtUnix: b.CreatedAt.Unix(), - AgeSeconds: int64(b.Age.Seconds()), - }) - } - } - - // Conn pool metrics - if pool, ok := p2pStats["conn-pool"].(map[string]int64); ok { - for k, v := range pool { - metrics.ConnPoolMetrics[k] = v - } - } - - // DHT metrics and database/network counters live inside dht map - if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { - // Database - if db, ok := dhtStats["database"].(map[string]interface{}); ok { - var sizeMB float64 - if v, ok := db["p2p_db_size"].(float64); ok { - sizeMB = v - } - var recs int64 - switch v := db["p2p_db_records_count"].(type) { - case int: - recs = int64(v) - case int64: - recs = v - case float64: - recs = int64(v) - } - metrics.Database = DatabaseStats{P2PDBSizeMB: sizeMB, P2PDBRecordsCount: recs} - } - - // 
Network handle metrics - if nhm, ok := dhtStats["network"].(map[string]kademlia.HandleCounters); ok { - for k, c := range nhm { - metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} - } - } else if nhmI, ok := dhtStats["network"].(map[string]interface{}); ok { - for k, vi := range nhmI { - if c, ok := vi.(kademlia.HandleCounters); ok { - metrics.NetworkHandleMetrics[k] = HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} - } - } - } - - // Recent batch store/retrieve (overall lists) - if rbs, ok := dhtStats["recent_batch_store_overall"].([]kademlia.RecentBatchStoreEntry); ok { - for _, e := range rbs { - metrics.RecentBatchStore = append(metrics.RecentBatchStore, RecentBatchStoreEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Keys: e.Keys, - DurationMS: e.DurationMS, - OK: e.OK, - Error: e.Error, - }) - } - } else if anyList, ok := dhtStats["recent_batch_store_overall"].([]interface{}); ok { - for _, vi := range anyList { - if e, ok := vi.(kademlia.RecentBatchStoreEntry); ok { - metrics.RecentBatchStore = append(metrics.RecentBatchStore, RecentBatchStoreEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Keys: e.Keys, - DurationMS: e.DurationMS, - OK: e.OK, - Error: e.Error, - }) - } - } - } - if rbr, ok := dhtStats["recent_batch_retrieve_overall"].([]kademlia.RecentBatchRetrieveEntry); ok { - for _, e := range rbr { - metrics.RecentBatchRetrieve = append(metrics.RecentBatchRetrieve, RecentBatchRetrieveEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Requested: e.Requested, - Found: e.Found, - DurationMS: e.DurationMS, - Error: e.Error, - }) - } - } else if anyList, ok := dhtStats["recent_batch_retrieve_overall"].([]interface{}); ok { - for _, vi := range anyList { - if e, ok := vi.(kademlia.RecentBatchRetrieveEntry); ok { - metrics.RecentBatchRetrieve = 
append(metrics.RecentBatchRetrieve, RecentBatchRetrieveEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Requested: e.Requested, - Found: e.Found, - DurationMS: e.DurationMS, - Error: e.Error, - }) - } - } - } - - // Per-IP buckets - if byip, ok := dhtStats["recent_batch_store_by_ip"].(map[string][]kademlia.RecentBatchStoreEntry); ok { - for ip, list := range byip { - bucket := make([]RecentBatchStoreEntry, 0, len(list)) - for _, e := range list { - bucket = append(bucket, RecentBatchStoreEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Keys: e.Keys, - DurationMS: e.DurationMS, - OK: e.OK, - Error: e.Error, - }) - } - // initialize map if needed - if metrics.RecentBatchStoreByIP == nil { - metrics.RecentBatchStoreByIP = map[string][]RecentBatchStoreEntry{} - } - metrics.RecentBatchStoreByIP[ip] = bucket - } - } - if byip, ok := dhtStats["recent_batch_retrieve_by_ip"].(map[string][]kademlia.RecentBatchRetrieveEntry); ok { - for ip, list := range byip { - bucket := make([]RecentBatchRetrieveEntry, 0, len(list)) - for _, e := range list { - bucket = append(bucket, RecentBatchRetrieveEntry{ - TimeUnix: e.TimeUnix, - SenderID: e.SenderID, - SenderIP: e.SenderIP, - Requested: e.Requested, - Found: e.Found, - DurationMS: e.DurationMS, - Error: e.Error, - }) - } - if metrics.RecentBatchRetrieveByIP == nil { - metrics.RecentBatchRetrieveByIP = map[string][]RecentBatchRetrieveEntry{} - } - metrics.RecentBatchRetrieveByIP[ip] = bucket - } - } - } - - // DHT rolling metrics snapshot is attached at top-level under dht_metrics - if snap, ok := p2pStats["dht_metrics"].(kademlia.DHTMetricsSnapshot); ok { - // Store success - for _, p := range snap.StoreSuccessRecent { - metrics.DhtMetrics.StoreSuccessRecent = append(metrics.DhtMetrics.StoreSuccessRecent, StoreSuccessPoint{ - TimeUnix: p.Time.Unix(), - Requests: int32(p.Requests), - Successful: int32(p.Successful), - SuccessRate: p.SuccessRate, - }) - } - // Batch 
retrieve - for _, p := range snap.BatchRetrieveRecent { - metrics.DhtMetrics.BatchRetrieveRecent = append(metrics.DhtMetrics.BatchRetrieveRecent, BatchRetrievePoint{ - TimeUnix: p.Time.Unix(), - Keys: int32(p.Keys), - Required: int32(p.Required), - FoundLocal: int32(p.FoundLocal), - FoundNetwork: int32(p.FoundNet), - DurationMS: p.Duration.Milliseconds(), - }) - } - metrics.DhtMetrics.HotPathBannedSkips = snap.HotPathBannedSkips - metrics.DhtMetrics.HotPathBanIncrements = snap.HotPathBanIncrements - } - } - } - - // Always include metrics (may be empty if not available) - resp.P2PMetrics = metrics - - // Calculate rank from top supernodes - if s.lumeraClient != nil && s.config != nil { - // Get current block height - blockInfo, err := s.lumeraClient.Node().GetLatestBlock(ctx) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get latest block", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - // Get top supernodes for current block - topNodes, err := s.lumeraClient.SuperNode().GetTopSuperNodesForBlock(ctx, uint64(blockInfo.SdkBlock.Header.Height)) - if err != nil { - // Log error but continue - non-critical - logtrace.Error(ctx, "failed to get top supernodes", logtrace.Fields{logtrace.FieldError: err.Error()}) - } else { - // Find our rank - for idx, node := range topNodes.Supernodes { - if node.SupernodeAccount == s.config.SupernodeConfig.Identity { - resp.Rank = int32(idx + 1) // Rank starts from 1 - break - } - } - } - } - } - - if s.config != nil && s.lumeraClient != nil { - if supernodeInfo, err := s.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, s.config.SupernodeConfig.Identity); err == nil && supernodeInfo != nil { - resp.IPAddress = supernodeInfo.LatestAddress - } - - } - - // Log summary statistics - totalTasks := 0 - for _, service := range resp.RunningTasks { - totalTasks += int(service.TaskCount) - } - - return resp, nil -} diff --git 
a/supernode/services/common/supernode/service_test.go b/supernode/services/common/supernode/service_test.go deleted file mode 100644 index e2f82287..00000000 --- a/supernode/services/common/supernode/service_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package supernode - -import ( - "context" - "testing" - - "github.com/LumeraProtocol/supernode/v2/supernode/services/common" - "github.com/stretchr/testify/assert" -) - -func TestSupernodeStatusService(t *testing.T) { - ctx := context.Background() - - t.Run("empty service", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, nil, nil) - - resp, err := statusService.GetStatus(ctx, false) - assert.NoError(t, err) - - // Should have version info - assert.NotEmpty(t, resp.Version) - - // Should have uptime - assert.True(t, resp.UptimeSeconds >= 0) - - // Should have CPU and Memory info - assert.True(t, resp.Resources.CPU.UsagePercent >= 0) - assert.True(t, resp.Resources.CPU.UsagePercent <= 100) - assert.True(t, resp.Resources.CPU.Cores >= 0) - assert.True(t, resp.Resources.Memory.TotalGB > 0) - assert.True(t, resp.Resources.Memory.UsagePercent >= 0) - assert.True(t, resp.Resources.Memory.UsagePercent <= 100) - - // Should have hardware summary if cores and memory are available - if resp.Resources.CPU.Cores > 0 && resp.Resources.Memory.TotalGB > 0 { - assert.NotEmpty(t, resp.Resources.HardwareSummary) - } - - // Should have storage info (default root filesystem) - assert.NotEmpty(t, resp.Resources.Storage) - assert.Equal(t, "/", resp.Resources.Storage[0].Path) - - // Should have empty services list - assert.Empty(t, resp.RunningTasks) - assert.Empty(t, resp.RegisteredServices) - - // Should have default values for new fields - assert.Equal(t, int32(0), resp.Network.PeersCount) - assert.Empty(t, resp.Network.PeerAddresses) - assert.Equal(t, int32(0), resp.Rank) - assert.Empty(t, resp.IPAddress) - }) - - t.Run("single service with tasks", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, 
nil, nil) - - // Register a mock task provider - mockProvider := &common.MockTaskProvider{ - ServiceName: "test-service", - TaskIDs: []string{"task1", "task2", "task3"}, - } - statusService.RegisterTaskProvider(mockProvider) - - resp, err := statusService.GetStatus(ctx, false) - assert.NoError(t, err) - - // Should have one service - assert.Len(t, resp.RunningTasks, 1) - assert.Len(t, resp.RegisteredServices, 1) - assert.Equal(t, []string{"test-service"}, resp.RegisteredServices) - - service := resp.RunningTasks[0] - assert.Equal(t, "test-service", service.ServiceName) - assert.Equal(t, int32(3), service.TaskCount) - assert.Equal(t, []string{"task1", "task2", "task3"}, service.TaskIDs) - }) - - t.Run("multiple services", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, nil, nil) - - // Register multiple mock task providers - cascadeProvider := &common.MockTaskProvider{ - ServiceName: "cascade", - TaskIDs: []string{"cascade1", "cascade2"}, - } - senseProvider := &common.MockTaskProvider{ - ServiceName: "sense", - TaskIDs: []string{"sense1"}, - } - - statusService.RegisterTaskProvider(cascadeProvider) - statusService.RegisterTaskProvider(senseProvider) - - resp, err := statusService.GetStatus(ctx, false) - assert.NoError(t, err) - - // Should have two services - assert.Len(t, resp.RunningTasks, 2) - assert.Len(t, resp.RegisteredServices, 2) - assert.Contains(t, resp.RegisteredServices, "cascade") - assert.Contains(t, resp.RegisteredServices, "sense") - - // Check services are present - serviceMap := make(map[string]ServiceTasks) - for _, service := range resp.RunningTasks { - serviceMap[service.ServiceName] = service - } - - cascade, ok := serviceMap["cascade"] - assert.True(t, ok) - assert.Equal(t, int32(2), cascade.TaskCount) - assert.Equal(t, []string{"cascade1", "cascade2"}, cascade.TaskIDs) - - sense, ok := serviceMap["sense"] - assert.True(t, ok) - assert.Equal(t, int32(1), sense.TaskCount) - assert.Equal(t, []string{"sense1"}, 
sense.TaskIDs) - }) - - t.Run("service with no tasks", func(t *testing.T) { - statusService := NewSupernodeStatusService(nil, nil, nil) - - // Register a mock task provider with no tasks - mockProvider := &common.MockTaskProvider{ - ServiceName: "empty-service", - TaskIDs: []string{}, - } - statusService.RegisterTaskProvider(mockProvider) - - resp, err := statusService.GetStatus(ctx, false) - assert.NoError(t, err) - - // Should have one service - assert.Len(t, resp.RunningTasks, 1) - assert.Len(t, resp.RegisteredServices, 1) - assert.Equal(t, []string{"empty-service"}, resp.RegisteredServices) - - service := resp.RunningTasks[0] - assert.Equal(t, "empty-service", service.ServiceName) - assert.Equal(t, int32(0), service.TaskCount) - assert.Empty(t, service.TaskIDs) - }) -} diff --git a/supernode/services/common/supernode/types.go b/supernode/services/common/supernode/types.go deleted file mode 100644 index 9a6f0953..00000000 --- a/supernode/services/common/supernode/types.go +++ /dev/null @@ -1,153 +0,0 @@ -package supernode - -// StatusResponse represents the complete system status information -// with clear organization of resources and services -type StatusResponse struct { - Version string // Supernode version - UptimeSeconds uint64 // Uptime in seconds - Resources Resources // System resource information - RunningTasks []ServiceTasks // Services with currently running tasks - RegisteredServices []string // All registered/available services - Network NetworkInfo // P2P network information - Rank int32 // Rank in the top supernodes list (0 if not in top list) - IPAddress string // Supernode IP address with port (e.g., "192.168.1.1:4445") - P2PMetrics P2PMetrics // Detailed P2P metrics snapshot -} - -// Resources contains system resource metrics -type Resources struct { - CPU CPUInfo // CPU usage information - Memory MemoryInfo // Memory usage information - Storage []StorageInfo // Storage volumes information - HardwareSummary string // Formatted hardware summary 
(e.g., "8 cores / 32GB RAM") -} - -// CPUInfo contains CPU usage metrics -type CPUInfo struct { - UsagePercent float64 // CPU usage percentage (0-100) - Cores int32 // Number of CPU cores -} - -// MemoryInfo contains memory usage metrics -type MemoryInfo struct { - TotalGB float64 // Total memory in GB - UsedGB float64 // Used memory in GB - AvailableGB float64 // Available memory in GB - UsagePercent float64 // Memory usage percentage (0-100) -} - -// StorageInfo contains storage metrics for a specific path -type StorageInfo struct { - Path string // Storage path being monitored - TotalBytes uint64 // Total storage in bytes - UsedBytes uint64 // Used storage in bytes - AvailableBytes uint64 // Available storage in bytes - UsagePercent float64 // Storage usage percentage (0-100) -} - -// ServiceTasks contains task information for a specific service -type ServiceTasks struct { - ServiceName string // Name of the service (e.g., "cascade") - TaskIDs []string // List of currently running task IDs - TaskCount int32 // Total number of running tasks -} - -// NetworkInfo contains P2P network information -type NetworkInfo struct { - PeersCount int32 // Number of connected peers in P2P network - PeerAddresses []string // List of connected peer addresses (optional, may be empty for privacy) -} - -// P2PMetrics mirrors the proto P2P metrics for status API -type P2PMetrics struct { - DhtMetrics DhtMetrics - NetworkHandleMetrics map[string]HandleCounters - ConnPoolMetrics map[string]int64 - BanList []BanEntry - Database DatabaseStats - Disk DiskStatus - RecentBatchStore []RecentBatchStoreEntry - RecentBatchRetrieve []RecentBatchRetrieveEntry - RecentBatchStoreByIP map[string][]RecentBatchStoreEntry - RecentBatchRetrieveByIP map[string][]RecentBatchRetrieveEntry -} - -type StoreSuccessPoint struct { - TimeUnix int64 - Requests int32 - Successful int32 - SuccessRate float64 -} - -type BatchRetrievePoint struct { - TimeUnix int64 - Keys int32 - Required int32 - FoundLocal int32 - 
FoundNetwork int32 - DurationMS int64 -} - -type DhtMetrics struct { - StoreSuccessRecent []StoreSuccessPoint - BatchRetrieveRecent []BatchRetrievePoint - HotPathBannedSkips int64 - HotPathBanIncrements int64 -} - -type HandleCounters struct { - Total int64 - Success int64 - Failure int64 - Timeout int64 -} - -type BanEntry struct { - ID string - IP string - Port uint32 - Count int32 - CreatedAtUnix int64 - AgeSeconds int64 -} - -type DatabaseStats struct { - P2PDBSizeMB float64 - P2PDBRecordsCount int64 -} - -type DiskStatus struct { - AllMB float64 - UsedMB float64 - FreeMB float64 -} - -type RecentBatchStoreEntry struct { - TimeUnix int64 - SenderID string - SenderIP string - Keys int - DurationMS int64 - OK bool - Error string -} - -type RecentBatchRetrieveEntry struct { - TimeUnix int64 - SenderID string - SenderIP string - Requested int - Found int - DurationMS int64 - Error string -} - -// TaskProvider interface defines the contract for services to provide -// their running task information to the status service -type TaskProvider interface { - // GetServiceName returns the unique name identifier for this service - GetServiceName() string - - // GetRunningTasks returns a list of currently active task IDs - GetRunningTasks() []string -} diff --git a/supernode/services/common/task_status.go b/supernode/services/common/task_status.go deleted file mode 100644 index 22b63b7a..00000000 --- a/supernode/services/common/task_status.go +++ /dev/null @@ -1,51 +0,0 @@ -package common - -// List of task statuses. 
-const ( - StatusTaskStarted Status = iota - // Mode - StatusPrimaryMode - StatusSecondaryMode - - // Process - StatusConnected - - // Final - StatusTaskCanceled - StatusTaskCompleted -) - -var statusNames = map[Status]string{ - StatusTaskStarted: "Task started", - StatusTaskCanceled: "Task Canceled", - StatusTaskCompleted: "Task Completed", -} - -// Status represents status of the task -type Status byte - -func (status Status) String() string { - if name, ok := statusNames[status]; ok { - return name - } - return "" -} - -// IsFinal returns true if the status is the final. -func (status Status) IsFinal() bool { - return status == StatusTaskCanceled || status == StatusTaskCompleted -} - -// IsFailure returns true if the task failed due to an error -func (status Status) IsFailure() bool { - return status == StatusTaskCanceled -} - -// StatusNames returns a sorted list of status names. -func StatusNames() []string { - list := make([]string, len(statusNames)) - for i, name := range statusNames { - list[i] = name - } - return list -} diff --git a/supernode/services/common/task_status_test.go b/supernode/services/common/task_status_test.go deleted file mode 100644 index b9853120..00000000 --- a/supernode/services/common/task_status_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package common - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestStatus_String(t *testing.T) { - tests := []struct { - status Status - expected string - }{ - {StatusTaskStarted, "Task started"}, - {StatusTaskCanceled, "Task Canceled"}, - {StatusTaskCompleted, "Task Completed"}, - {StatusPrimaryMode, ""}, - {StatusSecondaryMode, ""}, - {StatusConnected, ""}, - {Status(255), ""}, // unknown status - } - - for _, tt := range tests { - assert.Equal(t, tt.expected, tt.status.String(), "Status.String() should match expected name") - } -} - -func TestStatus_IsFinal(t *testing.T) { - tests := []struct { - status Status - expected bool - }{ - {StatusTaskStarted, false}, - 
{StatusPrimaryMode, false}, - {StatusSecondaryMode, false}, - {StatusConnected, false}, - {StatusTaskCanceled, true}, - {StatusTaskCompleted, true}, - } - - for _, tt := range tests { - assert.Equal(t, tt.expected, tt.status.IsFinal(), "Status.IsFinal() mismatch") - } -} - -func TestStatus_IsFailure(t *testing.T) { - tests := []struct { - status Status - expected bool - }{ - {StatusTaskStarted, false}, - {StatusTaskCanceled, true}, - {StatusTaskCompleted, false}, - } - - for _, tt := range tests { - assert.Equal(t, tt.expected, tt.status.IsFailure(), "Status.IsFailure() mismatch") - } -} diff --git a/supernode/services/common/test_helpers.go b/supernode/services/common/test_helpers.go deleted file mode 100644 index c49b940a..00000000 --- a/supernode/services/common/test_helpers.go +++ /dev/null @@ -1,15 +0,0 @@ -package common - -// MockTaskProvider for testing (exported for use in other packages) -type MockTaskProvider struct { - ServiceName string - TaskIDs []string -} - -func (m *MockTaskProvider) GetServiceName() string { - return m.ServiceName -} - -func (m *MockTaskProvider) GetRunningTasks() []string { - return m.TaskIDs -} diff --git a/supernode/services/verifier/interface.go b/supernode/services/verifier/interface.go deleted file mode 100644 index 7414201a..00000000 --- a/supernode/services/verifier/interface.go +++ /dev/null @@ -1,55 +0,0 @@ -package verifier - -import ( - "context" - "strings" -) - -// ConfigVerifierService defines the interface for config verification service -type ConfigVerifierService interface { - // VerifyConfig performs comprehensive config validation against chain - VerifyConfig(ctx context.Context) (*VerificationResult, error) -} - -// VerificationResult contains the results of config verification -type VerificationResult struct { - Valid bool `json:"valid"` - Errors []ConfigError `json:"errors,omitempty"` - Warnings []ConfigError `json:"warnings,omitempty"` -} - -// ConfigError represents a configuration validation error or 
warning -type ConfigError struct { - Field string `json:"field"` - Expected string `json:"expected,omitempty"` - Actual string `json:"actual,omitempty"` - Message string `json:"message"` -} - -// IsValid returns true if all verifications passed -func (vr *VerificationResult) IsValid() bool { - return vr.Valid && len(vr.Errors) == 0 -} - -// HasWarnings returns true if there are any warnings -func (vr *VerificationResult) HasWarnings() bool { - return len(vr.Warnings) > 0 -} - -// Summary returns a human-readable summary of verification results -func (vr *VerificationResult) Summary() string { - if vr.IsValid() && !vr.HasWarnings() { - return "✓ Config verification successful" - } - - var summary string - for _, err := range vr.Errors { - summary += "✗ " + err.Message + "\n" - } - - for _, warn := range vr.Warnings { - summary += "⚠ " + warn.Message + "\n" - } - - return strings.TrimSuffix(summary, "\n") -} diff --git a/supernode/services/verifier/verifier.go b/supernode/services/verifier/verifier.go deleted file mode 100644 index 867bd966..00000000 --- a/supernode/services/verifier/verifier.go +++ /dev/null @@ -1,222 +0,0 @@ -package verifier - -import ( - "context" - "fmt" - "net" - - "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" - "github.com/LumeraProtocol/supernode/v2/pkg/lumera" - snmodule "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" - "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// ConfigVerifier implements ConfigVerifierService -type ConfigVerifier struct { - config *config.Config - lumeraClient lumera.Client - keyring keyring.Keyring -} - -// NewConfigVerifier creates a new config verifier service -func NewConfigVerifier(cfg *config.Config, client lumera.Client, kr keyring.Keyring) ConfigVerifierService { - return &ConfigVerifier{ - config: cfg, - lumeraClient: client, - keyring: kr, - } -} - -// VerifyConfig 
performs comprehensive config validation against chain -func (cv *ConfigVerifier) VerifyConfig(ctx context.Context) (*VerificationResult, error) { - result := &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - } - - logtrace.Debug(ctx, "Starting config verification", logtrace.Fields{ - "identity": cv.config.SupernodeConfig.Identity, - "key_name": cv.config.SupernodeConfig.KeyName, - "p2p_port": cv.config.P2PConfig.Port, - }) - - // Check 1: Verify keyring contains the key - if err := cv.checkKeyExists(result); err != nil { - return result, err - } - - // Check 2: Verify key resolves to correct identity - if err := cv.checkIdentityMatches(result); err != nil { - return result, err - } - - // If keyring checks failed, don't proceed with chain queries - if !result.IsValid() { - return result, nil - } - - // Check 3: Query chain for supernode registration - supernodeInfo, err := cv.checkSupernodeExists(ctx, result) - if err != nil { - return result, err - } - - // If supernode doesn't exist, don't proceed with field comparisons - if supernodeInfo == nil { - return result, nil - } - - // Check 4: Verify supernode state is active - cv.checkSupernodeState(result, supernodeInfo) - - // Check 5: Verify all required ports are available - cv.checkPortsAvailable(result) - - logtrace.Info(ctx, "Config verification completed", logtrace.Fields{ - "valid": result.IsValid(), - "errors": len(result.Errors), - "warnings": len(result.Warnings), - }) - - return result, nil -} - -// checkKeyExists verifies the configured key exists in keyring -func (cv *ConfigVerifier) checkKeyExists(result *VerificationResult) error { - _, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "key_name", - Actual: cv.config.SupernodeConfig.KeyName, - Message: fmt.Sprintf("Key '%s' not found in keyring", cv.config.SupernodeConfig.KeyName), - }) - } - return 
nil -} - -// checkIdentityMatches verifies key resolves to configured identity -func (cv *ConfigVerifier) checkIdentityMatches(result *VerificationResult) error { - keyInfo, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) - if err != nil { - // Already handled in checkKeyExists - return nil - } - - pubKey, err := keyInfo.GetPubKey() - if err != nil { - return fmt.Errorf("failed to get public key for key '%s': %w", cv.config.SupernodeConfig.KeyName, err) - } - - addr := sdk.AccAddress(pubKey.Address()) - if addr.String() != cv.config.SupernodeConfig.Identity { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "identity", - Expected: addr.String(), - Actual: cv.config.SupernodeConfig.Identity, - Message: fmt.Sprintf("Key '%s' resolves to %s but config identity is %s", cv.config.SupernodeConfig.KeyName, addr.String(), cv.config.SupernodeConfig.Identity), - }) - } - return nil -} - -// checkSupernodeExists queries chain for supernode registration -func (cv *ConfigVerifier) checkSupernodeExists(ctx context.Context, result *VerificationResult) (*snmodule.SuperNodeInfo, error) { - sn, err := cv.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, cv.config.SupernodeConfig.Identity) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "registration", - Actual: "not_registered", - Message: fmt.Sprintf("Supernode not registered on chain for address %s", cv.config.SupernodeConfig.Identity), - }) - return nil, nil - } - return sn, nil -} - -// checkP2PPortMatches compares config P2P port with chain -func (cv *ConfigVerifier) checkP2PPortMatches(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { - configPort := fmt.Sprintf("%d", cv.config.P2PConfig.Port) - chainPort := supernodeInfo.P2PPort - - if chainPort != "" && chainPort != configPort { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "p2p_port", - Expected: chainPort, - 
Actual: configPort, - Message: fmt.Sprintf("P2P port mismatch: config=%s, chain=%s", configPort, chainPort), - }) - } -} - -// checkSupernodeState verifies supernode is in active state -func (cv *ConfigVerifier) checkSupernodeState(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { - if supernodeInfo.CurrentState != "" && supernodeInfo.CurrentState != "SUPERNODE_STATE_ACTIVE" { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "state", - Expected: "SUPERNODE_STATE_ACTIVE", - Actual: supernodeInfo.CurrentState, - Message: fmt.Sprintf("Supernode state is %s (expected ACTIVE)", supernodeInfo.CurrentState), - }) - } -} - -// checkPortsAvailable verifies that all required ports are available for binding -func (cv *ConfigVerifier) checkPortsAvailable(result *VerificationResult) { - // Check supernode port - if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.SupernodeConfig.Port)) { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "supernode_port", - Actual: fmt.Sprintf("%d", cv.config.SupernodeConfig.Port), - Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.SupernodeConfig.Port), - }) - } - - // Check P2P port - if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.P2PConfig.Port)) { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "p2p_port", - Actual: fmt.Sprintf("%d", cv.config.P2PConfig.Port), - Message: fmt.Sprintf("Port %d is already in use. 
Please stop the conflicting service or choose a different port", cv.config.P2PConfig.Port), - }) - } - - // Check gateway port (use configured port or default port 8002) - gatewayPort := int(cv.config.SupernodeConfig.GatewayPort) - if gatewayPort == 0 { - gatewayPort = 8002 // Default gateway port (same as gateway.DefaultGatewayPort) - } - - if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, gatewayPort) { - result.Valid = false - result.Errors = append(result.Errors, ConfigError{ - Field: "gateway_port", - Actual: fmt.Sprintf("%d", gatewayPort), - Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", gatewayPort), - }) - } -} - -// isPortAvailable checks if a port is available for binding -func (cv *ConfigVerifier) isPortAvailable(host string, port int) bool { - address := fmt.Sprintf("%s:%d", host, port) - - // Try to listen on the port - listener, err := net.Listen("tcp", address) - if err != nil { - return false // Port is not available - } - - // Close the listener immediately since we're just checking availability - listener.Close() - return true // Port is available -} diff --git a/supernode/services/verifier/verifier_test.go b/supernode/services/verifier/verifier_test.go deleted file mode 100644 index 56fd3fb7..00000000 --- a/supernode/services/verifier/verifier_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package verifier - -import ( - "net" - "strconv" - "testing" - - "github.com/LumeraProtocol/supernode/v2/supernode/config" - "github.com/stretchr/testify/assert" -) - -func TestNewConfigVerifier(t *testing.T) { - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "192.168.1.100", - }, - P2PConfig: config.P2PConfig{ - Port: 4445, - }, - } - - // Test that NewConfigVerifier returns a non-nil service - verifier := NewConfigVerifier(cfg, nil, nil) - assert.NotNil(t, verifier) - assert.Implements(t, 
(*ConfigVerifierService)(nil), verifier) -} - -func TestVerificationResult_IsValid(t *testing.T) { - tests := []struct { - name string - result *VerificationResult - expected bool - }{ - { - name: "valid with no errors", - result: &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - }, - expected: true, - }, - { - name: "invalid with errors", - result: &VerificationResult{ - Valid: false, - Errors: []ConfigError{ - {Message: "test error"}, - }, - }, - expected: false, - }, - { - name: "valid flag true but has errors", - result: &VerificationResult{ - Valid: true, - Errors: []ConfigError{ - {Message: "test error"}, - }, - }, - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expected, tt.result.IsValid()) - }) - } -} - -func TestVerificationResult_HasWarnings(t *testing.T) { - tests := []struct { - name string - result *VerificationResult - expected bool - }{ - { - name: "no warnings", - result: &VerificationResult{ - Warnings: []ConfigError{}, - }, - expected: false, - }, - { - name: "has warnings", - result: &VerificationResult{ - Warnings: []ConfigError{ - {Message: "test warning"}, - }, - }, - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expected, tt.result.HasWarnings()) - }) - } -} - -func TestVerificationResult_Summary(t *testing.T) { - tests := []struct { - name string - result *VerificationResult - contains []string - }{ - { - name: "success with no warnings", - result: &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - }, - contains: []string{"✓ Config verification successful"}, - }, - { - name: "error message", - result: &VerificationResult{ - Valid: false, - Errors: []ConfigError{ - { - Message: "Key not found", - }, - }, - }, - contains: []string{"✗ Key not found"}, - }, - { - name: "warning message", - result: &VerificationResult{ - Valid: true, - Errors: 
[]ConfigError{}, - Warnings: []ConfigError{ - { - Message: "Host mismatch: config=localhost, chain=192.168.1.1", - }, - }, - }, - contains: []string{"⚠ Host mismatch: config=localhost, chain=192.168.1.1"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - summary := tt.result.Summary() - for _, expected := range tt.contains { - assert.Contains(t, summary, expected) - } - }) - } -} - -func TestConfigVerifier_isPortAvailable(t *testing.T) { - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "127.0.0.1", - }, - } - - verifier := NewConfigVerifier(cfg, nil, nil).(*ConfigVerifier) - - // Test available port - available := verifier.isPortAvailable("127.0.0.1", 0) // Port 0 lets OS choose available port - assert.True(t, available) - - // Test unavailable port by creating a listener - listener, err := net.Listen("tcp", "127.0.0.1:0") - assert.NoError(t, err) - defer listener.Close() - - // Extract the port that was assigned - _, portStr, err := net.SplitHostPort(listener.Addr().String()) - assert.NoError(t, err) - port, err := strconv.Atoi(portStr) - assert.NoError(t, err) - - // Now test that this port is not available - available = verifier.isPortAvailable("127.0.0.1", port) - assert.False(t, available) -} - -func TestConfigVerifier_checkPortsAvailable(t *testing.T) { - // Create a listener to occupy a port - listener, err := net.Listen("tcp", "127.0.0.1:0") - assert.NoError(t, err) - defer listener.Close() - - // Extract the port that was assigned - _, portStr, err := net.SplitHostPort(listener.Addr().String()) - assert.NoError(t, err) - port, err := strconv.Atoi(portStr) - assert.NoError(t, err) - - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "127.0.0.1", - Port: uint16(port), // Use the occupied port - }, - P2PConfig: config.P2PConfig{ - Port: 0, // Available port - }, - } 
- - verifier := NewConfigVerifier(cfg, nil, nil).(*ConfigVerifier) - result := &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - } - - verifier.checkPortsAvailable(result) - - // Should have error for supernode port being unavailable - assert.False(t, result.IsValid()) - assert.Len(t, result.Errors, 1) - assert.Equal(t, "supernode_port", result.Errors[0].Field) - assert.Contains(t, result.Errors[0].Message, "already in use") -} - -func TestConfigVerifier_checkPortsAvailable_DefaultGatewayPort(t *testing.T) { - // Create a listener to occupy the default gateway port 8002 - listener, err := net.Listen("tcp", "127.0.0.1:8002") - assert.NoError(t, err) - defer listener.Close() - - cfg := &config.Config{ - SupernodeConfig: config.SupernodeConfig{ - Identity: "lumera1testaddress", - KeyName: "test-key", - Host: "127.0.0.1", - Port: 4444, // Available port - GatewayPort: 0, // Not configured, should use default 8002 - }, - P2PConfig: config.P2PConfig{ - Port: 4445, // Available port - }, - } - - verifier := NewConfigVerifier(cfg, nil, nil).(*ConfigVerifier) - result := &VerificationResult{ - Valid: true, - Errors: []ConfigError{}, - Warnings: []ConfigError{}, - } - - verifier.checkPortsAvailable(result) - - // Should have error for default gateway port being unavailable - assert.False(t, result.IsValid()) - assert.Len(t, result.Errors, 1) - assert.Equal(t, "gateway_port", result.Errors[0].Field) - assert.Equal(t, "8002", result.Errors[0].Actual) - assert.Contains(t, result.Errors[0].Message, "Port 8002 is already in use") -} diff --git a/supernode/services/common/supernode/metrics.go b/supernode/status/metrics.go similarity index 74% rename from supernode/services/common/supernode/metrics.go rename to supernode/status/metrics.go index 6c36ab35..ff29d100 100644 --- a/supernode/services/common/supernode/metrics.go +++ b/supernode/status/metrics.go @@ -1,4 +1,4 @@ -package supernode +package status import ( "context" @@ -14,19 
+14,15 @@ import ( type MetricsCollector struct{} // NewMetricsCollector creates a new metrics collector instance -func NewMetricsCollector() *MetricsCollector { - return &MetricsCollector{} -} +func NewMetricsCollector() *MetricsCollector { return &MetricsCollector{} } // CollectCPUMetrics gathers CPU usage information -// Returns usage percentage as a float64 func (m *MetricsCollector) CollectCPUMetrics(ctx context.Context) (float64, error) { percentages, err := cpu.Percent(time.Second, false) if err != nil { logtrace.Error(ctx, "failed to get cpu info", logtrace.Fields{logtrace.FieldError: err.Error()}) return 0, err } - return percentages[0], nil } @@ -37,49 +33,41 @@ func (m *MetricsCollector) GetCPUCores(ctx context.Context) (int32, error) { logtrace.Error(ctx, "failed to get cpu core count", logtrace.Fields{logtrace.FieldError: err.Error()}) return 0, err } - return int32(cores), nil } // CollectMemoryMetrics gathers memory usage information -// Returns memory statistics including total, used, available, and usage percentage func (m *MetricsCollector) CollectMemoryMetrics(ctx context.Context) (total, used, available uint64, usedPerc float64, err error) { vmem, err := mem.VirtualMemory() if err != nil { logtrace.Error(ctx, "failed to get memory info", logtrace.Fields{logtrace.FieldError: err.Error()}) return 0, 0, 0, 0, err } - return vmem.Total, vmem.Used, vmem.Available, vmem.UsedPercent, nil } +// StorageInfo holds disk usage stats +type StorageInfo struct { + Path string + TotalBytes uint64 + UsedBytes uint64 + AvailableBytes uint64 + UsagePercent float64 +} + // CollectStorageMetrics gathers storage usage information for specified paths -// If paths is empty, it will collect metrics for the root filesystem func (m *MetricsCollector) CollectStorageMetrics(ctx context.Context, paths []string) []StorageInfo { if len(paths) == 0 { - // Default to root filesystem paths = []string{"/"} } - var storageInfos []StorageInfo for _, path := range paths { usage, err 
:= disk.Usage(path) if err != nil { - logtrace.Error(ctx, "failed to get storage info", logtrace.Fields{ - logtrace.FieldError: err.Error(), - "path": path, - }) - continue // Skip this path but continue with others + logtrace.Error(ctx, "failed to get storage info", logtrace.Fields{logtrace.FieldError: err.Error(), "path": path}) + continue } - - storageInfos = append(storageInfos, StorageInfo{ - Path: path, - TotalBytes: usage.Total, - UsedBytes: usage.Used, - AvailableBytes: usage.Free, - UsagePercent: usage.UsedPercent, - }) + storageInfos = append(storageInfos, StorageInfo{Path: path, TotalBytes: usage.Total, UsedBytes: usage.Used, AvailableBytes: usage.Free, UsagePercent: usage.UsedPercent}) } - return storageInfos } diff --git a/supernode/status/service.go b/supernode/status/service.go new file mode 100644 index 00000000..0645385f --- /dev/null +++ b/supernode/status/service.go @@ -0,0 +1,226 @@ +package status + +import ( + "context" + "fmt" + "time" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/p2p/kademlia" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/task" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/LumeraProtocol/supernode/v2/supernode/config" +) + +// Version is the supernode version, set by the main application +var Version = "dev" + +const statusSubsystemTimeout = 8 * time.Second + +// SupernodeStatusService provides centralized status information +type SupernodeStatusService struct { + metrics *MetricsCollector + storagePaths []string + startTime time.Time + p2pService p2p.Client + lumeraClient lumera.Client + config *config.Config + tracker task.Tracker +} + +// NewSupernodeStatusService creates a new supernode status service instance +func NewSupernodeStatusService(p2pService p2p.Client, lumeraClient 
lumera.Client, cfg *config.Config, tracker task.Tracker) *SupernodeStatusService { + return &SupernodeStatusService{metrics: NewMetricsCollector(), storagePaths: []string{"/"}, startTime: time.Now(), p2pService: p2pService, lumeraClient: lumeraClient, config: cfg, tracker: tracker} +} + +// GetChainID returns the chain ID from the configuration +func (s *SupernodeStatusService) GetChainID() string { + if s.config != nil { + return s.config.LumeraClientConfig.ChainID + } + return "" +} + +// GetStatus returns the current system status including optional P2P info +func (s *SupernodeStatusService) GetStatus(ctx context.Context, includeP2PMetrics bool) (*pb.StatusResponse, error) { + fields := logtrace.Fields{logtrace.FieldMethod: "GetStatus", logtrace.FieldModule: "SupernodeStatusService"} + logtrace.Debug(ctx, "status request received", fields) + + resp := &pb.StatusResponse{} + resp.Version = Version + resp.UptimeSeconds = uint64(time.Since(s.startTime).Seconds()) + + cpuUsage, err := s.metrics.CollectCPUMetrics(ctx) + if err != nil { + return resp, err + } + if resp.Resources == nil { + resp.Resources = &pb.StatusResponse_Resources{} + } + if resp.Resources.Cpu == nil { + resp.Resources.Cpu = &pb.StatusResponse_Resources_CPU{} + } + resp.Resources.Cpu.UsagePercent = cpuUsage + cores, err := s.metrics.GetCPUCores(ctx) + if err != nil { + logtrace.Error(ctx, "failed to get cpu cores", logtrace.Fields{logtrace.FieldError: err.Error()}) + cores = 0 + } + resp.Resources.Cpu.Cores = cores + memTotal, memUsed, memAvail, memUsedPerc, err := s.metrics.CollectMemoryMetrics(ctx) + if err != nil { + return resp, err + } + const bytesToGB = 1024 * 1024 * 1024 + if resp.Resources.Memory == nil { + resp.Resources.Memory = &pb.StatusResponse_Resources_Memory{} + } + resp.Resources.Memory.TotalGb = float64(memTotal) / bytesToGB + resp.Resources.Memory.UsedGb = float64(memUsed) / bytesToGB + resp.Resources.Memory.AvailableGb = float64(memAvail) / bytesToGB + 
resp.Resources.Memory.UsagePercent = memUsedPerc + if cores > 0 && resp.Resources.Memory.TotalGb > 0 { + resp.Resources.HardwareSummary = fmt.Sprintf("%d cores / %.0fGB RAM", cores, resp.Resources.Memory.TotalGb) + } + // Storage metrics + for _, si := range s.metrics.CollectStorageMetrics(ctx, s.storagePaths) { + resp.Resources.StorageVolumes = append(resp.Resources.StorageVolumes, &pb.StatusResponse_Resources_Storage{ + Path: si.Path, + TotalBytes: si.TotalBytes, + UsedBytes: si.UsedBytes, + AvailableBytes: si.AvailableBytes, + UsagePercent: si.UsagePercent, + }) + } + + if resp.Network == nil { + resp.Network = &pb.StatusResponse_Network{} + } + resp.Network.PeersCount = 0 + resp.Network.PeerAddresses = []string{} + + // Populate running tasks from injected tracker + if s.tracker != nil { + snap := s.tracker.Snapshot() + if len(snap) > 0 { + for svc, ids := range snap { + resp.RunningTasks = append(resp.RunningTasks, &pb.StatusResponse_ServiceTasks{ + ServiceName: svc, + TaskIds: ids, + TaskCount: int32(len(ids)), + }) + } + } + } + + // Prepare optional P2P metrics container + pm := &pb.StatusResponse_P2PMetrics{ + DhtMetrics: &pb.StatusResponse_P2PMetrics_DhtMetrics{}, + NetworkHandleMetrics: map[string]*pb.StatusResponse_P2PMetrics_HandleCounters{}, + ConnPoolMetrics: map[string]int64{}, + BanList: []*pb.StatusResponse_P2PMetrics_BanEntry{}, + Database: &pb.StatusResponse_P2PMetrics_DatabaseStats{}, + Disk: &pb.StatusResponse_P2PMetrics_DiskStatus{}, + } + + if includeP2PMetrics && s.p2pService != nil { + // Bound P2P metrics collection so status can't hang if P2P is slow + p2pCtx, cancel := context.WithTimeout(ctx, statusSubsystemTimeout) + defer cancel() + p2pStats, err := s.p2pService.Stats(p2pCtx) + if err != nil { + logtrace.Error(ctx, "failed to get p2p stats", logtrace.Fields{logtrace.FieldError: err.Error()}) + } else { + if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { + if peersCount, ok := dhtStats["peers_count"].(int); ok { + 
resp.Network.PeersCount = int32(peersCount) + } + if peers, ok := dhtStats["peers"].([]*kademlia.Node); ok { + resp.Network.PeerAddresses = make([]string, 0, len(peers)) + for _, peer := range peers { + resp.Network.PeerAddresses = append(resp.Network.PeerAddresses, fmt.Sprintf("%s@%s:%d", string(peer.ID), peer.IP, peer.Port)) + } + } else { + resp.Network.PeerAddresses = []string{} + } + } + if du, ok := p2pStats["disk-info"].(utils.DiskStatus); ok { + pm.Disk.AllMb = du.All + pm.Disk.UsedMb = du.Used + pm.Disk.FreeMb = du.Free + } else if duPtr, ok := p2pStats["disk-info"].(*utils.DiskStatus); ok && duPtr != nil { + pm.Disk.AllMb = duPtr.All + pm.Disk.UsedMb = duPtr.Used + pm.Disk.FreeMb = duPtr.Free + } + if bans, ok := p2pStats["ban-list"].([]kademlia.BanSnapshot); ok { + for _, b := range bans { + pm.BanList = append(pm.BanList, &pb.StatusResponse_P2PMetrics_BanEntry{Id: b.ID, Ip: b.IP, Port: uint32(b.Port), Count: int32(b.Count), CreatedAtUnix: b.CreatedAt.Unix(), AgeSeconds: int64(b.Age.Seconds())}) + } + } + if pool, ok := p2pStats["conn-pool"].(map[string]int64); ok { + for k, v := range pool { + pm.ConnPoolMetrics[k] = v + } + } + if dhtStats, ok := p2pStats["dht"].(map[string]interface{}); ok { + if db, ok := dhtStats["database"].(map[string]interface{}); ok { + var sizeMB float64 + if v, ok := db["p2p_db_size"].(float64); ok { + sizeMB = v + } + var recs int64 + switch v := db["p2p_db_records_count"].(type) { + case int: + recs = int64(v) + case int64: + recs = v + case float64: + recs = int64(v) + } + pm.Database.P2PDbSizeMb = sizeMB + pm.Database.P2PDbRecordsCount = recs + } + if nhm, ok := dhtStats["network"].(map[string]kademlia.HandleCounters); ok { + for k, c := range nhm { + pm.NetworkHandleMetrics[k] = &pb.StatusResponse_P2PMetrics_HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} + } + } else if nhmI, ok := dhtStats["network"].(map[string]interface{}); ok { + for k, vi := range nhmI { + if c, ok := 
vi.(kademlia.HandleCounters); ok { + pm.NetworkHandleMetrics[k] = &pb.StatusResponse_P2PMetrics_HandleCounters{Total: c.Total, Success: c.Success, Failure: c.Failure, Timeout: c.Timeout} + } + } + } + } + if snap, ok := p2pStats["dht_metrics"].(kademlia.DHTMetricsSnapshot); ok { + for _, sp := range snap.StoreSuccessRecent { + pm.DhtMetrics.StoreSuccessRecent = append(pm.DhtMetrics.StoreSuccessRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_StoreSuccessPoint{TimeUnix: sp.Time.Unix(), Requests: int32(sp.Requests), Successful: int32(sp.Successful), SuccessRate: sp.SuccessRate}) + } + for _, bp := range snap.BatchRetrieveRecent { + pm.DhtMetrics.BatchRetrieveRecent = append(pm.DhtMetrics.BatchRetrieveRecent, &pb.StatusResponse_P2PMetrics_DhtMetrics_BatchRetrievePoint{TimeUnix: bp.Time.Unix(), Keys: int32(bp.Keys), Required: int32(bp.Required), FoundLocal: int32(bp.FoundLocal), FoundNetwork: int32(bp.FoundNet), DurationMs: bp.Duration.Milliseconds()}) + } + pm.DhtMetrics.HotPathBannedSkips = snap.HotPathBannedSkips + pm.DhtMetrics.HotPathBanIncrements = snap.HotPathBanIncrements + } + } + } + if includeP2PMetrics { + resp.P2PMetrics = pm + } + + if s.config != nil && s.lumeraClient != nil { + // Bound chain query for latest address to avoid slow network hangs + chainCtx, cancel := context.WithTimeout(ctx, statusSubsystemTimeout) + defer cancel() + if supernodeInfo, err := s.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(chainCtx, s.config.SupernodeConfig.Identity); err == nil && supernodeInfo != nil { + resp.IpAddress = supernodeInfo.LatestAddress + } else if err != nil { + logtrace.Error(ctx, "failed to resolve latest supernode address", logtrace.Fields{logtrace.FieldError: err.Error()}) + } + } + return resp, nil +} diff --git a/supernode/transport/gateway/server.go b/supernode/transport/gateway/server.go new file mode 100644 index 00000000..5c4df034 --- /dev/null +++ b/supernode/transport/gateway/server.go @@ -0,0 +1,236 @@ +package gateway + +import ( + 
"context" + "fmt" + "net" + "net/http" + _ "net/http/pprof" + "os" + "strconv" + "strings" + "time" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "google.golang.org/protobuf/encoding/protojson" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" +) + +// DefaultGatewayPort is an uncommon port for internal gateway use +const DefaultGatewayPort = 8002 + +// Server represents the HTTP gateway server +type Server struct { + ipAddress string + port int + server *http.Server + supernodeServer pb.SupernodeServiceServer + chainID string + pprofEnabled bool +} + +// NewServer creates a new HTTP gateway server that directly calls the service +// If port is 0, it will use the default port +func NewServer(ipAddress string, port int, supernodeServer pb.SupernodeServiceServer) (*Server, error) { + if supernodeServer == nil { + return nil, fmt.Errorf("supernode server is required") + } + + // Use default port if not specified + if port == 0 { + port = DefaultGatewayPort + } + + return &Server{ + ipAddress: ipAddress, + port: port, + supernodeServer: supernodeServer, + }, nil +} + +// NewServerWithConfig creates a new HTTP gateway server with additional configuration +func NewServerWithConfig(ipAddress string, port int, supernodeServer pb.SupernodeServiceServer, chainID string) (*Server, error) { + if supernodeServer == nil { + return nil, fmt.Errorf("supernode server is required") + } + + // Use default port if not specified + if port == 0 { + port = DefaultGatewayPort + } + + // Determine if pprof should be enabled + pprofEnabled := strings.Contains(strings.ToLower(chainID), "testnet") || os.Getenv("ENABLE_PPROF") == "true" + + return &Server{ + ipAddress: ipAddress, + port: port, + supernodeServer: supernodeServer, + chainID: chainID, + pprofEnabled: pprofEnabled, + }, nil +} + +// Run starts the HTTP gateway server (implements service interface) +func (s *Server) Run(ctx context.Context) error { + // 
Create gRPC-Gateway mux with custom JSON marshaler options + mux := runtime.NewServeMux( + runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{ + MarshalOptions: protojson.MarshalOptions{ + EmitUnpopulated: true, // This ensures zero values are included + UseProtoNames: true, // Use original proto field names + }, + }), + ) + + // Register the service handler directly + err := pb.RegisterSupernodeServiceHandlerServer(ctx, mux, s.supernodeServer) + if err != nil { + return fmt.Errorf("failed to register gateway handler: %w", err) + } + + // Create HTTP mux for custom endpoints + httpMux := http.NewServeMux() + + // Register raw pprof endpoints BEFORE the gRPC gateway to intercept them + // These must be registered before the /api/ handler to take precedence + if s.pprofEnabled { + // Raw pprof endpoints that return actual pprof data (not JSON) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/heap", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/goroutine", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/allocs", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/block", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/mutex", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/threadcreate", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/profile", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/cmdline", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/symbol", s.rawPprofHandler) + httpMux.HandleFunc("/api/v1/debug/raw/pprof/trace", s.rawPprofHandler) + } + + // Register gRPC-Gateway endpoints + httpMux.Handle("/api/", mux) + + // Register Swagger endpoints + httpMux.HandleFunc("/swagger.json", s.serveSwaggerJSON) + httpMux.HandleFunc("/swagger-ui/", s.serveSwaggerUI) + + // Register pprof endpoints (only on testnet) + if s.pprofEnabled { + 
httpMux.HandleFunc("/debug/pprof/", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/cmdline", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/profile", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/symbol", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/trace", s.pprofHandler) + // Register specific pprof profiles + httpMux.HandleFunc("/debug/pprof/allocs", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/block", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/goroutine", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/heap", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/mutex", s.pprofHandler) + httpMux.HandleFunc("/debug/pprof/threadcreate", s.pprofHandler) + + logtrace.Debug(ctx, "Pprof endpoints enabled on gateway", logtrace.Fields{ + "chain_id": s.chainID, + "port": s.port, + }) + } + + httpMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/" { + http.Redirect(w, r, "/swagger-ui/", http.StatusFound) + } else { + http.NotFound(w, r) + } + }) + + // Create HTTP server + s.server = &http.Server{ + Addr: net.JoinHostPort(s.ipAddress, strconv.Itoa(s.port)), + Handler: s.corsMiddleware(httpMux), + ReadTimeout: 15 * time.Second, + WriteTimeout: 15 * time.Second, + IdleTimeout: 60 * time.Second, + } + + logtrace.Debug(ctx, "Starting HTTP gateway server", logtrace.Fields{ + "address": s.ipAddress, + "port": s.port, + "pprof_enabled": s.pprofEnabled, + }) + + // Start server + if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + return fmt.Errorf("gateway server failed: %w", err) + } + + return nil +} + +// Stop gracefully stops the HTTP gateway server (implements service interface) +func (s *Server) Stop(ctx context.Context) error { + if s.server == nil { + return nil + } + + logtrace.Debug(ctx, "Shutting down HTTP gateway server", nil) + return s.server.Shutdown(ctx) +} + +// corsMiddleware adds CORS headers for web access +func (s *Server) corsMiddleware(h http.Handler) 
http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, Authorization") + + if r.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + return + } + + h.ServeHTTP(w, r) + }) +} + +// pprofHandler proxies requests to the pprof handlers +func (s *Server) pprofHandler(w http.ResponseWriter, r *http.Request) { + // Check if pprof is enabled + if !s.pprofEnabled { + http.Error(w, "Profiling is not enabled", http.StatusForbidden) + return + } + + // Get the default pprof handler and serve + if handler, pattern := http.DefaultServeMux.Handler(r); pattern != "" { + handler.ServeHTTP(w, r) + } else { + http.NotFound(w, r) + } +} + +// rawPprofHandler handles the raw pprof endpoints that return actual pprof data +func (s *Server) rawPprofHandler(w http.ResponseWriter, r *http.Request) { + // Check if pprof is enabled + if !s.pprofEnabled { + http.Error(w, "Profiling is not enabled", http.StatusForbidden) + return + } + + // Map the /api/v1/debug/raw/pprof/* path to /debug/pprof/* + originalPath := r.URL.Path + r.URL.Path = strings.Replace(originalPath, "/api/v1/debug/raw/pprof", "/debug/pprof", 1) + + // Get the default pprof handler and serve + if handler, pattern := http.DefaultServeMux.Handler(r); pattern != "" { + handler.ServeHTTP(w, r) + } else { + http.NotFound(w, r) + } + + // Restore the original path + r.URL.Path = originalPath +} diff --git a/supernode/node/supernode/gateway/swagger.go b/supernode/transport/gateway/swagger.go similarity index 59% rename from supernode/node/supernode/gateway/swagger.go rename to supernode/transport/gateway/swagger.go index d86d0ad9..4bcd3f3d 100644 --- a/supernode/node/supernode/gateway/swagger.go +++ b/supernode/transport/gateway/swagger.go @@ -22,30 +22,30 @@ 
const swaggerUIHTML = ` *, *:before, *:after { box-sizing: inherit; } body { margin:0; background: #fafafa; } - - -
- - - - - + + +
+ + + + + ` // serveSwaggerJSON serves the OpenAPI specification @@ -58,12 +58,12 @@ func (s *Server) serveSwaggerJSON(w http.ResponseWriter, r *http.Request) { // serveSwaggerUI serves the Swagger UI interface func (s *Server) serveSwaggerUI(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/html") - + tmpl, err := template.New("swagger").Parse(swaggerUIHTML) if err != nil { http.Error(w, "Failed to load Swagger UI", http.StatusInternalServerError) return } - + tmpl.Execute(w, nil) -} \ No newline at end of file +} diff --git a/supernode/transport/gateway/swagger.json b/supernode/transport/gateway/swagger.json new file mode 100644 index 00000000..c3944e9d --- /dev/null +++ b/supernode/transport/gateway/swagger.json @@ -0,0 +1,857 @@ +{ + "swagger": "2.0", + "info": { + "title": "supernode/service.proto", + "version": "version not set" + }, + "tags": [ + { + "name": "SupernodeService" + } + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/api/v1/debug/raw/pprof": { + "get": { + "summary": "Raw pprof endpoints - return standard pprof output directly", + "operationId": "SupernodeService_GetRawPprof", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/allocs": { + "get": { + "operationId": "SupernodeService_GetRawPprofAllocs", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An 
unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/block": { + "get": { + "operationId": "SupernodeService_GetRawPprofBlock", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/cmdline": { + "get": { + "operationId": "SupernodeService_GetRawPprofCmdline", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/goroutine": { + "get": { + "operationId": "SupernodeService_GetRawPprofGoroutine", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + 
"description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/heap": { + "get": { + "operationId": "SupernodeService_GetRawPprofHeap", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/mutex": { + "get": { + "operationId": "SupernodeService_GetRawPprofMutex", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/profile": { + "get": { + "operationId": "SupernodeService_GetRawPprofProfile", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "seconds", + "description": "CPU profile duration in seconds (default 30)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + 
], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/symbol": { + "get": { + "operationId": "SupernodeService_GetRawPprofSymbol", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/threadcreate": { + "get": { + "operationId": "SupernodeService_GetRawPprofThreadcreate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/debug/raw/pprof/trace": { + "get": { + "operationId": "SupernodeService_GetRawPprofTrace", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeRawPprofResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "debug", + "description": "Debug level (0 for binary, \u003e0 for text)", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/services": { + "get": { + "operationId": "SupernodeService_ListServices", + 
"responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeListServicesResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "tags": [ + "SupernodeService" + ] + } + }, + "/api/v1/status": { + "get": { + "operationId": "SupernodeService_GetStatus", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/supernodeStatusResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "includeP2pMetrics", + "description": "Optional: include detailed P2P metrics in the response\nMaps to query param via grpc-gateway: /api/v1/status?include_p2p_metrics=true", + "in": "query", + "required": false, + "type": "boolean" + } + ], + "tags": [ + "SupernodeService" + ] + } + } + }, + "definitions": { + "DhtMetricsBatchRetrievePoint": { + "type": "object", + "properties": { + "timeUnix": { + "type": "string", + "format": "int64", + "title": "event time (unix seconds)" + }, + "keys": { + "type": "integer", + "format": "int32", + "title": "keys requested" + }, + "required": { + "type": "integer", + "format": "int32", + "title": "required count" + }, + "foundLocal": { + "type": "integer", + "format": "int32", + "title": "found locally" + }, + "foundNetwork": { + "type": "integer", + "format": "int32", + "title": "found on network" + }, + "durationMs": { + "type": "string", + "format": "int64", + "title": "duration in milliseconds" + } + } + }, + "DhtMetricsStoreSuccessPoint": { + "type": "object", + "properties": { + "timeUnix": { + "type": "string", + "format": "int64", + "title": "event time (unix seconds)" + }, + "requests": { + "type": "integer", + "format": "int32", + "title": "total node RPCs attempted" + }, + "successful": { + "type": "integer", + "format": 
"int32", + "title": "successful node RPCs" + }, + "successRate": { + "type": "number", + "format": "double", + "title": "percentage (0-100)" + } + } + }, + "P2PMetricsBanEntry": { + "type": "object", + "properties": { + "id": { + "type": "string", + "title": "printable ID" + }, + "ip": { + "type": "string", + "title": "last seen IP" + }, + "port": { + "type": "integer", + "format": "int64", + "title": "last seen port" + }, + "count": { + "type": "integer", + "format": "int32", + "title": "failure count" + }, + "createdAtUnix": { + "type": "string", + "format": "int64", + "title": "first ban time (unix seconds)" + }, + "ageSeconds": { + "type": "string", + "format": "int64", + "title": "age in seconds" + } + }, + "title": "Ban list entry" + }, + "P2PMetricsDatabaseStats": { + "type": "object", + "properties": { + "p2pDbSizeMb": { + "type": "number", + "format": "double" + }, + "p2pDbRecordsCount": { + "type": "string", + "format": "int64" + } + }, + "title": "DB stats" + }, + "P2PMetricsDhtMetrics": { + "type": "object", + "properties": { + "storeSuccessRecent": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/DhtMetricsStoreSuccessPoint" + } + }, + "batchRetrieveRecent": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/DhtMetricsBatchRetrievePoint" + } + }, + "hotPathBannedSkips": { + "type": "string", + "format": "int64", + "title": "counter" + }, + "hotPathBanIncrements": { + "type": "string", + "format": "int64", + "title": "counter" + } + }, + "title": "Rolling DHT metrics snapshot" + }, + "P2PMetricsDiskStatus": { + "type": "object", + "properties": { + "allMb": { + "type": "number", + "format": "double" + }, + "usedMb": { + "type": "number", + "format": "double" + }, + "freeMb": { + "type": "number", + "format": "double" + } + }, + "title": "Disk status" + }, + "P2PMetricsHandleCounters": { + "type": "object", + "properties": { + "total": { + "type": "string", + "format": "int64" + }, + "success": { 
+ "type": "string", + "format": "int64" + }, + "failure": { + "type": "string", + "format": "int64" + }, + "timeout": { + "type": "string", + "format": "int64" + } + }, + "title": "Per-handler counters from network layer" + }, + "ResourcesCPU": { + "type": "object", + "properties": { + "usagePercent": { + "type": "number", + "format": "double", + "title": "CPU usage percentage (0-100)" + }, + "cores": { + "type": "integer", + "format": "int32", + "title": "Number of CPU cores" + } + } + }, + "ResourcesMemory": { + "type": "object", + "properties": { + "totalGb": { + "type": "number", + "format": "double", + "title": "Total memory in GB" + }, + "usedGb": { + "type": "number", + "format": "double", + "title": "Used memory in GB" + }, + "availableGb": { + "type": "number", + "format": "double", + "title": "Available memory in GB" + }, + "usagePercent": { + "type": "number", + "format": "double", + "title": "Memory usage percentage (0-100)" + } + } + }, + "ResourcesStorage": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Storage path being monitored" + }, + "totalBytes": { + "type": "string", + "format": "uint64" + }, + "usedBytes": { + "type": "string", + "format": "uint64" + }, + "availableBytes": { + "type": "string", + "format": "uint64" + }, + "usagePercent": { + "type": "number", + "format": "double", + "title": "Storage usage percentage (0-100)" + } + } + }, + "StatusResponseNetwork": { + "type": "object", + "properties": { + "peersCount": { + "type": "integer", + "format": "int32", + "title": "Number of connected peers in P2P network" + }, + "peerAddresses": { + "type": "array", + "items": { + "type": "string" + }, + "title": "List of connected peer addresses (optional, may be empty for privacy)" + } + }, + "title": "Network information" + }, + "StatusResponseP2PMetrics": { + "type": "object", + "properties": { + "dhtMetrics": { + "$ref": "#/definitions/P2PMetricsDhtMetrics" + }, + "networkHandleMetrics": { + "type": 
"object", + "additionalProperties": { + "$ref": "#/definitions/P2PMetricsHandleCounters" + } + }, + "connPoolMetrics": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "int64" + } + }, + "banList": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/P2PMetricsBanEntry" + } + }, + "database": { + "$ref": "#/definitions/P2PMetricsDatabaseStats" + }, + "disk": { + "$ref": "#/definitions/P2PMetricsDiskStatus" + } + }, + "title": "P2P metrics and diagnostics (additive field)" + }, + "StatusResponseResources": { + "type": "object", + "properties": { + "cpu": { + "$ref": "#/definitions/ResourcesCPU" + }, + "memory": { + "$ref": "#/definitions/ResourcesMemory" + }, + "storageVolumes": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/ResourcesStorage" + } + }, + "hardwareSummary": { + "type": "string", + "title": "Formatted hardware summary (e.g., \"8 cores / 32GB RAM\")" + } + }, + "title": "System resource information" + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string" + } + }, + "additionalProperties": {} + }, + "rpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "supernodeListServicesResponse": { + "type": "object", + "properties": { + "services": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/supernodeServiceInfo" + } + }, + "count": { + "type": "integer", + "format": "int32" + } + } + }, + "supernodeRawPprofResponse": { + "type": "object", + "properties": { + "data": { + "type": "string", + "format": "byte", + "title": "Raw pprof data exactly as returned by runtime/pprof" + } + } + }, + "supernodeServiceInfo": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + 
"methods": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "supernodeStatusResponse": { + "type": "object", + "properties": { + "version": { + "type": "string", + "title": "Supernode version" + }, + "uptimeSeconds": { + "type": "string", + "format": "uint64", + "title": "Uptime in seconds" + }, + "resources": { + "$ref": "#/definitions/StatusResponseResources" + }, + "registeredServices": { + "type": "array", + "items": { + "type": "string" + }, + "title": "All registered/available services" + }, + "network": { + "$ref": "#/definitions/StatusResponseNetwork", + "title": "P2P network information" + }, + "rank": { + "type": "integer", + "format": "int32", + "title": "Rank in the top supernodes list (0 if not in top list)" + }, + "ipAddress": { + "type": "string", + "title": "Supernode IP address with port (e.g., \"192.168.1.1:4445\")" + }, + "p2pMetrics": { + "$ref": "#/definitions/StatusResponseP2PMetrics" + } + }, + "title": "The StatusResponse represents system status with clear organization" + } + } +} diff --git a/supernode/transport/grpc/cascade/handler.go b/supernode/transport/grpc/cascade/handler.go new file mode 100644 index 00000000..96237b98 --- /dev/null +++ b/supernode/transport/grpc/cascade/handler.go @@ -0,0 +1,356 @@ +package cascade + +import ( + "encoding/hex" + "fmt" + "hash" + "io" + "os" + "path/filepath" + "time" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" + "github.com/LumeraProtocol/supernode/v2/pkg/errors" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + tasks "github.com/LumeraProtocol/supernode/v2/pkg/task" + cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" + "lukechampine.com/blake3" +) + +type ActionServer struct { + pb.UnimplementedCascadeServiceServer + factory cascadeService.CascadeServiceFactory + tracker tasks.Tracker + uploadTimeout time.Duration + downloadTimeout time.Duration +} + +const ( + serviceCascadeUpload = "cascade.upload" + 
serviceCascadeDownload = "cascade.download" +) + +// NewCascadeActionServer creates a new CascadeActionServer with injected service and tracker +func NewCascadeActionServer(factory cascadeService.CascadeServiceFactory, tracker tasks.Tracker, uploadTO, downloadTO time.Duration) *ActionServer { + if uploadTO <= 0 { + uploadTO = 30 * time.Minute + } + if downloadTO <= 0 { + downloadTO = 30 * time.Minute + } + return &ActionServer{factory: factory, tracker: tracker, uploadTimeout: uploadTO, downloadTimeout: downloadTO} +} + +// calculateOptimalChunkSize returns an optimal chunk size based on file size +// to balance throughput and memory usage + +var ( + startedTask bool + handle *tasks.Handle +) + +func calculateOptimalChunkSize(fileSize int64) int { + const ( + minChunkSize = 64 * 1024 // 64 KB minimum + maxChunkSize = 4 * 1024 * 1024 // 4 MB maximum for 1GB+ files + smallFileThreshold = 1024 * 1024 // 1 MB + mediumFileThreshold = 50 * 1024 * 1024 // 50 MB + largeFileThreshold = 500 * 1024 * 1024 // 500 MB + ) + + var chunkSize int + + switch { + case fileSize <= smallFileThreshold: + chunkSize = minChunkSize + case fileSize <= mediumFileThreshold: + chunkSize = 256 * 1024 + case fileSize <= largeFileThreshold: + chunkSize = 1024 * 1024 + default: + chunkSize = maxChunkSize + } + + if chunkSize < minChunkSize { + chunkSize = minChunkSize + } + if chunkSize > maxChunkSize { + chunkSize = maxChunkSize + } + return chunkSize +} + +func (server *ActionServer) Register(stream pb.CascadeService_RegisterServer) error { + fields := logtrace.Fields{ + logtrace.FieldMethod: "Register", + logtrace.FieldModule: "CascadeActionServer", + } + + ctx := stream.Context() + logtrace.Info(ctx, "register: stream open", fields) + + const maxFileSize = 1 * 1024 * 1024 * 1024 // 1GB limit + + var ( + metadata *pb.Metadata + totalSize int + ) + + hasher, tempFile, tempFilePath, err := initializeHasherAndTempFile() + if err != nil { + fields[logtrace.FieldError] = err.Error() + 
logtrace.Error(ctx, "failed to initialize hasher and temp file", fields) + return fmt.Errorf("initializing hasher and temp file: %w", err) + } + defer func(tempFile *os.File) { + err := tempFile.Close() + if err != nil && !errors.Is(err, os.ErrClosed) { + fields[logtrace.FieldError] = err.Error() + logtrace.Warn(ctx, "error closing temp file", fields) + } + }(tempFile) + + for { + req, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "error receiving stream data", fields) + return fmt.Errorf("failed to receive stream data: %w", err) + } + + switch x := req.RequestType.(type) { + case *pb.RegisterRequest_Chunk: + if x.Chunk != nil { + if _, err := hasher.Write(x.Chunk.Data); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to write chunk to hasher", fields) + return fmt.Errorf("hashing error: %w", err) + } + if _, err := tempFile.Write(x.Chunk.Data); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to write chunk to file", fields) + return fmt.Errorf("file write error: %w", err) + } + totalSize += len(x.Chunk.Data) + if totalSize > maxFileSize { + fields[logtrace.FieldError] = "file size exceeds 1GB limit" + fields["total_size"] = totalSize + logtrace.Error(ctx, "upload rejected: file too large", fields) + return fmt.Errorf("file size %d exceeds maximum allowed size of 1GB", totalSize) + } + // Keep chunk logs at debug to avoid verbosity + logtrace.Debug(ctx, "received data chunk", logtrace.Fields{"chunk_size": len(x.Chunk.Data), "total_size_so_far": totalSize}) + } + case *pb.RegisterRequest_Metadata: + metadata = x.Metadata + // Set correlation ID for the rest of the flow + ctx = logtrace.CtxWithCorrelationID(ctx, metadata.ActionId) + fields[logtrace.FieldTaskID] = metadata.GetTaskId() + fields[logtrace.FieldActionID] = metadata.GetActionId() + logtrace.Info(ctx, "register: metadata received", fields) 
+ // Start live task tracking on first metadata (covers remaining stream and processing) + if !startedTask { + startedTask = true + handle = tasks.StartWith(server.tracker, ctx, serviceCascadeUpload, metadata.ActionId, server.uploadTimeout) + defer handle.End(ctx) + } + } + } + + if metadata == nil { + logtrace.Error(ctx, "no metadata received in stream", fields) + return fmt.Errorf("no metadata received") + } + fields[logtrace.FieldTaskID] = metadata.GetTaskId() + fields[logtrace.FieldActionID] = metadata.GetActionId() + logtrace.Info(ctx, "register: stream upload complete", fields) + + if err := tempFile.Sync(); err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to sync temp file", fields) + return fmt.Errorf("failed to sync temp file: %w", err) + } + + hash := hasher.Sum(nil) + hashHex := hex.EncodeToString(hash) + fields[logtrace.FieldHashHex] = hashHex + logtrace.Info(ctx, "register: hash computed", fields) + + targetPath, err := replaceTempDirWithTaskDir(metadata.GetTaskId(), tempFilePath, tempFile) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to replace temp dir with task dir", fields) + return fmt.Errorf("failed to replace temp dir with task dir: %w", err) + } + + task := server.factory.NewCascadeRegistrationTask() + logtrace.Info(ctx, "register: task start", fields) + err = task.Register(ctx, &cascadeService.RegisterRequest{ + TaskID: metadata.TaskId, + ActionID: metadata.ActionId, + DataHash: hash, + DataSize: totalSize, + FilePath: targetPath, + }, func(resp *cascadeService.RegisterResponse) error { + grpcResp := &pb.RegisterResponse{ + EventType: pb.SupernodeEventType(resp.EventType), + Message: resp.Message, + TxHash: resp.TxHash, + } + if err := stream.Send(grpcResp); err != nil { + logtrace.Error(ctx, "failed to send response to client", logtrace.Fields{logtrace.FieldError: err.Error()}) + return err + } + // Mirror event to Info logs for high-level tracing + 
logtrace.Info(ctx, "register: event", logtrace.Fields{"event_type": resp.EventType, "message": resp.Message, logtrace.FieldTxHash: resp.TxHash, logtrace.FieldActionID: metadata.ActionId, logtrace.FieldTaskID: metadata.TaskId}) + return nil + }) + if err != nil { + logtrace.Error(ctx, "registration task failed", logtrace.Fields{logtrace.FieldError: err.Error()}) + return fmt.Errorf("registration failed: %w", err) + } + logtrace.Info(ctx, "register: task ok", fields) + return nil +} + +func (server *ActionServer) Download(req *pb.DownloadRequest, stream pb.CascadeService_DownloadServer) error { + ctx := stream.Context() + fields := logtrace.Fields{ + logtrace.FieldMethod: "Download", + logtrace.FieldModule: "CascadeActionServer", + logtrace.FieldActionID: req.GetActionId(), + } + logtrace.Debug(ctx, "download request received", fields) + + // Start live task tracking for the entire download RPC (including file streaming) + dlHandle := tasks.StartWith(server.tracker, ctx, serviceCascadeDownload, req.GetActionId(), server.downloadTimeout) + defer dlHandle.End(ctx) + + // Prepare to capture decoded file path from task events + var decodedFilePath string + var tmpDir string + + task := server.factory.NewCascadeRegistrationTask() + // Run cascade task Download; stream events back to client + err := task.Download(ctx, &cascadeService.DownloadRequest{ActionID: req.GetActionId(), Signature: req.GetSignature()}, func(resp *cascadeService.DownloadResponse) error { + // Forward event to gRPC client + evt := &pb.DownloadResponse{ + ResponseType: &pb.DownloadResponse_Event{ + Event: &pb.DownloadEvent{ + EventType: pb.SupernodeEventType(resp.EventType), + Message: resp.Message, + }, + }, + } + if sendErr := stream.Send(evt); sendErr != nil { + return sendErr + } + // Capture decode-completed info for streaming + if resp.EventType == cascadeService.SupernodeEventTypeDecodeCompleted { + decodedFilePath = resp.FilePath + tmpDir = resp.DownloadedDir + } + return nil + }) + if err != 
nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "download task failed", fields) + return fmt.Errorf("download task failed: %w", err) + } + + if decodedFilePath == "" { + logtrace.Warn(ctx, "decode completed without file path", fields) + return nil + } + + // Notify client that server is ready to stream the file + logtrace.Debug(ctx, "download: serve ready", logtrace.Fields{"event_type": cascadeService.SupernodeEventTypeServeReady, logtrace.FieldActionID: req.GetActionId()}) + if err := stream.Send(&pb.DownloadResponse{ResponseType: &pb.DownloadResponse_Event{Event: &pb.DownloadEvent{EventType: pb.SupernodeEventType_SERVE_READY, Message: "Serve ready"}}}); err != nil { + return fmt.Errorf("send serve-ready: %w", err) + } + + // Stream file content in chunks + fi, err := os.Stat(decodedFilePath) + if err != nil { + return fmt.Errorf("stat decoded file: %w", err) + } + chunkSize := calculateOptimalChunkSize(fi.Size()) + f, err := os.Open(decodedFilePath) + if err != nil { + return fmt.Errorf("open decoded file: %w", err) + } + defer f.Close() + + buf := make([]byte, chunkSize) + for { + n, rerr := f.Read(buf) + if n > 0 { + if err := stream.Send(&pb.DownloadResponse{ResponseType: &pb.DownloadResponse_Chunk{Chunk: &pb.DataChunk{Data: append([]byte(nil), buf[:n]...)}}}); err != nil { + return fmt.Errorf("send chunk: %w", err) + } + } + if rerr == io.EOF { + break + } + if rerr != nil { + return fmt.Errorf("read decoded file: %w", rerr) + } + } + + // Cleanup temp directory if provided + if tmpDir != "" { + if cerr := task.CleanupDownload(ctx, tmpDir); cerr != nil { + logtrace.Warn(ctx, "cleanup of tmp dir failed", logtrace.Fields{"tmp_dir": tmpDir, logtrace.FieldError: cerr.Error()}) + } + } + + logtrace.Debug(ctx, "download stream completed", fields) + return nil +} + +// initializeHasherAndTempFile prepares a hasher and a temporary file to stream upload data into. 
+func initializeHasherAndTempFile() (hash.Hash, *os.File, string, error) { + // Create a temp directory for the upload + tmpDir, err := os.MkdirTemp("", "supernode-upload-*") + if err != nil { + return nil, nil, "", fmt.Errorf("create temp dir: %w", err) + } + + // Create a file within the temp directory + filePath := filepath.Join(tmpDir, "data.bin") + f, err := os.Create(filePath) + if err != nil { + return nil, nil, "", fmt.Errorf("create temp file: %w", err) + } + + // Create a BLAKE3 hasher (32 bytes output) + hasher := blake3.New(32, nil) + return hasher, f, filePath, nil +} + +// replaceTempDirWithTaskDir moves the uploaded file into a task-scoped directory +// and returns the new absolute path. +func replaceTempDirWithTaskDir(taskID, tempFilePath string, tempFile *os.File) (string, error) { + // Ensure data is flushed + _ = tempFile.Sync() + // Close now; deferred close may run later and is safe to ignore + _ = tempFile.Close() + + // Create a stable target directory under OS temp + targetDir := filepath.Join(os.TempDir(), "supernode", "uploads", taskID) + if err := os.MkdirAll(targetDir, 0700); err != nil { + return "", fmt.Errorf("create task dir: %w", err) + } + + newPath := filepath.Join(targetDir, filepath.Base(tempFilePath)) + if err := os.Rename(tempFilePath, newPath); err != nil { + return "", fmt.Errorf("move uploaded file: %w", err) + } + + // Attempt to cleanup the original temp directory + _ = os.RemoveAll(filepath.Dir(tempFilePath)) + return newPath, nil +} diff --git a/supernode/transport/grpc/status/handler.go b/supernode/transport/grpc/status/handler.go new file mode 100644 index 00000000..e543e7b1 --- /dev/null +++ b/supernode/transport/grpc/status/handler.go @@ -0,0 +1,59 @@ +package server + +import ( + "context" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" + pbcascade "github.com/LumeraProtocol/supernode/v2/gen/supernode/action/cascade" + statussvc "github.com/LumeraProtocol/supernode/v2/supernode/status" +) + +// 
SupernodeServer implements the SupernodeService gRPC service +type SupernodeServer struct { + pb.UnimplementedSupernodeServiceServer + statusService *statussvc.SupernodeStatusService + gatewayPort int +} + +// NewSupernodeServer creates a new SupernodeServer +func NewSupernodeServer(statusService *statussvc.SupernodeStatusService) *SupernodeServer { + return &SupernodeServer{statusService: statusService, gatewayPort: 8002} +} + +// SetGatewayPort sets the gateway port for internal proxy requests +func (s *SupernodeServer) SetGatewayPort(port int) { + s.gatewayPort = port +} + +// GetStatus implements SupernodeService.GetStatus +func (s *SupernodeServer) GetStatus(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) { + return s.statusService.GetStatus(ctx, req.GetIncludeP2PMetrics()) +} + +// ListServices implements SupernodeService.ListServices +func (s *SupernodeServer) ListServices(ctx context.Context, _ *pb.ListServicesRequest) (*pb.ListServicesResponse, error) { + // Describe available services and methods/streams exposed by this node + var services []*pb.ServiceInfo + + // SupernodeService methods + var supernodeMethods []string + for _, m := range pb.SupernodeService_ServiceDesc.Methods { + supernodeMethods = append(supernodeMethods, m.MethodName) + } + services = append(services, &pb.ServiceInfo{ + Name: pb.SupernodeService_ServiceDesc.ServiceName, + Methods: supernodeMethods, + }) + + // CascadeService streams (surface stream names as methods for discovery) + var cascadeMethods []string + for _, st := range pbcascade.CascadeService_ServiceDesc.Streams { + cascadeMethods = append(cascadeMethods, st.StreamName) + } + services = append(services, &pb.ServiceInfo{ + Name: pbcascade.CascadeService_ServiceDesc.ServiceName, + Methods: cascadeMethods, + }) + + return &pb.ListServicesResponse{Services: services, Count: int32(len(services))}, nil +} diff --git a/supernode/transport/grpc/status/pprof_handlers.go 
b/supernode/transport/grpc/status/pprof_handlers.go new file mode 100644 index 00000000..00be8b99 --- /dev/null +++ b/supernode/transport/grpc/status/pprof_handlers.go @@ -0,0 +1,252 @@ +package server + +import ( + "context" + "fmt" + "io" + "net/http" + "os" + "strings" + + pb "github.com/LumeraProtocol/supernode/v2/gen/supernode" +) + +// isPprofEnabled checks if pprof should be enabled based on chain ID or environment variable +func (s *SupernodeServer) isPprofEnabled() bool { + // Check if chain ID contains testnet + if s.statusService != nil && s.statusService.GetChainID() != "" { + if strings.Contains(strings.ToLower(s.statusService.GetChainID()), "testnet") { + return true + } + } + + // Check environment variable + return os.Getenv("ENABLE_PPROF") == "true" +} + +// Raw pprof handlers - these proxy to the actual pprof HTTP endpoints + +// pprofProxy makes an internal HTTP request to the actual pprof endpoint +func (s *SupernodeServer) pprofProxy(path string, queryParams string) ([]byte, error) { + // Determine the port - use gateway port if available, otherwise use default + port := 8002 // Default gateway port + if s.gatewayPort != 0 { + port = s.gatewayPort + } + + // Construct the URL + url := fmt.Sprintf("http://localhost:%d/debug/pprof%s", port, path) + if queryParams != "" { + url += "?" 
+ queryParams + } + + // Make the HTTP request + resp, err := http.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + // Read the response body + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return data, nil +} + +// GetRawPprof returns the pprof index +func (s *SupernodeServer) GetRawPprof(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte("Profiling disabled")}, nil + } + + data, err := s.pprofProxy("/", "") + if err != nil { + return &pb.RawPprofResponse{Data: []byte(fmt.Sprintf("Error: %v", err))}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofHeap returns raw heap profile +func (s *SupernodeServer) GetRawPprofHeap(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/heap", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofGoroutine returns raw goroutine profile +func (s *SupernodeServer) GetRawPprofGoroutine(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/goroutine", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofAllocs returns raw allocations profile +func (s *SupernodeServer) GetRawPprofAllocs(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + 
if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/allocs", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofBlock returns raw block profile +func (s *SupernodeServer) GetRawPprofBlock(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/block", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofMutex returns raw mutex profile +func (s *SupernodeServer) GetRawPprofMutex(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/mutex", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofThreadcreate returns raw threadcreate profile +func (s *SupernodeServer) GetRawPprofThreadcreate(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + queryParams := "" + if req.GetDebug() > 0 { + queryParams = fmt.Sprintf("debug=%d", req.GetDebug()) + } + + data, err := s.pprofProxy("/threadcreate", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: 
data}, nil +} + +// GetRawPprofProfile returns raw CPU profile +func (s *SupernodeServer) GetRawPprofProfile(ctx context.Context, req *pb.RawPprofCpuRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + seconds := req.GetSeconds() + if seconds <= 0 { + seconds = 30 + } + if seconds > 300 { + seconds = 300 + } + + queryParams := fmt.Sprintf("seconds=%d", seconds) + data, err := s.pprofProxy("/profile", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofCmdline returns the command line +func (s *SupernodeServer) GetRawPprofCmdline(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + data, err := s.pprofProxy("/cmdline", "") + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofSymbol returns symbol information +func (s *SupernodeServer) GetRawPprofSymbol(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + data, err := s.pprofProxy("/symbol", "") + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} + +// GetRawPprofTrace returns execution trace +func (s *SupernodeServer) GetRawPprofTrace(ctx context.Context, req *pb.RawPprofRequest) (*pb.RawPprofResponse, error) { + if !s.isPprofEnabled() { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + // Trace typically takes a seconds parameter + queryParams := "seconds=1" + data, err := s.pprofProxy("/trace", queryParams) + if err != nil { + return &pb.RawPprofResponse{Data: []byte{}}, nil + } + + return &pb.RawPprofResponse{Data: data}, nil +} diff --git 
a/supernode/node/supernode/server/server.go b/supernode/transport/grpc/status/server.go similarity index 71% rename from supernode/node/supernode/server/server.go rename to supernode/transport/grpc/status/server.go index 37e8f4dd..b7938983 100644 --- a/supernode/node/supernode/server/server.go +++ b/supernode/transport/grpc/status/server.go @@ -7,7 +7,6 @@ import ( "strconv" "strings" - "google.golang.org/grpc" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" @@ -22,14 +21,12 @@ import ( "github.com/cosmos/cosmos-sdk/crypto/keyring" ) -type service interface { - Desc() *grpc.ServiceDesc -} - // Server represents supernode server type Server struct { - config *Config - services []service + identity string + listenAddrs string + port int + services []grpcserver.ServiceDesc name string kr keyring.Keyring grpcServer *grpcserver.Server @@ -48,12 +45,12 @@ func (server *Server) Run(ctx context.Context) error { // Set up gRPC logging logtrace.SetGRPCLogger() - logtrace.Info(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.config.Identity}) - logtrace.Info(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.config.ListenAddresses}) + logtrace.Debug(ctx, "Server identity configured", logtrace.Fields{logtrace.FieldModule: "server", "identity": server.identity}) + logtrace.Debug(ctx, "Server listening", logtrace.Fields{logtrace.FieldModule: "server", "addresses": server.listenAddrs}) group, ctx := errgroup.WithContext(ctx) - addresses := strings.Split(server.config.ListenAddresses, ",") + addresses := strings.Split(server.listenAddrs, ",") if err := server.setupGRPCServer(); err != nil { logtrace.Fatal(ctx, "Failed to setup gRPC server", logtrace.Fields{logtrace.FieldModule: "server", logtrace.FieldError: err.Error()}) } @@ -70,11 +67,11 @@ func (server *Server) Run(ctx context.Context) error { opts.WriteBufferSize = (8 * 1024 * 1024) // 8MB TCP 
buffer for _, address := range addresses { - addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.config.Port)) + addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.port)) address := addr // Create a new variable to avoid closure issues group.Go(func() error { - logtrace.Info(ctx, "Starting gRPC server", logtrace.Fields{logtrace.FieldModule: "server", "address": address}) + logtrace.Debug(ctx, "Starting gRPC server", logtrace.Fields{logtrace.FieldModule: "server", "address": address}) return server.grpcServer.Serve(ctx, address, opts) }) } @@ -87,7 +84,7 @@ func (server *Server) setupGRPCServer() error { serverCreds, err := ltc.NewServerCreds(<c.ServerOptions{ CommonOptions: ltc.CommonOptions{ Keyring: server.kr, - LocalIdentity: server.config.Identity, + LocalIdentity: server.identity, PeerType: securekeyx.Supernode, Validator: lumera.NewSecureKeyExchangeValidator(server.lumeraClient), }, @@ -107,29 +104,13 @@ func (server *Server) setupGRPCServer() error { server.healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) // Register all services - var supernodeServer *SupernodeServer - for _, service := range server.services { - server.grpcServer.RegisterService(service.Desc(), service) - server.healthServer.SetServingStatus(service.Desc().ServiceName, healthpb.HealthCheckResponse_SERVING) - - // Keep reference to SupernodeServer - if ss, ok := service.(*SupernodeServer); ok { - supernodeServer = ss + for _, s := range server.services { + server.grpcServer.RegisterService(s.Desc, s.Service) + if s.Desc != nil { + server.healthServer.SetServingStatus(s.Desc.ServiceName, healthpb.HealthCheckResponse_SERVING) } } - // After all services are registered, update SupernodeServer with the list - if supernodeServer != nil { - // Register all custom services - for _, svc := range server.services { - supernodeServer.RegisterService(svc.Desc().ServiceName, svc.Desc()) - } - - // Also register the health service - 
healthDesc := healthpb.Health_ServiceDesc - supernodeServer.RegisterService(healthDesc.ServiceName, &healthDesc) - } - return nil } @@ -146,7 +127,10 @@ func (server *Server) Close() { // Set all services to NOT_SERVING before shutdown server.healthServer.SetServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING) for _, service := range server.services { - serviceName := service.Desc().ServiceName + serviceName := "" + if service.Desc != nil { + serviceName = service.Desc.ServiceName + } server.healthServer.SetServingStatus(serviceName, healthpb.HealthCheckResponse_NOT_SERVING) } } @@ -158,13 +142,14 @@ func (server *Server) Close() { } // New returns a new Server instance. -func New(config *Config, name string, kr keyring.Keyring, lumeraClient lumera.Client, services ...service) (*Server, error) { - if config == nil { - return nil, fmt.Errorf("config is nil") +func New(identity, listenAddrs string, port int, name string, kr keyring.Keyring, lumeraClient lumera.Client, services ...grpcserver.ServiceDesc) (*Server, error) { + if listenAddrs == "" { + return nil, fmt.Errorf("listen addresses cannot be empty") } - return &Server{ - config: config, + identity: identity, + listenAddrs: listenAddrs, + port: port, services: services, name: name, kr: kr, diff --git a/supernode/verifier/interface.go b/supernode/verifier/interface.go new file mode 100644 index 00000000..d2668c9c --- /dev/null +++ b/supernode/verifier/interface.go @@ -0,0 +1,35 @@ +package verifier + +import "context" + +// ConfigVerifierService defines verification methods +type ConfigVerifierService interface { + VerifyConfig(ctx context.Context) (*VerificationResult, error) +} + +// ConfigError represents a config validation error or warning +type ConfigError struct { + Field string + Expected string + Actual string + Message string +} + +// VerificationResult holds the outcome of config verification +type VerificationResult struct { + Valid bool + Errors []ConfigError + Warnings []ConfigError +} + 
+func (r *VerificationResult) IsValid() bool { return r.Valid && len(r.Errors) == 0 } +func (r *VerificationResult) HasWarnings() bool { return len(r.Warnings) > 0 } +func (r *VerificationResult) Summary() string { + if !r.IsValid() { + return "invalid: check errors" + } + if r.HasWarnings() { + return "valid with warnings" + } + return "valid" +} diff --git a/supernode/verifier/verifier.go b/supernode/verifier/verifier.go new file mode 100644 index 00000000..b6f29559 --- /dev/null +++ b/supernode/verifier/verifier.go @@ -0,0 +1,129 @@ +package verifier + +import ( + "context" + "fmt" + "net" + + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + snmodule "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/v2/supernode/config" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type ConfigVerifier struct { + config *config.Config + lumeraClient lumera.Client + keyring keyring.Keyring +} + +func NewConfigVerifier(cfg *config.Config, client lumera.Client, kr keyring.Keyring) ConfigVerifierService { + return &ConfigVerifier{config: cfg, lumeraClient: client, keyring: kr} +} + +func (cv *ConfigVerifier) VerifyConfig(ctx context.Context) (*VerificationResult, error) { + result := &VerificationResult{Valid: true, Errors: []ConfigError{}, Warnings: []ConfigError{}} + logtrace.Debug(ctx, "Starting config verification", logtrace.Fields{"identity": cv.config.SupernodeConfig.Identity, "key_name": cv.config.SupernodeConfig.KeyName, "p2p_port": cv.config.P2PConfig.Port}) + if err := cv.checkKeyExists(result); err != nil { + return result, err + } + if err := cv.checkIdentityMatches(result); err != nil { + return result, err + } + if !result.IsValid() { + return result, nil + } + supernodeInfo, err := cv.checkSupernodeExists(ctx, result) + if err != nil { + return result, err + } + if supernodeInfo == nil { + return 
result, nil + } + cv.checkSupernodeState(result, supernodeInfo) + cv.checkPortsAvailable(result) + logtrace.Debug(ctx, "Config verification completed", logtrace.Fields{"valid": result.IsValid(), "errors": len(result.Errors), "warnings": len(result.Warnings)}) + return result, nil +} + +func (cv *ConfigVerifier) checkKeyExists(result *VerificationResult) error { + _, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) + if err != nil { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "key_name", Actual: cv.config.SupernodeConfig.KeyName, Message: fmt.Sprintf("Key '%s' not found in keyring", cv.config.SupernodeConfig.KeyName)}) + } + return nil +} + +func (cv *ConfigVerifier) checkIdentityMatches(result *VerificationResult) error { + keyInfo, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) + if err != nil { + return nil + } + pubKey, err := keyInfo.GetPubKey() + if err != nil { + return fmt.Errorf("failed to get public key for key '%s': %w", cv.config.SupernodeConfig.KeyName, err) + } + addr := sdk.AccAddress(pubKey.Address()) + if addr.String() != cv.config.SupernodeConfig.Identity { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "identity", Expected: addr.String(), Actual: cv.config.SupernodeConfig.Identity, Message: fmt.Sprintf("Key '%s' resolves to %s but config identity is %s", cv.config.SupernodeConfig.KeyName, addr.String(), cv.config.SupernodeConfig.Identity)}) + } + return nil +} + +func (cv *ConfigVerifier) checkSupernodeExists(ctx context.Context, result *VerificationResult) (*snmodule.SuperNodeInfo, error) { + sn, err := cv.lumeraClient.SuperNode().GetSupernodeWithLatestAddress(ctx, cv.config.SupernodeConfig.Identity) + if err != nil { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "registration", Actual: "not_registered", Message: fmt.Sprintf("Supernode not registered on chain for address %s", cv.config.SupernodeConfig.Identity)}) + return 
nil, err + } + return sn, nil +} + +func (cv *ConfigVerifier) checkP2PPortMatches(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { + configPort := fmt.Sprintf("%d", cv.config.P2PConfig.Port) + chainPort := supernodeInfo.P2PPort + if chainPort != "" && chainPort != configPort { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "p2p_port", Expected: chainPort, Actual: configPort, Message: fmt.Sprintf("P2P port mismatch: config=%s, chain=%s", configPort, chainPort)}) + } +} + +func (cv *ConfigVerifier) checkSupernodeState(result *VerificationResult, supernodeInfo *snmodule.SuperNodeInfo) { + if supernodeInfo.CurrentState != "" && supernodeInfo.CurrentState != "SUPERNODE_STATE_ACTIVE" { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "state", Expected: "SUPERNODE_STATE_ACTIVE", Actual: supernodeInfo.CurrentState, Message: fmt.Sprintf("Supernode state is %s (expected ACTIVE)", supernodeInfo.CurrentState)}) + } +} + +func (cv *ConfigVerifier) checkPortsAvailable(result *VerificationResult) { + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.SupernodeConfig.Port)) { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "supernode_port", Actual: fmt.Sprintf("%d", cv.config.SupernodeConfig.Port), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", cv.config.SupernodeConfig.Port)}) + } + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, int(cv.config.P2PConfig.Port)) { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "p2p_port", Actual: fmt.Sprintf("%d", cv.config.P2PConfig.Port), Message: fmt.Sprintf("Port %d is already in use. 
Please stop the conflicting service or choose a different port", cv.config.P2PConfig.Port)}) + } + gatewayPort := int(cv.config.SupernodeConfig.GatewayPort) + if gatewayPort == 0 { + gatewayPort = 8002 + } + if !cv.isPortAvailable(cv.config.SupernodeConfig.Host, gatewayPort) { + result.Valid = false + result.Errors = append(result.Errors, ConfigError{Field: "gateway_port", Actual: fmt.Sprintf("%d", gatewayPort), Message: fmt.Sprintf("Port %d is already in use. Please stop the conflicting service or choose a different port", gatewayPort)}) + } +} + +func (cv *ConfigVerifier) isPortAvailable(host string, port int) bool { + ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port)) + if err != nil { + return false + } + _ = ln.Close() + return true +} diff --git a/tests/integration/p2p/p2p_integration_test.go b/tests/integration/p2p/p2p_integration_test.go index bce71f58..a0191b63 100644 --- a/tests/integration/p2p/p2p_integration_test.go +++ b/tests/integration/p2p/p2p_integration_test.go @@ -108,7 +108,7 @@ func TestP2PBasicIntegration(t *testing.T) { // Add debug logging log.Printf("Storing batch with keys: %v", expectedKeys) - err := services[0].StoreBatch(ctx, batchData, 0, taskID) + err := services[0].StoreBatch(ctx, batchData, 0, taskID) require.NoError(t, err) // Add immediate verification @@ -122,7 +122,9 @@ func TestP2PBasicIntegration(t *testing.T) { } // Now try batch retrieve - retrieved, err := services[0].BatchRetrieve(ctx, expectedKeys, batchSize, taskID) + retrieved, err := services[0].BatchRetrieve(ctx, expectedKeys, batchSize, taskID, func(symbolID string, data []byte) error { + return nil + }) require.NoError(t, err) require.Equal(t, batchSize, len(retrieved), "Expected %d items, got %d", batchSize, len(retrieved)) @@ -203,6 +205,7 @@ func SetupTestP2PNodes(t *testing.T, ctx context.Context) ([]p2p.Client, []*rqst require.NoError(t, err, "failed to create rqstore for node %d: %v", i, err) rqStores = append(rqStores, rqStore) + // Disable 
metrics in integration tests by default service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil) require.NoError(t, err, "failed to create p2p service for node %d: %v", i, err) diff --git a/tests/system/e2e_cascade_test.go b/tests/system/e2e_cascade_test.go index e457ccd0..b9af06d2 100644 --- a/tests/system/e2e_cascade_test.go +++ b/tests/system/e2e_cascade_test.go @@ -3,7 +3,6 @@ package system import ( "context" "crypto/sha256" - "encoding/base64" "encoding/json" "fmt" "io" @@ -14,7 +13,6 @@ import ( "testing" "time" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/LumeraProtocol/supernode/v2/pkg/keyring" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" "github.com/LumeraProtocol/supernode/v2/supernode/config" @@ -22,7 +20,6 @@ import ( "github.com/LumeraProtocol/supernode/v2/sdk/action" "github.com/LumeraProtocol/supernode/v2/sdk/event" - "github.com/LumeraProtocol/lumera/x/action/v1/types" sdkconfig "github.com/LumeraProtocol/supernode/v2/sdk/config" "github.com/stretchr/testify/require" @@ -67,14 +64,13 @@ func TestCascadeE2E(t *testing.T) { ) // Action request parameters - const ( - actionType = "CASCADE" // The action type for fountain code processing - price = "23800ulume" // Price for the action in ulume tokens - ) + const actionType = "CASCADE" // The action type for fountain code processing t.Log("Step 1: Starting all services") - // Update the genesis file with action parameters - sut.ModifyGenesisJSON(t, SetActionParams(t)) + // Update the genesis file with required params before starting + // - Set staking bond denom to match ulume used by gentxs + // - Configure action module params used by the test + sut.ModifyGenesisJSON(t, SetStakingBondDenomUlume(t), SetActionParams(t)) // Reset and start the blockchain sut.StartChain(t) @@ -125,7 +121,7 @@ func TestCascadeE2E(t *testing.T) { args := []string{ "query", "supernode", - "get-top-super-nodes-for-block", + "get-top-supernodes-for-block", fmt.Sprint(queryHeight), 
"--output", "json", } @@ -190,13 +186,11 @@ func TestCascadeE2E(t *testing.T) { // Fund the account with tokens for transactions t.Logf("Funding test address %s with %s", recoveredAddress, fundAmount) - cli.FundAddress(recoveredAddress, fundAmount) // ulume tokens for action fees - cli.FundAddress(recoveredAddress, "10000000stake") // stake tokens + cli.FundAddress(recoveredAddress, fundAmount) // ulume tokens for action fees // Fund user account t.Logf("Funding user address %s with %s", userAddress, fundAmount) - cli.FundAddress(userAddress, fundAmount) // ulume tokens for action fees - cli.FundAddress(userAddress, "10000000stake") // stake tokens + cli.FundAddress(userAddress, fundAmount) // ulume tokens for action fees sut.AwaitNextBlock(t) // Wait for funding transaction to be processed @@ -274,78 +268,36 @@ func TestCascadeE2E(t *testing.T) { originalHash := sha256.Sum256(data) t.Logf("Original file SHA256 hash: %x", originalHash) - rqCodec := codec.NewRaptorQCodec(raptorQFilesDir) - - encodeRes, err := rqCodec.Encode(ctx, codec.EncodeRequest{ - Path: testFileFullpath, - DataSize: int(fileInfo.Size()), - TaskID: "1", - }) - - require.NoError(t, err, "Failed to encode data with RaptorQ") - - metadataFile := encodeRes.Metadata - - // Cascade signature creation process - const ic = uint32(121) - const maxFiles = uint32(50) - - // Create cascade signature format - signatureFormat, indexFileIDs, err := createCascadeLayoutSignature(metadataFile, keplrKeyring, userKeyName, ic, maxFiles) - require.NoError(t, err, "Failed to create cascade signature") - - t.Logf("Signature format prepared with length: %d bytes", len(signatureFormat)) - t.Logf("Generated %d index file IDs for chain verification", len(indexFileIDs)) + // Cascade signature creation process (high-level via action SDK) - // Data hash with blake3 - hash, err := ComputeBlake3Hash(data) - b64EncodedHash := base64.StdEncoding.EncodeToString(hash) - require.NoError(t, err, "Failed to compute Blake3 hash") + // 
Build action client for metadata generation and cascade operations + // Use the same account that submits RequestAction so signatures match the on-chain creator + accConfig := sdkconfig.AccountConfig{LocalCosmosAddress: userAddress, KeyName: userKeyName, Keyring: keplrKeyring} + lumraConfig := sdkconfig.LumeraConfig{GRPCAddr: lumeraGRPCAddr, ChainID: lumeraChainID} + actionConfig := sdkconfig.Config{Account: accConfig, Lumera: lumraConfig} + actionClient, err := action.NewClient(context.Background(), actionConfig, nil) + require.NoError(t, err, "Failed to create action client") - // Also Create a signature for the hash - signedHash, err := keyring.SignBytes(keplrKeyring, userKeyName, hash) - require.NoError(t, err, "Failed to sign hash") + // Use the new SDK helper to build Cascade metadata (includes signatures, price, and expiration) + builtMeta, autoPrice, expirationTime, err := actionClient.BuildCascadeMetadataFromFile(ctx, testFileFullpath, false) + require.NoError(t, err, "Failed to build cascade metadata from file") - // Encode the signed hash as base64 - signedHashBase64 := base64.StdEncoding.EncodeToString(signedHash) + // Create a signature for StartCascade using the SDK helper + signedHashBase64, err := actionClient.GenerateStartCascadeSignatureFromFile(ctx, testFileFullpath) + require.NoError(t, err, "Failed to generate StartCascade signature") // --------------------------------------- t.Log("Step 7: Creating metadata and submitting action request") - // Create CascadeMetadata struct with all required fields - cascadeMetadata := types.CascadeMetadata{ - DataHash: b64EncodedHash, // Hash of the original file - FileName: filepath.Base(testFileFullpath), // Original filename - RqIdsIc: uint64(121), // Count of RQ identifiers - Signatures: signatureFormat, // Combined signature format - } - - // Marshal the struct to JSON for the blockchain transaction - metadataBytes, err := json.Marshal(cascadeMetadata) + // Marshal the helper-built metadata to JSON for 
the blockchain transaction + metadataBytes, err := json.Marshal(builtMeta) require.NoError(t, err, "Failed to marshal CascadeMetadata to JSON") metadata := string(metadataBytes) - // Set expiration time 25 hours in the future (minimum is 24 hours) - // This defines how long the action request is valid - expirationTime := fmt.Sprintf("%d", time.Now().Add(25*time.Hour).Unix()) - t.Logf("Requesting cascade action with metadata: %s", metadata) - t.Logf("Action type: %s, Price: %s, Expiration: %s", actionType, price, expirationTime) - - // Submit the action request transaction to the blockchain using user key - // This registers the request with metadata for supernodes to process - // actionRequestResp := cli.CustomCommand( - // "tx", "action", "request-action", - // actionType, // CASCADE action type - // metadata, // JSON metadata with all required fields - // price, // Price in ulume tokens - // expirationTime, // Unix timestamp for expiration - // "--from", userKeyName, // Use user key for transaction submission - // "--gas", "auto", - // "--gas-adjustment", "1.5", - // ) - - response, err := lumeraClinet.ActionMsg().RequestAction(ctx, actionType, metadata, price, expirationTime) + t.Logf("Action type: %s, Price: %s, Expiration: %s", actionType, autoPrice, expirationTime) + + response, _ := lumeraClinet.ActionMsg().RequestAction(ctx, actionType, metadata, autoPrice, expirationTime) txresp := response.TxResponse @@ -400,56 +352,42 @@ func TestCascadeE2E(t *testing.T) { require.NotEmpty(t, actionID, "Action ID should not be empty") t.Logf("Extracted action ID: %s", actionID) - // Set up action client configuration - // This defines how to connect to network services - accConfig := sdkconfig.AccountConfig{ - LocalCosmosAddress: recoveredAddress, - KeyName: testKeyName, - Keyring: keplrKeyring, - } - - lumraConfig := sdkconfig.LumeraConfig{ - GRPCAddr: lumeraGRPCAddr, - ChainID: lumeraChainID, - } - actionConfig := sdkconfig.Config{ - Account: accConfig, - Lumera: 
lumraConfig, - } - - // Initialize action client for cascade operations - actionClient, err := action.NewClient( - context.Background(), - actionConfig, - nil, // Nil logger - use default - - ) - require.NoError(t, err, "Failed to create action client") - // --------------------------------------- // Step 9: Subscribe to all events and extract tx hash // --------------------------------------- - // Channel to receive the transaction hash - txHashCh := make(chan string, 1) - completionCh := make(chan bool, 1) - - // Subscribe to ALL events - err = actionClient.SubscribeToAllEvents(context.Background(), func(ctx context.Context, e event.Event) { - // Only capture TxhasReceived events - if e.Type == event.SDKTaskTxHashReceived { - if txHash, ok := e.Data[event.KeyTxHash].(string); ok && txHash != "" { - // Send the hash to our channel - txHashCh <- txHash - } - } - - // Also monitor for task completion - if e.Type == event.SDKTaskCompleted { - completionCh <- true - } - }) - require.NoError(t, err, "Failed to subscribe to events") + // Channels to receive async signals + txHashCh := make(chan string, 1) + completionCh := make(chan bool, 1) + errCh := make(chan string, 1) + + // Subscribe to ALL events (non-blocking sends to avoid handler stalls) + err = actionClient.SubscribeToAllEvents(context.Background(), func(ctx context.Context, e event.Event) { + // Log every event for debugging and capture key ones + t.Logf("SDK event: type=%s data=%v", e.Type, e.Data) + // Only capture TxhasReceived events + if e.Type == event.SDKTaskTxHashReceived { + if txHash, ok := e.Data[event.KeyTxHash].(string); ok && txHash != "" { + // Non-blocking send; drop if buffer full + select { case txHashCh <- txHash: default: } + } + } + + // Also monitor for task completion + if e.Type == event.SDKTaskCompleted { + // Non-blocking send; drop if buffer full + select { case completionCh <- true: default: } + } + // Capture task failures and propagate error message to main goroutine + if e.Type 
== event.SDKTaskFailed { + if msg, ok := e.Data[event.KeyError].(string); ok && msg != "" { + select { case errCh <- msg: default: } + } else { + select { case errCh <- "task failed (no error message)" : default: } + } + } + }) + require.NoError(t, err, "Failed to subscribe to events") // Start cascade operation @@ -464,8 +402,26 @@ func TestCascadeE2E(t *testing.T) { require.NoError(t, err, "Failed to start cascade operation") t.Logf("Cascade operation started with task ID: %s", taskID) - recievedhash := <-txHashCh - <-completionCh + // Wait for both tx-hash and completion with a timeout + var recievedhash string + done := false + timeout := time.After(2 * time.Minute) +waitLoop: + for { + if recievedhash != "" && done { + break waitLoop + } + select { + case h := <-txHashCh: + if recievedhash == "" { recievedhash = h } + case <-completionCh: + done = true + case emsg := <-errCh: + t.Fatalf("cascade task reported failure: %s", emsg) + case <-timeout: + t.Fatalf("timeout waiting for events; recievedhash=%q done=%v", recievedhash, done) + } + } t.Logf("Received transaction hash: %s", recievedhash) @@ -507,7 +463,7 @@ func TestCascadeE2E(t *testing.T) { if event.Get("type").String() == "coin_spent" { attrs := event.Get("attributes").Array() for i, attr := range attrs { - if attr.Get("key").String() == "amount" && attr.Get("value").String() == price { + if attr.Get("key").String() == "amount" && attr.Get("value").String() == autoPrice { feeSpent = true // Get the spender address from the same event group for j, addrAttr := range attrs { @@ -524,7 +480,7 @@ func TestCascadeE2E(t *testing.T) { if event.Get("type").String() == "coin_received" { attrs := event.Get("attributes").Array() for i, attr := range attrs { - if attr.Get("key").String() == "amount" && attr.Get("value").String() == price { + if attr.Get("key").String() == "amount" && attr.Get("value").String() == autoPrice { feeReceived = true // Get the receiver address from the same event group for j, addrAttr := 
range attrs { @@ -548,18 +504,13 @@ func TestCascadeE2E(t *testing.T) { t.Logf("Payment flow: %s paid %s to %s", fromAddress, amount, toAddress) require.NotEmpty(t, fromAddress, "Spender address should not be empty") require.NotEmpty(t, toAddress, "Receiver address should not be empty") - require.Equal(t, price, amount, "Payment amount should match action price") + require.Equal(t, autoPrice, amount, "Payment amount should match action price") time.Sleep(10 * time.Second) outputFileBaseDir := filepath.Join(".") - // Create signature: actionId.creatorsaddress (using the same address that was used for StartCascade) - signatureData := fmt.Sprintf("%s.%s", actionID, userAddress) - // Sign the signature data with user key - signedSignature, err := keyring.SignBytes(keplrKeyring, userKeyName, []byte(signatureData)) - require.NoError(t, err, "Failed to sign signature data") - // Base64 encode the signed signature - signature := base64.StdEncoding.EncodeToString(signedSignature) + // Create download signature for actionID (using the same address that was used for StartCascade) + signature, err := actionClient.GenerateDownloadSignature(context.Background(), actionID, userAddress) // Try to download the file using the action ID and signature dtaskID, err := actionClient.DownloadCascade(context.Background(), actionID, outputFileBaseDir, signature) @@ -681,3 +632,13 @@ func SetActionParams(t *testing.T) GenesisMutator { return state } } + +// SetStakingBondDenomUlume sets the staking module bond denom to "ulume" in genesis +func SetStakingBondDenomUlume(t *testing.T) GenesisMutator { + return func(genesis []byte) []byte { + t.Helper() + state, err := sjson.SetBytes(genesis, "app_state.staking.params.bond_denom", "ulume") + require.NoError(t, err) + return state + } +} diff --git a/tests/system/go.mod b/tests/system/go.mod index e6eb3bba..f974568e 100644 --- a/tests/system/go.mod +++ b/tests/system/go.mod @@ -1,110 +1,116 @@ module 
github.com/LumeraProtocol/supernode/v2/tests/systemtests -go 1.24.1 +go 1.25.1 -replace github.com/LumeraProtocol/supernode/v2 => ../../ +replace ( + github.com/LumeraProtocol/supernode/v2 => ../../ + github.com/cosmos/cosmos-sdk => github.com/cosmos/cosmos-sdk v0.50.14 +) + +require ( + cosmossdk.io/math v1.5.3 + github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000 + github.com/cometbft/cometbft v0.38.18 + github.com/tidwall/gjson v1.14.2 + github.com/tidwall/sjson v1.2.5 + golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b + gopkg.in/yaml.v3 v3.0.1 +) require ( github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect github.com/cosmos/cosmos-sdk v0.53.0 github.com/cosmos/gogogateway v1.2.0 // indirect github.com/cosmos/gogoproto v1.7.0 // indirect - github.com/cosmos/iavl v1.2.2 // indirect - github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/cosmos/iavl v1.2.4 // indirect + github.com/dvsekhvalnov/jose2go v1.7.0 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.22.0 // indirect - github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/cobra v1.9.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect - github.com/stretchr/testify v1.10.0 + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/stretchr/testify v1.11.1 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect - google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect - google.golang.org/grpc v1.72.0 // indirect -) - -require ( - cosmossdk.io/math v1.5.3 - github.com/LumeraProtocol/lumera v1.7.0 - github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000 - github.com/cometbft/cometbft v0.38.17 - github.com/tidwall/gjson v1.14.2 - 
github.com/tidwall/sjson v1.2.5 - golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 - gopkg.in/yaml.v3 v3.0.1 + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect + google.golang.org/grpc v1.76.0 // indirect ) require ( cosmossdk.io/api v0.9.2 // indirect - cosmossdk.io/collections v1.2.0 // indirect + cosmossdk.io/collections v1.3.0 // indirect cosmossdk.io/core v0.11.3 // indirect cosmossdk.io/depinject v1.2.0 // indirect cosmossdk.io/errors v1.0.2 // indirect - cosmossdk.io/log v1.5.1 // indirect + cosmossdk.io/log v1.6.0 // indirect cosmossdk.io/schema v1.1.0 // indirect cosmossdk.io/store v1.1.2 // indirect cosmossdk.io/x/tx v0.14.0 // indirect + cosmossdk.io/x/upgrade v0.2.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect - github.com/DataDog/datadog-go v3.2.0+incompatible // indirect + github.com/DataDog/datadog-go v4.8.3+incompatible // indirect github.com/DataDog/zstd v1.5.7 // indirect + github.com/LumeraProtocol/lumera v1.8.0 // indirect github.com/LumeraProtocol/rq-go v0.2.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.2.0 // indirect - github.com/bytedance/sonic v1.13.2 // indirect + github.com/bytedance/sonic v1.14.1 // indirect github.com/bytedance/sonic/loader v0.3.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cloudwego/base64x v0.1.5 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect github.com/cloudwego/iasm v0.2.0 // indirect - github.com/cockroachdb/errors v1.11.3 // indirect - github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect - github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/errors v1.12.0 // indirect + 
github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a // indirect + github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect github.com/cockroachdb/pebble v1.1.5 // indirect github.com/cockroachdb/redact v1.1.6 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cometbft/cometbft-db v0.14.1 // indirect github.com/cosmos/btcutil v1.0.5 // indirect - github.com/cosmos/cosmos-db v1.1.1 // indirect + github.com/cosmos/cosmos-db v1.1.2 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/ibc-go/v10 v10.3.0 // indirect github.com/cosmos/ics23/go v0.11.0 // indirect github.com/cosmos/ledger-cosmos-go v0.14.0 // indirect - github.com/danieljoos/wincred v1.2.1 // indirect + github.com/danieljoos/wincred v1.2.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect - github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect + github.com/desertbit/timer v1.0.1 // indirect github.com/dgraph-io/badger/v4 v4.2.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/dot v1.6.2 // indirect - github.com/fatih/color v1.16.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/getsentry/sentry-go v0.32.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect 
github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.4 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/flatbuffers v1.12.1 // indirect + github.com/google/flatbuffers v24.3.25+incompatible // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/orderedcode v0.0.1 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect @@ -142,11 +148,11 @@ require ( github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/rs/cors v1.11.1 // indirect github.com/rs/zerolog v1.34.0 // indirect - github.com/sagikazarmark/locafero v0.7.0 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sasha-s/go-deadlock v0.3.5 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.12.0 // indirect - github.com/spf13/viper v1.20.1 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/viper v1.21.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/tendermint/go-amino v0.16.0 // indirect github.com/tidwall/btree v1.7.0 // indirect @@ -155,24 +161,25 @@ require ( 
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect - go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 // indirect + go.etcd.io/bbolt v1.4.0-alpha.1 // indirect go.opencensus.io v0.24.0 // indirect - go.uber.org/mock v0.5.2 // indirect + go.uber.org/mock v0.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/arch v0.15.0 // indirect - golang.org/x/crypto v0.37.0 // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/sync v0.13.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect - google.golang.org/protobuf v1.36.6 // indirect + golang.org/x/crypto v0.42.0 // indirect + golang.org/x/net v0.44.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/term v0.35.0 // indirect + golang.org/x/text v0.29.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect + google.golang.org/protobuf v1.36.10 // indirect gotest.tools/v3 v3.5.2 // indirect - lukechampine.com/blake3 v1.4.0 // indirect - nhooyr.io/websocket v1.8.10 // indirect + lukechampine.com/blake3 v1.4.1 // indirect + nhooyr.io/websocket v1.8.17 // indirect pgregory.net/rapid v1.2.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/tests/system/go.sum b/tests/system/go.sum index 6e9c0112..c9818229 100644 --- a/tests/system/go.sum +++ b/tests/system/go.sum @@ -1,36 +1,36 @@ -cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= 
-cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= -cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= -cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= -cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= -cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= -cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= -cloud.google.com/go/compute v1.29.0 h1:Lph6d8oPi38NHkOr6S55Nus/Pbbcp37m/J0ohgKAefs= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= -cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= -cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= -cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= -cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= -cloud.google.com/go/storage v1.49.0 h1:zenOPBOWHCnojRd9aJZAyQXBYqkJkdQS42dxL55CIMw= -cloud.google.com/go/storage v1.49.0/go.mod h1:k1eHhhpLvrPjVGfo0mOUPEJ4Y2+a/Hv5PiwehZI9qGU= +cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= +cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= +cloud.google.com/go/auth v0.16.0 h1:Pd8P1s9WkcrBE2n/PhAwKsdrR35V3Sg2II9B+ndM3CU= +cloud.google.com/go/auth 
v0.16.0/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute v1.37.0 h1:XxtZlXYkZXub3LNaLu90TTemcFqIU1yZ4E4q9VlR39A= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= +cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= -cosmossdk.io/client/v2 v2.0.0-beta.5 h1:0LVv3nEByn//hFDIrYLs2WvsEU3HodOelh4SDHnA/1I= -cosmossdk.io/client/v2 v2.0.0-beta.5/go.mod h1:4p0P6o0ro+FizakJUYS9SeM94RNbv0thLmkHRw5o5as= -cosmossdk.io/collections v1.2.0 h1:IesfVG8G/+FYCMVMP01frS/Cw99Omk5vBh3cHbO01Gg= -cosmossdk.io/collections v1.2.0/go.mod h1:4NkMoYw6qRA8fnSH/yn1D/MOutr8qyQnwsO50Mz9ItU= +cosmossdk.io/client/v2 v2.0.0-beta.8.0.20250402172810-41e3e9d004a1 h1:nlMUeKu6CGrO7Gxt5S31qT3g27CHmBJHsZPjqHApVTI= +cosmossdk.io/client/v2 v2.0.0-beta.8.0.20250402172810-41e3e9d004a1/go.mod h1:xgv0ejeOk5yeDraPW5tv+PfBkCDt4yYa/+u45MyP+bM= +cosmossdk.io/collections v1.3.0 h1:RUY23xXBy/bu5oSHZ5y+mkJRyA4ZboKDO4Yvx4+g2uc= +cosmossdk.io/collections v1.3.0/go.mod h1:cqVpBMDGEYhuNmNSXIOmqpnQ7Eav43hpJIetzLuEkns= cosmossdk.io/core v0.11.3 
h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= cosmossdk.io/core v0.11.3/go.mod h1:9rL4RE1uDt5AJ4Tg55sYyHWXA16VmpHgbe0PbJc6N2Y= cosmossdk.io/depinject v1.2.0 h1:6NW/FSK1IkWTrX7XxUpBmX1QMBozpEI9SsWkKTBc5zw= cosmossdk.io/depinject v1.2.0/go.mod h1:pvitjtUxZZZTQESKNS9KhGjWVslJZxtO9VooRJYyPjk= cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= -cosmossdk.io/log v1.5.1 h1:wLwiYXmfrort/O+j6EkjF+HvbdrRQd+4cYCPKFSm+zM= -cosmossdk.io/log v1.5.1/go.mod h1:5cXXBvfBkR2/BcXmosdCSLXllvgSjphrrDVdfVRmBGM= +cosmossdk.io/log v1.6.0 h1:SJIOmJ059wi1piyRgNRXKXhlDXGqnB5eQwhcZKv2tOk= +cosmossdk.io/log v1.6.0/go.mod h1:5cXXBvfBkR2/BcXmosdCSLXllvgSjphrrDVdfVRmBGM= cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= @@ -43,42 +43,41 @@ cosmossdk.io/x/evidence v0.1.1 h1:Ks+BLTa3uftFpElLTDp9L76t2b58htjVbSZ86aoK/E4= cosmossdk.io/x/evidence v0.1.1/go.mod h1:OoDsWlbtuyqS70LY51aX8FBTvguQqvFrt78qL7UzeNc= cosmossdk.io/x/feegrant v0.1.1 h1:EKFWOeo/pup0yF0svDisWWKAA9Zags6Zd0P3nRvVvw8= cosmossdk.io/x/feegrant v0.1.1/go.mod h1:2GjVVxX6G2fta8LWj7pC/ytHjryA6MHAJroBWHFNiEQ= -cosmossdk.io/x/nft v0.1.1 h1:pslAVS8P5NkW080+LWOamInjDcq+v2GSCo+BjN9sxZ8= -cosmossdk.io/x/nft v0.1.1/go.mod h1:Kac6F6y2gsKvoxU+fy8uvxRTi4BIhLOor2zgCNQwVgY= cosmossdk.io/x/tx v0.14.0 h1:hB3O25kIcyDW/7kMTLMaO8Ripj3yqs5imceVd6c/heA= cosmossdk.io/x/tx v0.14.0/go.mod h1:Tn30rSRA1PRfdGB3Yz55W4Sn6EIutr9xtMKSHij+9PM= -cosmossdk.io/x/upgrade v0.1.4 h1:/BWJim24QHoXde8Bc64/2BSEB6W4eTydq0X/2f8+g38= -cosmossdk.io/x/upgrade v0.1.4/go.mod h1:9v0Aj+fs97O+Ztw+tG3/tp5JSlrmT7IcFhAebQHmOPo= +cosmossdk.io/x/upgrade v0.2.0 h1:ZHy0xny3wBCSLomyhE06+UmQHWO8cYlVYjfFAJxjz5g= +cosmossdk.io/x/upgrade v0.2.0/go.mod h1:DXDtkvi//TrFyHWSOaeCZGBoiGAE6Rs8/0ABt2pcDD0= 
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/CosmWasm/wasmd v0.53.0 h1:kdaoAi20bIb4VCsxw9pRaT2g5PpIp82Wqrr9DRVN9ao= -github.com/CosmWasm/wasmd v0.53.0/go.mod h1:FJl/aWjdpGof3usAMFQpDe07Rkx77PUzp0cygFMOvtw= -github.com/CosmWasm/wasmvm/v2 v2.1.2 h1:GkJ5bAsRlLHfIQVg/FY1VHwLyBwlCjAhDea0B8L+e20= -github.com/CosmWasm/wasmvm/v2 v2.1.2/go.mod h1:bMhLQL4Yp9CzJi9A83aR7VO9wockOsSlZbT4ztOl6bg= -github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/CosmWasm/wasmd v0.55.0-ibc2.0 h1:9bH+QDnSGxmZhjSykLYGtW4sltzGFFVm10Awk683q2Y= +github.com/CosmWasm/wasmd v0.55.0-ibc2.0/go.mod h1:c9l+eycjUB2zNVLIGjAXd7QrFEbxVTEa1Fh1Mx74VwQ= +github.com/CosmWasm/wasmvm/v3 v3.0.0-ibc2.0 h1:QoagSm5iYuRSPYDxgRxsa6hVfDppUp4+bOwY7bDuMO0= +github.com/CosmWasm/wasmvm/v3 v3.0.0-ibc2.0/go.mod 
h1:oknpb1bFERvvKcY7vHRp1F/Y/z66xVrsl7n9uWkOAlM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= +github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 
h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.7.0 h1:F5zgRBnCtgGfdMB6jz01PFWIzbS8VjQfCu1H9OYt3BU= -github.com/LumeraProtocol/lumera v1.7.0/go.mod h1:c1M+sjewuCvxw+pznwlspUzenDJI8Y+suKB3RFKS2Wo= +github.com/LumeraProtocol/lumera v1.8.0 h1:0t5/6qOSs9wKti7utPAWo9Jq8wk2X+L/eEaH8flk/Hc= +github.com/LumeraProtocol/lumera v1.8.0/go.mod h1:38uAZxxleZyXaWKbqOQKwjw7CSX92lTxdF+B7c4SRPw= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -103,8 +102,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.44.224 
h1:09CiaaF35nRmxrzWZ2uRq5v6Ghg/d2RiPjZnSgtt+RQ= -github.com/aws/aws-sdk-go v1.44.224/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.49.0 h1:g9BkW1fo9GqKfwg2+zCD+TW/D36Ux+vtfJ8guF4AYmY= +github.com/aws/aws-sdk-go v1.49.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -146,8 +145,8 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= -github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -157,19 +156,19 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= -github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= -github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= -github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/errors v1.12.0 h1:d7oCs6vuIMUQRVbi6jWWWEJZahLCfJpnJSVobd1/sUo= +github.com/cockroachdb/errors v1.12.0/go.mod h1:SvzfYNNBshAVbZ8wzNc/UPK3w1vf0dKDUP41ucAIf7g= +github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a 
h1:f52TdbU4D5nozMAhO9TvTJ2ZMCXtN4VIAmfrrZ0JXQ4= +github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 h1:ASDL+UJcILMqgNeV5jiqR4j+sTuvQNHdf2chuKj1M5k= +github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506/go.mod h1:Mw7HqKr2kdtu6aYGn3tPmAftiP3QPX63LdK/zcariIo= github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= github.com/cockroachdb/redact v1.1.6 h1:zXJBwDZ84xJNlHl1rMyCojqyIxv+7YUpQiJLQ7n4314= @@ -177,8 +176,8 @@ github.com/cockroachdb/redact v1.1.6/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/cometbft/cometbft v0.38.17 h1:FkrQNbAjiFqXydeAO81FUzriL4Bz0abYxN/eOHrQGOk= -github.com/cometbft/cometbft v0.38.17/go.mod h1:5l0SkgeLRXi6bBfQuevXjKqML1jjfJJlvI1Ulp02/o4= +github.com/cometbft/cometbft v0.38.18 h1:1ZHYMdu0S75YxFM13LlPXnOwiIpUW5z9TKMQtTIALpw= +github.com/cometbft/cometbft v0.38.18/go.mod h1:PlOQgf3jQorep+g6oVnJgtP65TJvBJoLiXjGaMdNxBE= github.com/cometbft/cometbft-db v0.14.1 h1:SxoamPghqICBAIcGpleHbmoPqy+crij/++eZz3DlerQ= github.com/cometbft/cometbft-db v0.14.1/go.mod h1:KHP1YghilyGV/xjD5DP3+2hyigWx0WTp9X+0Gnx0RxQ= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= @@ -189,12 +188,12 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= -github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNCM= -github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw= +github.com/cosmos/cosmos-db v1.1.2 h1:KZm4xLlPp6rLkyIOmPOhh+XDK9oH1++pNH/csLdX0Dk= +github.com/cosmos/cosmos-db v1.1.2/go.mod h1:dMg2gav979Ig2N076POEw4CEKbCsieaOfDWSfFZxs8M= github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/cosmos-sdk v0.53.0 h1:ZsB2tnBVudumV059oPuElcr0K1lLOutaI6WJ+osNTbI= -github.com/cosmos/cosmos-sdk v0.53.0/go.mod h1:UPcRyFwOUy2PfSFBWxBceO/HTjZOuBVqY583WyazIGs= +github.com/cosmos/cosmos-sdk v0.50.14 h1:G8CtGHFWbExa+ZpVOVAb4kFmko/R30igsYOwyzRMtgY= +github.com/cosmos/cosmos-sdk v0.50.14/go.mod h1:hrWEFMU1eoXqLJeE6VVESpJDQH67FS1nnMrQIjO2daw= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= @@ -202,12 +201,12 @@ github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= github.com/cosmos/gogoproto v1.7.0 h1:79USr0oyXAbxg3rspGh/m4SWNyoz/GLaAh0QlCe2fro= github.com/cosmos/gogoproto v1.7.0/go.mod h1:yWChEv5IUEYURQasfyBW5ffkMHR/90hiHgbNgrtp4j0= -github.com/cosmos/iavl v1.2.2 h1:qHhKW3I70w+04g5KdsdVSHRbFLgt3yY3qTMd4Xa4rC8= -github.com/cosmos/iavl v1.2.2/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= -github.com/cosmos/ibc-go/modules/capability v1.0.1 h1:ibwhrpJ3SftEEZRxCRkH0fQZ9svjthrX2+oXdZvzgGI= 
-github.com/cosmos/ibc-go/modules/capability v1.0.1/go.mod h1:rquyOV262nGJplkumH+/LeYs04P3eV8oB7ZM4Ygqk4E= -github.com/cosmos/ibc-go/v8 v8.5.1 h1:3JleEMKBjRKa3FeTKt4fjg22za/qygLBo7mDkoYTNBs= -github.com/cosmos/ibc-go/v8 v8.5.1/go.mod h1:P5hkAvq0Qbg0h18uLxDVA9q1kOJ0l36htMsskiNwXbo= +github.com/cosmos/iavl v1.2.4 h1:IHUrG8dkyueKEY72y92jajrizbkZKPZbMmG14QzsEkw= +github.com/cosmos/iavl v1.2.4/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= +github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v10 v10.1.0 h1:epKcbFAeWRRw1i1jZnYzLIEm9sgUPaL1RftuRjjUKGw= +github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v10 v10.1.0/go.mod h1:S4ZQwf5/LhpOi8JXSAese/6QQDk87nTdicJPlZ5q9UQ= +github.com/cosmos/ibc-go/v10 v10.3.0 h1:w5DkHih8qn15deAeFoTk778WJU+xC1krJ5kDnicfUBc= +github.com/cosmos/ibc-go/v10 v10.3.0/go.mod h1:CthaR7n4d23PJJ7wZHegmNgbVcLXCQql7EwHrAXnMtw= github.com/cosmos/ics23/go v0.11.0 h1:jk5skjT0TqX5e5QJbEnwXIS2yI2vnmLOgpQPeM5RtnU= github.com/cosmos/ics23/go v0.11.0/go.mod h1:A8OjxPE67hHST4Icw94hOxxFEJMBG031xIGF/JHNIY0= github.com/cosmos/keyring v1.2.0 h1:8C1lBP9xhImmIabyXW4c3vFjjLiBdGCmfLUfeZlV1Yo= @@ -218,8 +217,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= -github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= +github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= +github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -228,8 +227,9 @@ github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= -github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/desertbit/timer v1.0.1 h1:yRpYNn5Vaaj6QXecdLMPMJsW81JLiI1eokUft5nBmeo= +github.com/desertbit/timer v1.0.1/go.mod h1:htRrYeY5V/t4iu1xCJ5XsQvp4xve8QulXXctAzxqcwE= github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= @@ -241,16 +241,16 @@ github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa5 github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod 
h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= -github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvsekhvalnov/jose2go v1.7.0 h1:bnQc8+GMnidJZA8zc6lLEAb4xNrIqHwO+9TzqvtQZPo= +github.com/dvsekhvalnov/jose2go v1.7.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -270,10 +270,12 @@ github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/ethereum/go-ethereum v1.15.11 h1:JK73WKeu0WC0O1eyX+mdQAVHUV+UR1a9VB/domDngBU= +github.com/ethereum/go-ethereum v1.15.11/go.mod h1:mf8YiHIb0GR4x4TipcvBUPxJLw1mFdmxzoDi11sDRoI= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod 
h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= @@ -286,16 +288,16 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= -github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/getsentry/sentry-go v0.32.0 h1:YKs+//QmwE3DcYtfKRH8/KyOOF/I6Qnx7qYGNHCGmCY= +github.com/getsentry/sentry-go v0.32.0/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.0.4 
h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= -github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI= +github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -309,8 +311,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -319,8 +321,8 @@ github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-viper/mapstructure/v2 v2.2.1 
h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= -github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= @@ -340,8 +342,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= -github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -370,14 +372,14 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= -github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= +github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -397,14 +399,14 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
-github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= -github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= -github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -427,6 +429,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod 
h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -436,8 +440,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.7.5 h1:dT58k9hQ/vbxNMwoI5+xFYAJuv6152UNvdHokfI5wE4= -github.com/hashicorp/go-getter v1.7.5/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= +github.com/hashicorp/go-getter v1.7.8 h1:mshVHx1Fto0/MydBekWan5zUipGq7jO0novchgMmSiY= +github.com/hashicorp/go-getter v1.7.8/go.mod h1:2c6CboOEb9jG6YvmC9xdD+tyAFsrUaJPedwXDGr0TM4= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -456,11 +460,12 @@ github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoD github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 
h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -476,12 +481,16 @@ github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8 github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= github.com/huandu/skiplist v1.2.1 h1:dTi93MgjwErA/8idWTzIw4Y1kZsMWx35fmI2c8Rij7w= github.com/huandu/skiplist v1.2.1/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/orderedmap v0.3.0 
h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJn+Ichc= +github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJDkXXS7VoV7XGE= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= @@ -490,8 +499,8 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= -github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= +github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= +github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -543,8 +552,8 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U github.com/linxGnu/grocksdb v1.9.8 h1:vOIKv9/+HKiqJAElJIEYv3ZLcihRxyP7Suu/Mu8Dxjs= github.com/linxGnu/grocksdb v1.9.8/go.mod h1:C3CNe9UYc9hlEM2pC82AqiGS3LRW537u9LFV4wIZuHk= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= -github.com/magiconair/properties 
v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -599,8 +608,8 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a h1:dlRvE5fWabOchtH7znfiFCcOvmIYgOeAS5ifBXBlh9Q= github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= @@ -611,18 +620,19 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0 
h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= -github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= @@ -717,8 +727,8 @@ github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6 
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= -github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= @@ -736,21 +746,22 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= -github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= 
+github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= -github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/streadway/amqp 
v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -772,8 +783,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -803,7 +814,6 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= @@ -811,32 +821,32 @@ github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWp github.com/zondax/ledger-go v0.14.3 
h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5 h1:qxen9oVGzDdIRP6ejyAJc760RwW4SnVDiTYTzwnXuxo= -go.etcd.io/bbolt v1.4.0-alpha.0.0.20240404170359-43604f3112c5/go.mod h1:eW0HG9/oHQhvRCvb1/pIXW4cOvtDqeQK+XSi3TnwaXY= +go.etcd.io/bbolt v1.4.0-alpha.1 h1:3yrqQzbRRPFPdOMWS/QQIVxVnzSkAZQYeWlZFv1kbj4= +go.etcd.io/bbolt v1.4.0-alpha.1/go.mod h1:S/Z/Nm3iuOnyO1W4XuFfPci51Gj6F1Hv0z8hisyYYOw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod 
h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod 
h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -845,8 +855,8 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= -go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -858,6 +868,8 @@ go.uber.org/zap v1.13.0/go.mod 
h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw= golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -869,13 +881,13 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= -golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= 
+golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0= +golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -889,9 +901,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -920,13 +929,13 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod 
h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= -golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -935,9 +944,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod 
h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -991,24 +999,24 @@ golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1028,16 +1036,15 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= -golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.215.0 h1:jdYF4qnyczlEz2ReWIsosNLDuzXyvFHJtI5gcr0J7t0= -google.golang.org/api v0.215.0/go.mod h1:fta3CVtuJYOEdugLNWm6WodzOS8KdFckABwN4I40hzY= +google.golang.org/api v0.229.0 h1:p98ymMtqeJ5i3lIBMj5MpR9kzIIgzpHHh8vQ+vgAzx8= +google.golang.org/api v0.229.0/go.mod h1:wyDfmq5g1wYJWn29O22FDWN48P7Xcz0xz+LBpptYvB0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1052,12 +1059,12 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= -google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 
h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM= -google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU= +google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1076,8 +1083,8 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= -google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc 
v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= +google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1093,8 +1100,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1130,13 +1137,13 @@ honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -lukechampine.com/blake3 v1.4.0 
h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= -lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +nhooyr.io/websocket v1.8.17 h1:KEVeLJkUywCKVsnLIDlD/5gtayKp8VoCkksHCGGfT9Y= +nhooyr.io/websocket v1.8.17/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= diff --git a/tests/system/signature_utils.go b/tests/system/signature_utils.go deleted file mode 100644 index 977c674c..00000000 --- a/tests/system/signature_utils.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import ( - "github.com/LumeraProtocol/supernode/v2/pkg/cascade" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" - cosmoskeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" -) - -// createCascadeLayoutSignature is a wrapper for the common cascade signature function -func createCascadeLayoutSignature(metadataFile codec.Layout, kr cosmoskeyring.Keyring, userKeyName string, ic uint32, maxFiles uint32) (signatureFormat string, indexFileIDs []string, err error) { - return cascade.CreateLayoutSignature(metadataFile, kr, userKeyName, ic, maxFiles) -} - -// ComputeBlake3Hash is a wrapper for the common Blake3 hash function -func ComputeBlake3Hash(msg []byte) 
([]byte, error) { - return cascade.ComputeBlake3Hash(msg) -} \ No newline at end of file diff --git a/tests/system/system.go b/tests/system/system.go index 8666c474..1a2a3bd7 100644 --- a/tests/system/system.go +++ b/tests/system/system.go @@ -236,6 +236,7 @@ func appendToBuf(r io.Reader, b *ring.Ring, stop <-chan struct{}) { func isLogNoise(text string) bool { for _, v := range []string{ "\x1b[36mmodule=\x1b[0mrpc-server", // "module=rpc-server", + "Upgrading IAVL storage for faster queries", } { if strings.Contains(text, v) { return true