diff --git a/.vscode/launch.json b/.vscode/launch.json index c1d22bcd..cc44c410 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -6,9 +6,9 @@ "type": "go", "request": "launch", "mode": "debug", - "program": "${workspaceFolder}/main.go", + "program": "${workspaceFolder}/supernode/main.go", "env": {}, - "args": [], + "args": ["start"], "showLog": true } ] diff --git a/actionsdk/action/client.go b/actionsdk/action/client.go new file mode 100644 index 00000000..c55d22e7 --- /dev/null +++ b/actionsdk/action/client.go @@ -0,0 +1,94 @@ +package action + +import ( + "context" + "fmt" + + "action/config" + "action/task" + + "github.com/LumeraProtocol/supernode/pkg/lumera" +) + +// ActionClient is the main interface that exposes high-level operations +// for interacting with the Lumera Protocol ecosystem +type ActionClient interface { + // StartSense initiates a Sense operation and returns a unique task ID + StartSense(ctx context.Context, fileHash string, actionID string, filePath string) (string, error) + + // StartCascade initiates a Cascade operation and returns a unique task ID + StartCascade(ctx context.Context, fileHash string, actionID string, filePath string) (string, error) +} + +// ActionClientImpl implements the ActionClient interface +type ActionClientImpl struct { + lumeraClient lumera.Client + config config.Config + taskManager task.Manager +} + +// NewActionClient creates a new instance of ActionClientImpl +func NewActionClient(lumeraClient lumera.Client, config config.Config) *ActionClientImpl { + // Create task manager with config + taskManager := task.NewManager(lumeraClient, config) + + return &ActionClientImpl{ + lumeraClient: lumeraClient, + config: config, + taskManager: taskManager, + } +} + +// StartSense initiates a Sense operation +func (ac *ActionClientImpl) StartSense( + ctx context.Context, + fileHash string, + actionID string, + filePath string, +) (string, error) { + // Input validation + if fileHash == "" { + return "", 
ErrEmptyFileHash + } + if actionID == "" { + return "", ErrEmptyActionID + } + if filePath == "" { + return "", ErrEmptyFilePath + } + + // Create and start the task + taskID, err := ac.taskManager.CreateSenseTask(ctx, fileHash, actionID, filePath) + if err != nil { + return "", fmt.Errorf("failed to create sense task: %w", err) + } + + return taskID, nil +} + +// StartCascade initiates a Cascade operation +func (ac *ActionClientImpl) StartCascade( + ctx context.Context, + fileHash string, + actionID string, + filePath string, +) (string, error) { + // Input validation + if fileHash == "" { + return "", ErrEmptyFileHash + } + if actionID == "" { + return "", ErrEmptyActionID + } + if filePath == "" { + return "", ErrEmptyFilePath + } + + // Create and start the task + taskID, err := ac.taskManager.CreateCascadeTask(ctx, fileHash, actionID, filePath) + if err != nil { + return "", fmt.Errorf("failed to create cascade task: %w", err) + } + + return taskID, nil +} diff --git a/actionsdk/action/errors.go b/actionsdk/action/errors.go new file mode 100644 index 00000000..f8e5bf6a --- /dev/null +++ b/actionsdk/action/errors.go @@ -0,0 +1,52 @@ +package action + +import ( + "errors" + "fmt" +) + +// Common error definitions +var ( + ErrEmptyFileHash = errors.New("file hash cannot be empty") + ErrEmptyActionID = errors.New("action ID cannot be empty") + ErrEmptyFilePath = errors.New("file path cannot be empty") + ErrNoValidAction = errors.New("no action found with the specified ID") + ErrInvalidAction = errors.New("action is not in a valid state") + ErrNoSupernodes = errors.New("no valid supernodes available") + ErrTaskCreation = errors.New("failed to create task") + ErrCommunication = errors.New("communication with supernode failed") +) + +// SupernodeError represents an error related to supernode operations +type SupernodeError struct { + NodeID string + Message string + Err error +} + +// Error returns the error message +func (e *SupernodeError) Error() string { + return 
fmt.Sprintf("supernode error (ID: %s): %s: %v", e.NodeID, e.Message, e.Err) +} + +// Unwrap returns the underlying error +func (e *SupernodeError) Unwrap() error { + return e.Err +} + +// ActionError represents an error related to action operations +type ActionError struct { + ActionID string + Message string + Err error +} + +// Error returns the error message +func (e *ActionError) Error() string { + return fmt.Sprintf("action error (ID: %s): %s: %v", e.ActionID, e.Message, e.Err) +} + +// Unwrap returns the underlying error +func (e *ActionError) Unwrap() error { + return e.Err +} diff --git a/actionsdk/action/types.go b/actionsdk/action/types.go new file mode 100644 index 00000000..26015948 --- /dev/null +++ b/actionsdk/action/types.go @@ -0,0 +1,8 @@ +package action + +type TaskType string + +const ( + TaskTypeSense TaskType = "SENSE" + TaskTypeCascade TaskType = "CASCADE" +) diff --git a/actionsdk/adapters/lumera/adapter.go b/actionsdk/adapters/lumera/adapter.go new file mode 100644 index 00000000..039646fd --- /dev/null +++ b/actionsdk/adapters/lumera/adapter.go @@ -0,0 +1,94 @@ +package lumera + +import ( + "context" + "fmt" + + "github.com/LumeraProtocol/lumera/x/action/types" + sntypes "github.com/LumeraProtocol/lumera/x/supernode/types" + lumeraclient "github.com/LumeraProtocol/supernode/pkg/lumera" +) + +type Client interface { + GetAction(ctx context.Context, actionID string) (Action, error) + GetSupernodes(ctx context.Context, height int64) ([]Supernode, error) +} + +// Adapter adapts the lumera.Client to our Client interface +type Adapter struct { + client lumeraclient.Client +} + +// NewAdapter creates a new adapter for the lumera.Client +func NewAdapter(client lumeraclient.Client) Client { + return &Adapter{ + client: client, + } +} + +// GetAction retrieves action information from the blockchain +func (a *Adapter) GetAction(ctx context.Context, actionID string) (Action, error) { + resp, err := a.client.Action().GetAction(ctx, actionID) + if err != 
nil { + return Action{}, fmt.Errorf("failed to get action: %w", err) + } + + // Transform the response to our simplified Action type + return toSdkAction(resp), nil +} + +// GetSupernodes retrieves a list of top supernodes at a given height +func (a *Adapter) GetSupernodes(ctx context.Context, height int64) ([]Supernode, error) { + resp, err := a.client.SuperNode().GetTopSuperNodesForBlock(ctx, uint64(height)) + if err != nil { + return nil, fmt.Errorf("failed to get supernodes: %w", err) + } + + // Transform the response to our simplified Supernode type + return toSdkSupernodes(resp), nil +} + +// Helper functions to transform between types + +func toSdkAction(resp *types.QueryGetActionResponse) Action { + return Action{ + ID: resp.Action.ActionID, + State: ACTION_STATE(resp.Action.State), + Height: int64(resp.Action.BlockHeight), + ExpirationTime: resp.Action.ExpirationTime, + } +} + +func toSdkSupernodes(resp *sntypes.QueryGetTopSuperNodesForBlockResponse) []Supernode { + var result []Supernode + for _, sn := range resp.Supernodes { + ipAddress, err := getLatestIP(sn) + if err != nil { + continue + } + + if sn.SupernodeAccount == "" { + continue + } + + if sn.States[0].State.String() != string(SUPERNODE_STATE_ACTIVE) { + continue + } + + result = append(result, Supernode{ + CosmosAddress: sn.SupernodeAccount, + GrpcEndpoint: ipAddress, + State: SUPERNODE_STATE_ACTIVE, + }) + } + return result +} + +// getLatestIP is a simplified version of the GetLatestIP function in supernode module +func getLatestIP(supernode *sntypes.SuperNode) (string, error) { + if len(supernode.PrevIpAddresses) == 0 { + return "", fmt.Errorf("no ip history exists for the supernode") + } + // Just take the first one for simplicity + return supernode.PrevIpAddresses[0].Address, nil +} diff --git a/actionsdk/adapters/lumera/types.go b/actionsdk/adapters/lumera/types.go new file mode 100644 index 00000000..721ac134 --- /dev/null +++ b/actionsdk/adapters/lumera/types.go @@ -0,0 +1,39 @@ 
+package lumera + +// ACTION_STATE represents the possible states of an action +type ACTION_STATE string + +const ( + ACTION_STATE_UNSPECIFIED ACTION_STATE = "ACTION_STATE_UNSPECIFIED" + ACTION_STATE_PENDING ACTION_STATE = "ACTION_STATE_PENDING" + ACTION_STATE_DONE ACTION_STATE = "ACTION_STATE_DONE" + ACTION_STATE_APPROVED ACTION_STATE = "ACTION_STATE_APPROVED" + ACTION_STATE_REJECTED ACTION_STATE = "ACTION_STATE_REJECTED" + ACTION_STATE_FAILED ACTION_STATE = "ACTION_STATE_FAILED" +) + +// SUPERNODE_STATE represents the possible states of a supernode +type SUPERNODE_STATE string + +const ( + SUPERNODE_STATE_UNSPECIFIED SUPERNODE_STATE = "SUPERNODE_STATE_UNSPECIFIED" + SUPERNODE_STATE_ACTIVE SUPERNODE_STATE = "SUPERNODE_STATE_ACTIVE" + SUPERNODE_STATE_DISABLED SUPERNODE_STATE = "SUPERNODE_STATE_DISABLED" + SUPERNODE_STATE_STOPPED SUPERNODE_STATE = "SUPERNODE_STATE_STOPPED" + SUPERNODE_STATE_PENALIZED SUPERNODE_STATE = "SUPERNODE_STATE_PENALIZED" +) + +// Action represents an action registered on the Lumera blockchain +type Action struct { + ID string + State ACTION_STATE + Height int64 + ExpirationTime string +} + +// Supernode represents information about a supernode in the network +type Supernode struct { + CosmosAddress string // Blockchain identity of the supernode + GrpcEndpoint string // Network endpoint for gRPC communication + State SUPERNODE_STATE // Current state of the supernode +} diff --git a/actionsdk/config/config.go b/actionsdk/config/config.go new file mode 100644 index 00000000..62d8ac54 --- /dev/null +++ b/actionsdk/config/config.go @@ -0,0 +1,35 @@ +package config + +import ( + "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" + "github.com/cosmos/cosmos-sdk/crypto/keyring" +) + +// Config holds configuration values for the ActionClient +type Config struct { + // Security configuration + Keyring keyring.Keyring // Keyring containing identity keys + LocalCosmosAddress string // Local cosmos address for authentication + LocalPeerType 
securekeyx.PeerType // Local peer type (Simplenode for clients) + + // Network configuration + DefaultSupernodePort int // Default port for supernode gRPC endpoints + + // Task configuration + MaxRetries int // Maximum number of retries for supernode communication + TimeoutSeconds int // Timeout for supernode communication in seconds + SenseSupernodeCount int // Number of supernodes to select for Sense operations + CascadeSupernodeCount int // Number of supernodes to select for Cascade operations +} + +// DefaultConfig returns a Config with default values +func DefaultConfig() Config { + return Config{ + LocalPeerType: securekeyx.Simplenode, + DefaultSupernodePort: 50051, + MaxRetries: 3, + TimeoutSeconds: 30, + SenseSupernodeCount: 3, + CascadeSupernodeCount: 1, + } +} diff --git a/actionsdk/go.mod b/actionsdk/go.mod new file mode 100644 index 00000000..9ff3c94d --- /dev/null +++ b/actionsdk/go.mod @@ -0,0 +1,163 @@ +module action + +go 1.24.0 + +replace github.com/LumeraProtocol/supernode => ../ + +require ( + github.com/LumeraProtocol/lumera v0.4.3 + github.com/LumeraProtocol/supernode v0.0.0-00010101000000-000000000000 + github.com/cosmos/cosmos-sdk v0.50.13 + github.com/google/uuid v1.6.0 + google.golang.org/grpc v1.71.0 +) + +require ( + cosmossdk.io/api v0.7.6 // indirect + cosmossdk.io/collections v0.4.0 // indirect + cosmossdk.io/core v0.11.1 // indirect + cosmossdk.io/depinject v1.1.0 // indirect + cosmossdk.io/errors v1.0.1 // indirect + cosmossdk.io/log v1.4.1 // indirect + cosmossdk.io/math v1.4.0 // indirect + cosmossdk.io/store v1.1.1 // indirect + cosmossdk.io/x/tx v0.13.7 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.1 // indirect + github.com/DataDog/datadog-go v3.2.0+incompatible // indirect + github.com/DataDog/zstd v1.5.5 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/speakeasy 
v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble v1.1.2 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/cometbft/cometbft v0.38.12 // indirect + github.com/cometbft/cometbft-db v0.11.0 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-db v1.1.1 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/gogogateway v1.2.0 // indirect + github.com/cosmos/gogoproto v1.7.0 // indirect + github.com/cosmos/iavl v1.2.2 // indirect + github.com/cosmos/ics23/go v0.11.0 // indirect + github.com/cosmos/ledger-cosmos-go v0.14.0 // indirect + github.com/danieljoos/wincred v1.2.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/emicklei/dot v1.6.2 // indirect + github.com/fatih/color v1.15.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // 
indirect + github.com/go-errors/errors v1.5.1 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gogo/googleapis v1.4.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/glog v1.2.4 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/gorilla/handlers v1.5.2 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-metrics v0.5.3 // indirect + github.com/hashicorp/go-plugin v1.5.2 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/hdevalence/ed25519consensus v0.1.0 // indirect + github.com/huandu/skiplist v1.2.0 // indirect + github.com/iancoleman/strcase v0.3.0 // indirect + github.com/improbable-eng/grpc-web v0.15.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/linxGnu/grocksdb v1.8.14 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 
// indirect + github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.20.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/rs/cors v1.11.1 // indirect + github.com/rs/zerolog v1.33.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.19.0 // indirect + github.com/stretchr/testify v1.10.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tidwall/btree v1.7.0 // indirect + github.com/x-cray/logrus-prefixed-formatter v0.5.2 // indirect + github.com/zondax/hid 
v0.9.2 // indirect + github.com/zondax/ledger-go v0.14.3 // indirect + go.etcd.io/bbolt v1.3.10 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.33.0 // indirect + golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect + golang.org/x/net v0.35.0 // indirect + golang.org/x/sync v0.11.0 // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/term v0.29.0 // indirect + golang.org/x/text v0.22.0 // indirect + google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.5.1 // indirect + nhooyr.io/websocket v1.8.6 // indirect + pgregory.net/rapid v1.1.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/actionsdk/go.sum b/actionsdk/go.sum new file mode 100644 index 00000000..5ca3bf03 --- /dev/null +++ b/actionsdk/go.sum @@ -0,0 +1,1106 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= +cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go/auth v0.6.0 h1:5x+d6b5zdezZ7gmLWD1m/xNjnaQ2YDhmIz/HH3doy1g= +cloud.google.com/go/auth v0.6.0/go.mod h1:b4acV+jLQDyjwm4OXHYjNvRi4jvGBzHWJRtJcy+2P4g= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute v1.27.1 h1:0WbBLIPNANheCRZ4h8QhgzjN53KMutbiVBOLtPiVzBU= 
+cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.1.9 h1:oSkYLVtVme29uGYrOcKcvJRht7cHJpYD09GM9JaR0TE= +cloud.google.com/go/iam v1.1.9/go.mod h1:Nt1eDWNYH9nGQg3d/mY7U1hvfGmsaG9o/kLGoLoLXjQ= +cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= +cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= +cosmossdk.io/api v0.7.6 h1:PC20PcXy1xYKH2KU4RMurVoFjjKkCgYRbVAD4PdqUuY= +cosmossdk.io/api v0.7.6/go.mod h1:IcxpYS5fMemZGqyYtErK7OqvdM0C8kdW3dq8Q/XIG38= +cosmossdk.io/client/v2 v2.0.0-beta.5 h1:0LVv3nEByn//hFDIrYLs2WvsEU3HodOelh4SDHnA/1I= +cosmossdk.io/client/v2 v2.0.0-beta.5/go.mod h1:4p0P6o0ro+FizakJUYS9SeM94RNbv0thLmkHRw5o5as= +cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= +cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= +cosmossdk.io/core v0.11.1 h1:h9WfBey7NAiFfIcUhDVNS503I2P2HdZLebJlUIs8LPA= +cosmossdk.io/core v0.11.1/go.mod h1:OJzxcdC+RPrgGF8NJZR2uoQr56tc7gfBKhiKeDO7hH0= +cosmossdk.io/depinject v1.1.0 h1:wLan7LG35VM7Yo6ov0jId3RHWCGRhe8E8bsuARorl5E= +cosmossdk.io/depinject v1.1.0/go.mod h1:kkI5H9jCGHeKeYWXTqYdruogYrEeWvBQCw1Pj4/eCFI= +cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= +cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/log v1.4.1 h1:wKdjfDRbDyZRuWa8M+9nuvpVYxrEOwbD/CA8hvhU8QM= +cosmossdk.io/log v1.4.1/go.mod h1:k08v0Pyq+gCP6phvdI6RCGhLf/r425UT6Rk/m+o74rU= +cosmossdk.io/math v1.4.0 h1:XbgExXFnXmF/CccPPEto40gOO7FpWu9yWNAZPN3nkNQ= +cosmossdk.io/math v1.4.0/go.mod h1:O5PkD4apz2jZs4zqFdTr16e1dcaQCc5z6lkEnrrppuk= +cosmossdk.io/store v1.1.1 h1:NA3PioJtWDVU7cHHeyvdva5J/ggyLDkyH0hGHl2804Y= +cosmossdk.io/store v1.1.1/go.mod h1:8DwVTz83/2PSI366FERGbWSH7hL6sB7HbYp8bqksNwM= 
+cosmossdk.io/x/circuit v0.1.1 h1:KPJCnLChWrxD4jLwUiuQaf5mFD/1m7Omyo7oooefBVQ= +cosmossdk.io/x/circuit v0.1.1/go.mod h1:B6f/urRuQH8gjt4eLIXfZJucrbreuYrKh5CSjaOxr+Q= +cosmossdk.io/x/evidence v0.1.1 h1:Ks+BLTa3uftFpElLTDp9L76t2b58htjVbSZ86aoK/E4= +cosmossdk.io/x/evidence v0.1.1/go.mod h1:OoDsWlbtuyqS70LY51aX8FBTvguQqvFrt78qL7UzeNc= +cosmossdk.io/x/feegrant v0.1.1 h1:EKFWOeo/pup0yF0svDisWWKAA9Zags6Zd0P3nRvVvw8= +cosmossdk.io/x/feegrant v0.1.1/go.mod h1:2GjVVxX6G2fta8LWj7pC/ytHjryA6MHAJroBWHFNiEQ= +cosmossdk.io/x/nft v0.1.1 h1:pslAVS8P5NkW080+LWOamInjDcq+v2GSCo+BjN9sxZ8= +cosmossdk.io/x/nft v0.1.1/go.mod h1:Kac6F6y2gsKvoxU+fy8uvxRTi4BIhLOor2zgCNQwVgY= +cosmossdk.io/x/tx v0.13.7 h1:8WSk6B/OHJLYjiZeMKhq7DK7lHDMyK0UfDbBMxVmeOI= +cosmossdk.io/x/tx v0.13.7/go.mod h1:V6DImnwJMTq5qFjeGWpXNiT/fjgE4HtmclRmTqRVM3w= +cosmossdk.io/x/upgrade v0.1.4 h1:/BWJim24QHoXde8Bc64/2BSEB6W4eTydq0X/2f8+g38= +cosmossdk.io/x/upgrade v0.1.4/go.mod h1:9v0Aj+fs97O+Ztw+tG3/tp5JSlrmT7IcFhAebQHmOPo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= +github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/CosmWasm/wasmd v0.53.0 h1:kdaoAi20bIb4VCsxw9pRaT2g5PpIp82Wqrr9DRVN9ao= +github.com/CosmWasm/wasmd 
v0.53.0/go.mod h1:FJl/aWjdpGof3usAMFQpDe07Rkx77PUzp0cygFMOvtw= +github.com/CosmWasm/wasmvm/v2 v2.1.2 h1:GkJ5bAsRlLHfIQVg/FY1VHwLyBwlCjAhDea0B8L+e20= +github.com/CosmWasm/wasmvm/v2 v2.1.2/go.mod h1:bMhLQL4Yp9CzJi9A83aR7VO9wockOsSlZbT4ztOl6bg= +github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= +github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/LumeraProtocol/lumera v0.4.3 h1:q/FuT+JOLIpYdlunczRUr6K85r9Sn0lKvGltSrj4r6s= +github.com/LumeraProtocol/lumera v0.4.3/go.mod h1:MRqVY+f8edEBkDvpr4z2nJpglp3Qj1OUvjeWvrvIUSM= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units 
v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.44.224 h1:09CiaaF35nRmxrzWZ2uRq5v6Ghg/d2RiPjZnSgtt+RQ= +github.com/aws/aws-sdk-go v1.44.224/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 
+github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= +github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= +github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= +github.com/bufbuild/protocompile v0.14.0 h1:z3DW4IvXE5G/uTOnSQn+qwQQxvhckkTWLS/0No/o7KU= +github.com/bufbuild/protocompile v0.14.0/go.mod h1:N6J1NYzkspJo3ZwyL4Xjvli86XOj1xq4qAasUFxGups= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod 
h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go 
v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= +github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= +github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/redact v1.1.5 
h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/cometbft/cometbft v0.38.12 h1:OWsLZN2KcSSFe8bet9xCn07VwhBnavPea3VyPnNq1bg= +github.com/cometbft/cometbft v0.38.12/go.mod h1:GPHp3/pehPqgX1930HmK1BpBLZPxB75v/dZg8Viwy+o= +github.com/cometbft/cometbft-db v0.11.0 h1:M3Lscmpogx5NTbb1EGyGDaFRdsoLWrUWimFEyf7jej8= +github.com/cometbft/cometbft-db v0.11.0/go.mod h1:GDPJAC/iFHNjmZZPN8V8C1yr/eyityhi2W1hz2MGKSc= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNCM= +github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw= +github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= +github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod 
h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= +github.com/cosmos/cosmos-sdk v0.50.13 h1:xQ32hhzVy7agEe7behMdZN0ezWhPss3KoLZsF9KoBnw= +github.com/cosmos/cosmos-sdk v0.50.13/go.mod h1:hrWEFMU1eoXqLJeE6VVESpJDQH67FS1nnMrQIjO2daw= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= +github.com/cosmos/gogoproto v1.7.0 h1:79USr0oyXAbxg3rspGh/m4SWNyoz/GLaAh0QlCe2fro= +github.com/cosmos/gogoproto v1.7.0/go.mod h1:yWChEv5IUEYURQasfyBW5ffkMHR/90hiHgbNgrtp4j0= +github.com/cosmos/iavl v1.2.2 h1:qHhKW3I70w+04g5KdsdVSHRbFLgt3yY3qTMd4Xa4rC8= +github.com/cosmos/iavl v1.2.2/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= +github.com/cosmos/ibc-go/modules/capability v1.0.1 h1:ibwhrpJ3SftEEZRxCRkH0fQZ9svjthrX2+oXdZvzgGI= +github.com/cosmos/ibc-go/modules/capability v1.0.1/go.mod h1:rquyOV262nGJplkumH+/LeYs04P3eV8oB7ZM4Ygqk4E= +github.com/cosmos/ibc-go/v8 v8.5.1 h1:3JleEMKBjRKa3FeTKt4fjg22za/qygLBo7mDkoYTNBs= +github.com/cosmos/ibc-go/v8 v8.5.1/go.mod h1:P5hkAvq0Qbg0h18uLxDVA9q1kOJ0l36htMsskiNwXbo= +github.com/cosmos/ics23/go v0.11.0 h1:jk5skjT0TqX5e5QJbEnwXIS2yI2vnmLOgpQPeM5RtnU= +github.com/cosmos/ics23/go v0.11.0/go.mod h1:A8OjxPE67hHST4Icw94hOxxFEJMBG031xIGF/JHNIY0= +github.com/cosmos/ledger-cosmos-go v0.14.0 h1:WfCHricT3rPbkPSVKRH+L4fQGKYHuGOK9Edpel8TYpE= +github.com/cosmos/ledger-cosmos-go v0.14.0/go.mod h1:E07xCWSBl3mTGofZ2QnL4cIUzMbbGVyik84QYKbX3RA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= +github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= 
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod 
h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= 
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= +github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1-0.20201022092350-68b0159b7869/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf 
v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= +github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= +github.com/googleapis/gax-go/v2 
v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.7.5 h1:dT58k9hQ/vbxNMwoI5+xFYAJuv6152UNvdHokfI5wE4= +github.com/hashicorp/go-getter v1.7.5/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE= +github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= 
+github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= +github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.0 
h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= +github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= +github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= +github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1 
h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linxGnu/grocksdb v1.8.14 h1:HTgyYalNwBSG/1qCQUIott44wU5b2Y9Kr3z7SK5OfGQ= +github.com/linxGnu/grocksdb v1.8.14/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod 
h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
+github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a h1:dlRvE5fWabOchtH7znfiFCcOvmIYgOeAS5ifBXBlh9Q= +github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= +github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= 
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 h1:jik8PHtAIsPlCRJjJzl4udgEf7hawInF9texMeO2jrU= +github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.20.1 h1:IMJXHOD6eARkQpxo8KkhgEVFlBNm+nkrFUyGlIu7Na8= +github.com/prometheus/client_golang v1.20.1/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model 
v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod 
h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shamaton/msgpack/v2 v2.2.0 h1:IP1m01pHwCrMa6ZccP9B3bqxEMKMSmMVAVKk54g3L/Y= +github.com/shamaton/msgpack/v2 v2.2.0/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod 
h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= 
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 
h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= +github.com/x-cray/logrus-prefixed-formatter 
v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= +github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= +go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr 
v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp 
v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= +golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.186.0 h1:n2OPp+PPXX0Axh4GuSsL5QL8xQCTb2oDwyzPnQvqUug= +google.golang.org/api v0.186.0/go.mod h1:hvRbBmgoje49RV3xqVXrmP6w93n6ehGgIVPYrGtBFFc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 h1:6whtk83KtD3FkGrVb2hFXuQ+ZMbCNdakARIn/aHMmG8= +google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094/go.mod h1:Zs4wYw8z1zr6RNF4cwYb31mvN/EGaKAdQjNCF3DW6K4= +google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24= +google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc 
v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= +google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 
h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= +lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/actionsdk/net/client.go b/actionsdk/net/client.go new file mode 100644 index 00000000..fbce450a --- /dev/null +++ b/actionsdk/net/client.go @@ -0,0 +1,21 @@ +package net + +import ( + "context" + + "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" +) + +// SupernodeClient provides access to supernode services +type SupernodeClient interface { + // UploadInputData uploads input data for cascade processing + UploadInputData(ctx context.Context, in *cascade.UploadInputDataRequest, opts ...grpc.CallOption) (*cascade.UploadInputDataResponse, error) + + // HealthCheck performs a health check on the supernode + HealthCheck(ctx context.Context) (*grpc_health_v1.HealthCheckResponse, error) + + // Close closes the underlying connection + Close() error +} diff --git a/actionsdk/net/config.go b/actionsdk/net/config.go new file mode 100644 index 00000000..34cbe074 --- /dev/null +++ b/actionsdk/net/config.go @@ -0,0 +1,44 @@ +package net + +import ( + "action/adapters/lumera" + "action/config" + + 
"github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" + "github.com/LumeraProtocol/supernode/pkg/net/grpc/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" +) + +// Config holds configuration for creating a SupernodeClient +type Config struct { + // Security configuration + Keyring keyring.Keyring + LocalCosmosAddress string + LocalPeerType securekeyx.PeerType + + // Target supernode + TargetSupernode lumera.Supernode + + // Default port if not specified in endpoint + DefaultPort int + + // Client options + ClientOptions *client.ClientOptions +} + +// NewConfigFromGlobalConfig creates a client config from the global config +func NewConfigFromGlobalConfig(globalConfig config.Config, targetSupernode lumera.Supernode) *Config { + return &Config{ + Keyring: globalConfig.Keyring, + LocalCosmosAddress: globalConfig.LocalCosmosAddress, + LocalPeerType: globalConfig.LocalPeerType, + TargetSupernode: targetSupernode, + DefaultPort: globalConfig.DefaultSupernodePort, + ClientOptions: client.DefaultClientOptions(), + } +} + +// DefaultClientOptions returns the default client options +func DefaultClientOptions() *client.ClientOptions { + return client.DefaultClientOptions() +} diff --git a/actionsdk/net/factory.go b/actionsdk/net/factory.go new file mode 100644 index 00000000..91c8bfea --- /dev/null +++ b/actionsdk/net/factory.go @@ -0,0 +1,50 @@ +package net + +import ( + "context" + "fmt" + "strings" + + "action/adapters/lumera" + "action/config" +) + +// ClientFactory creates supernode clients +type ClientFactory struct { + config config.Config +} + +// NewClientFactory creates a new supernode client factory +func NewClientFactory(config config.Config) *ClientFactory { + return &ClientFactory{ + config: config, + } +} + +// CreateClient creates a client for a specific supernode +func (f *ClientFactory) CreateClient(ctx context.Context, supernode lumera.Supernode) (SupernodeClient, error) { + // Validate the supernode has an endpoint + if supernode.GrpcEndpoint == 
"" { + return nil, fmt.Errorf("supernode has no gRPC endpoint") + } + + // Ensure endpoint has port + endpoint := EnsureEndpointHasPort(supernode.GrpcEndpoint, f.config.DefaultSupernodePort) + + // Update the supernode with the properly formatted endpoint + supernode.GrpcEndpoint = endpoint + + // Create client config + clientConfig := NewConfigFromGlobalConfig(f.config, supernode) + + // Create client + return NewSupernodeClient(ctx, clientConfig) +} + +// EnsureEndpointHasPort adds default port to endpoint if missing +func EnsureEndpointHasPort(endpoint string, defaultPort int) string { + if !strings.Contains(endpoint, ":") { + return fmt.Sprintf("%s:%d", endpoint, defaultPort) + } + return endpoint +} diff --git a/actionsdk/net/impl.go b/actionsdk/net/impl.go new file mode 100644 index 00000000..4d4f664b --- /dev/null +++ b/actionsdk/net/impl.go @@ -0,0 +1,84 @@ +package net + +import ( + "context" + "fmt" + + "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade" + "github.com/LumeraProtocol/supernode/pkg/net/credentials" + "github.com/LumeraProtocol/supernode/pkg/net/grpc/client" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" +) + +type supernodeClient struct { + cascadeClient cascade.CascadeServiceClient + healthClient grpc_health_v1.HealthClient + conn *grpc.ClientConn +} + +// NewSupernodeClient creates a new supernode client +func NewSupernodeClient(ctx context.Context, config *Config) (SupernodeClient, error) { + if config == nil { + return nil, fmt.Errorf("config cannot be nil") + } + + if config.Keyring == nil { + return nil, fmt.Errorf("keyring cannot be nil") + } + + if config.LocalCosmosAddress == "" { + return nil, fmt.Errorf("local cosmos address cannot be empty") + } + + // Create client credentials + clientCreds, err := credentials.NewClientCreds(&credentials.ClientOptions{ + CommonOptions: credentials.CommonOptions{ + Keyring: config.Keyring, + LocalIdentity: config.LocalCosmosAddress, + PeerType: 
config.LocalPeerType, + }, + }) + if err != nil { + return nil, fmt.Errorf("failed to create credentials: %w", err) + } + + // Create gRPC client + grpcClient := client.NewClient(clientCreds) + + // Format address with identity for authentication + targetGrpcEndpoint := config.TargetSupernode.GrpcEndpoint + targetCosmosAddress := config.TargetSupernode.CosmosAddress + addressWithIdentity := FormatAddressWithIdentity(targetCosmosAddress, targetGrpcEndpoint) + + // Connect to server + conn, err := grpcClient.Connect(ctx, addressWithIdentity, config.ClientOptions) + if err != nil { + return nil, fmt.Errorf("failed to connect to supernode: %w", err) + } + + // Create service clients + return &supernodeClient{ + cascadeClient: cascade.NewCascadeServiceClient(conn), + healthClient: grpc_health_v1.NewHealthClient(conn), + conn: conn, + }, nil +} + +// UploadInputData sends data to the supernode for cascade processing +func (c *supernodeClient) UploadInputData(ctx context.Context, in *cascade.UploadInputDataRequest, opts ...grpc.CallOption) (*cascade.UploadInputDataResponse, error) { + return c.cascadeClient.UploadInputData(ctx, in, opts...) +} + +// HealthCheck performs a health check on the supernode +func (c *supernodeClient) HealthCheck(ctx context.Context) (*grpc_health_v1.HealthCheckResponse, error) { + return c.healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) +} + +// Close closes the connection to the supernode +func (c *supernodeClient) Close() error { + if c.conn != nil { + return c.conn.Close() + } + return nil +} diff --git a/actionsdk/net/utils..go b/actionsdk/net/utils..go new file mode 100644 index 00000000..2d0f45b6 --- /dev/null +++ b/actionsdk/net/utils..go @@ -0,0 +1,24 @@ +package net + +import ( + "fmt" + "net" +) + +// GetFreePortInRange finds a free port within the given range. 
+func GetFreePortInRange(start, end int) (int, error) { + for port := start; port <= end; port++ { + addr := fmt.Sprintf("localhost:%d", port) + listener, err := net.Listen("tcp", addr) + if err == nil { + listener.Close() + return port, nil + } + } + return 0, fmt.Errorf("no free port found in range %d-%d", start, end) +} + +// FormatAddressWithIdentity combines identity and address for secure connection +func FormatAddressWithIdentity(cosmosAddress, grpcEndpoint string) string { + return fmt.Sprintf("%s@%s", cosmosAddress, grpcEndpoint) +} diff --git a/actionsdk/task/cascade.go b/actionsdk/task/cascade.go new file mode 100644 index 00000000..d07681b5 --- /dev/null +++ b/actionsdk/task/cascade.go @@ -0,0 +1,200 @@ +// File: task/cascade.go +package task + +import ( + "action/adapters/lumera" + "action/config" + "action/net" + "context" + "errors" + "fmt" + "path/filepath" + "time" + + "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade" + "google.golang.org/grpc/health/grpc_health_v1" +) + +// CascadeTask implements the Task interface for Cascade operations +type CascadeTask struct { + BaseTask + fileHash string + actionID string + filePath string + client lumera.Client + config config.Config +} + +// NewCascadeTask creates a new CascadeTask +func NewCascadeTask( + taskID string, + fileHash string, + actionID string, + filePath string, + client lumera.Client, + config config.Config, +) *CascadeTask { + return &CascadeTask{ + BaseTask: BaseTask{ + TaskID: taskID, + Status: StatusPending, + }, + fileHash: fileHash, + actionID: actionID, + filePath: filePath, + client: client, + config: config, + } +} + +// Run executes the CascadeTask +func (t *CascadeTask) Run(ctx context.Context) error { + // Update status + t.Status = StatusProcessing + + // 1. Action Validation Phase + action, err := t.validateAction(ctx) + if err != nil { + t.Status = StatusFailed + t.Err = fmt.Errorf("action validation failed: %w", err) + return t.Err + } + + // 2. 
Supernode Selection Phase + supernodes, err := t.selectSupernodes(ctx, action.Height) + if err != nil { + t.Status = StatusFailed + t.Err = fmt.Errorf("supernode selection failed: %w", err) + return t.Err + } + + // 3. Create client factory using global config + clientFactory := net.NewClientFactory(t.config) + + // TODO : Verify this part on the supernode side + + // 4. Read file content + // fileData, err := os.ReadFile(t.filePath) + // if err != nil { + // t.Status = StatusFailed + // t.Err = fmt.Errorf("failed to read file: %w", err) + // return t.Err + // } + + // 5. Create upload request with correct field names matching the protobuf definition + uploadRequest := &cascade.UploadInputDataRequest{ + Filename: filepath.Base(t.filePath), // Extract filename from the path + ActionId: t.actionID, + DataHash: t.fileHash, // Changed from FileHash to DataHash + // Data: fileData, // Add the actual file data + } + + // 6. Try each supernode until success + var lastErr error + for _, sn := range supernodes { + // Try to upload to this supernode + success, err := t.tryUploadToSupernode(ctx, clientFactory, sn, uploadRequest) + if err != nil { + lastErr = err + continue + } + + if success { + // Success! 
+ t.Status = StatusCompleted + return nil + } + } + + // All supernodes failed + t.Status = StatusFailed + t.Err = fmt.Errorf("all supernodes failed: %w", lastErr) + return t.Err +} + +// tryUploadToSupernode attempts to upload data to a single supernode +func (t *CascadeTask) tryUploadToSupernode( + ctx context.Context, + clientFactory *net.ClientFactory, + supernode lumera.Supernode, + request *cascade.UploadInputDataRequest, +) (bool, error) { + // Create a client for this supernode + client, err := clientFactory.CreateClient(ctx, supernode) + if err != nil { + return false, fmt.Errorf("failed to create client for supernode %s: %w", supernode.CosmosAddress, err) + } + // Ensure connection is closed when we're done with this function + defer client.Close() + + // Check if supernode is healthy + healthCtx, cancel := context.WithTimeout(ctx, time.Duration(t.config.TimeoutSeconds)*time.Second) + healthResp, err := client.HealthCheck(healthCtx) + cancel() + + if err != nil { + return false, fmt.Errorf("health check failed for supernode %s: %w", supernode.CosmosAddress, err) + } + + if healthResp.Status != grpc_health_v1.HealthCheckResponse_SERVING { + return false, fmt.Errorf("supernode %s is not in serving state", supernode.CosmosAddress) + } + + // Upload data to supernode + uploadCtx, cancel := context.WithTimeout(ctx, time.Duration(t.config.TimeoutSeconds)*time.Second) + defer cancel() + + _, err = client.UploadInputData(uploadCtx, request) + if err != nil { + return false, fmt.Errorf("upload failed to supernode %s: %w", supernode.CosmosAddress, err) + } + + // Success! 
+ return true, nil +} + +// validateAction checks if the action exists and is in PENDING state +func (t *CascadeTask) validateAction(ctx context.Context) (lumera.Action, error) { + action, err := t.client.GetAction(ctx, t.actionID) + if err != nil { + return lumera.Action{}, fmt.Errorf("failed to get action: %w", err) + } + + // Check if action exists + if action.ID == "" { + return lumera.Action{}, errors.New("no action found with the specified ID") + } + + // Check action state + if action.State != lumera.ACTION_STATE_PENDING { + return lumera.Action{}, fmt.Errorf("action is in %s state, expected PENDING", action.State) + } + + return action, nil +} + +// selectSupernodes selects supernodes for cascade operation +func (t *CascadeTask) selectSupernodes(ctx context.Context, height int64) ([]lumera.Supernode, error) { + // Get top supernodes + supernodes, err := t.client.GetSupernodes(ctx, height) + if err != nil { + return nil, fmt.Errorf("failed to get supernodes: %w", err) + } + + // Filter valid supernodes + var validSupernodes []lumera.Supernode + for _, sn := range supernodes { + if sn.State == lumera.SUPERNODE_STATE_ACTIVE && sn.GrpcEndpoint != "" { + validSupernodes = append(validSupernodes, sn) + if len(validSupernodes) >= t.config.CascadeSupernodeCount { + break + } + } + } + + if len(validSupernodes) == 0 { + return nil, errors.New("no valid supernodes available") + } + + return validSupernodes, nil +} diff --git a/actionsdk/task/manager.go b/actionsdk/task/manager.go new file mode 100644 index 00000000..06f92fa3 --- /dev/null +++ b/actionsdk/task/manager.go @@ -0,0 +1,116 @@ +package task + +// TODO: Implement task cleanup and retention policies + +import ( + "context" + "fmt" + "sync" + + "action/adapters/lumera" + "action/config" + + lumeraclient "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/google/uuid" +) + +// Manager handles task creation and management +type Manager interface { + // CreateSenseTask creates and starts a Sense task 
+ CreateSenseTask(ctx context.Context, fileHash, actionID, filePath string) (string, error) + + // CreateCascadeTask creates and starts a Cascade task + CreateCascadeTask(ctx context.Context, fileHash, actionID, filePath string) (string, error) + + // GetTask retrieves a task by its ID + GetTask(taskID string) (Task, bool) +} + +// ManagerImpl implements the Manager interface +type ManagerImpl struct { + client lumera.Client + config config.Config + tasks map[string]Task + tasksMutex sync.RWMutex +} + +// NewManager creates a new task manager +func NewManager(client lumeraclient.Client, config config.Config) Manager { + // Adapt the lumera.Client to our Client interface + clientAdapter := lumera.NewAdapter(client) + + return &ManagerImpl{ + client: clientAdapter, + config: config, + tasks: make(map[string]Task), + } +} + +// CreateSenseTask creates and starts a Sense task +func (m *ManagerImpl) CreateSenseTask( + ctx context.Context, + fileHash string, + actionID string, + filePath string, +) (string, error) { + // Generate task ID + taskID := uuid.New().String() + + // Create task with config + task := NewSenseTask(taskID, fileHash, actionID, filePath, m.client, m.config) + + // Store task + m.tasksMutex.Lock() + m.tasks[taskID] = task + m.tasksMutex.Unlock() + + // Start task asynchronously + go func() { + err := task.Run(ctx) + if err != nil { + // Task will update its own error state + fmt.Printf("Sense task %s failed: %v\n", taskID, err) + } + }() + + return taskID, nil +} + +// CreateCascadeTask creates and starts a Cascade task +func (m *ManagerImpl) CreateCascadeTask( + ctx context.Context, + fileHash string, + actionID string, + filePath string, +) (string, error) { + // Generate task ID + taskID := uuid.New().String() + + // Create task with config + task := NewCascadeTask(taskID, fileHash, actionID, filePath, m.client, m.config) + + // Store task + m.tasksMutex.Lock() + m.tasks[taskID] = task + m.tasksMutex.Unlock() + + // Start task asynchronously + go 
func() { + err := task.Run(ctx) + if err != nil { + // Task will update its own error state + fmt.Printf("Cascade task %s failed: %v\n", taskID, err) + } + }() + + return taskID, nil +} + +// GetTask retrieves a task by its ID +func (m *ManagerImpl) GetTask(taskID string) (Task, bool) { + m.tasksMutex.RLock() + defer m.tasksMutex.RUnlock() + + task, exists := m.tasks[taskID] + return task, exists +} diff --git a/actionsdk/task/sense.go b/actionsdk/task/sense.go new file mode 100644 index 00000000..77335c9a --- /dev/null +++ b/actionsdk/task/sense.go @@ -0,0 +1,121 @@ +package task + +import ( + "action/adapters/lumera" + "action/config" + "context" + "errors" + "fmt" +) + +// SenseTask implements the Task interface for Sense operations +type SenseTask struct { + BaseTask + fileHash string + actionID string + filePath string + client lumera.Client + config config.Config + supernodes []lumera.Supernode +} + +// NewSenseTask creates a new SenseTask +func NewSenseTask( + taskID string, + fileHash string, + actionID string, + filePath string, + client lumera.Client, + config config.Config, +) *SenseTask { + return &SenseTask{ + BaseTask: BaseTask{ + TaskID: taskID, + Status: StatusPending, + }, + fileHash: fileHash, + actionID: actionID, + filePath: filePath, + client: client, + config: config, + } +} + +// Run executes the SenseTask +func (t *SenseTask) Run(ctx context.Context) error { + // Update status + t.Status = StatusProcessing + + // 1. Action Validation Phase + action, err := t.validateAction(ctx) + if err != nil { + t.Status = StatusFailed + t.Err = fmt.Errorf("action validation failed: %w", err) + return t.Err + } + + // 2. Supernode Selection Phase + supernodes, err := t.selectSupernodes(ctx, action.Height) + if err != nil { + t.Status = StatusFailed + t.Err = fmt.Errorf("supernode selection failed: %w", err) + return t.Err + } + + // Store selected supernodes + t.supernodes = supernodes + + // 3. 
Supernode Communication Phase
+	// This will be implemented when the sense-specific requirements are defined
+
+	// For now, just mark as completed
+	t.Status = StatusCompleted
+	return nil
+}
+
+// validateAction checks if the action exists and is in PENDING state
+// NOTE(review): duplicated almost verbatim in cascade.go — consider a
+// shared helper once the two flows stabilize.
+func (t *SenseTask) validateAction(ctx context.Context) (lumera.Action, error) {
+	action, err := t.client.GetAction(ctx, t.actionID)
+	if err != nil {
+		return lumera.Action{}, fmt.Errorf("failed to get action: %w", err)
+	}
+
+	// Check if action exists
+	if action.ID == "" {
+		return lumera.Action{}, errors.New("no action found with the specified ID")
+	}
+
+	// Check action state
+	if action.State != lumera.ACTION_STATE_PENDING {
+		return lumera.Action{}, fmt.Errorf("action is in %s state, expected PENDING", action.State)
+	}
+
+	return action, nil
+}
+
+// selectSupernodes selects multiple supernodes for sense operation
+// Up to SenseSupernodeCount active supernodes with a non-empty gRPC
+// endpoint are returned; fewer is accepted, only an empty result errors.
+func (t *SenseTask) selectSupernodes(ctx context.Context, height int64) ([]lumera.Supernode, error) {
+	// Get top supernodes
+	supernodes, err := t.client.GetSupernodes(ctx, height)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get supernodes: %w", err)
+	}
+
+	// Filter valid supernodes
+	var validSupernodes []lumera.Supernode
+	for _, sn := range supernodes {
+		if sn.State == lumera.SUPERNODE_STATE_ACTIVE && sn.GrpcEndpoint != "" {
+			validSupernodes = append(validSupernodes, sn)
+			if len(validSupernodes) >= t.config.SenseSupernodeCount {
+				break
+			}
+		}
+	}
+
+	// Check if we have enough valid supernodes
+	if len(validSupernodes) == 0 {
+		return nil, errors.New("no valid supernodes available")
+	}
+
+	return validSupernodes, nil
+}
diff --git a/actionsdk/task/task.go b/actionsdk/task/task.go
new file mode 100644
index 00000000..345f038f
--- /dev/null
+++ b/actionsdk/task/task.go
@@ -0,0 +1,52 @@
+package task
+
+import (
+	"context"
+)
+
+// TaskStatus represents the possible states of a task
+type TaskStatus string
+
+const (
+	StatusPending    TaskStatus = "PENDING"
+	StatusProcessing TaskStatus = "PROCESSING"
+	StatusCompleted  TaskStatus = "COMPLETED"
+	StatusFailed     TaskStatus = "FAILED"
+)
+
+// Task is the interface that all task types must implement
+type Task interface {
+	// Run executes the task asynchronously
+	Run(ctx context.Context) error
+
+	// GetTaskID returns the unique identifier for this task
+	GetTaskID() string
+
+	// GetStatus returns the current status of the task
+	GetStatus() TaskStatus
+
+	// GetError returns the error if the task failed
+	GetError() error
+}
+
+// BaseTask contains common fields and functionality for all tasks
+//
+// NOTE(review): Status and Err are written by the task's Run goroutine
+// (see cascade.go / sense.go) while GetStatus/GetError can be called
+// concurrently via Manager.GetTask — that is a data race under `go test
+// -race`. Consider guarding these fields with a sync.RWMutex or making
+// them atomic; confirm with the callers before changing.
+type BaseTask struct {
+	TaskID string
+	Status TaskStatus
+	Err    error
+}
+
+// GetTaskID returns the unique identifier for this task
+func (t *BaseTask) GetTaskID() string {
+	return t.TaskID
+}
+
+// GetStatus returns the current status of the task
+// (racy with concurrent Run — see the BaseTask note above)
+func (t *BaseTask) GetStatus() TaskStatus {
+	return t.Status
+}
+
+// GetError returns the error if the task failed
+func (t *BaseTask) GetError() error {
+	return t.Err
+}
diff --git a/gen/supernode/action/cascade/service.pb.go b/gen/supernode/action/cascade/service.pb.go
new file mode 100644
index 00000000..9b8cc5fb
--- /dev/null
+++ b/gen/supernode/action/cascade/service.pb.go
@@ -0,0 +1,236 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.5
+// protoc v5.29.3
+// source: proto/supernode/action/cascade/service.proto
+
+package cascade
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+	unsafe "unsafe"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type UploadInputDataRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Filename string `protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"` + ActionId string `protobuf:"bytes,2,opt,name=action_id,json=actionId,proto3" json:"action_id,omitempty"` + DataHash string `protobuf:"bytes,3,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + RqIc int32 `protobuf:"varint,4,opt,name=rq_ic,json=rqIc,proto3" json:"rq_ic,omitempty"` + RqMax int32 `protobuf:"varint,5,opt,name=rq_max,json=rqMax,proto3" json:"rq_max,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UploadInputDataRequest) Reset() { + *x = UploadInputDataRequest{} + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UploadInputDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UploadInputDataRequest) ProtoMessage() {} + +func (x *UploadInputDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UploadInputDataRequest.ProtoReflect.Descriptor instead. 
+func (*UploadInputDataRequest) Descriptor() ([]byte, []int) { + return file_proto_supernode_action_cascade_service_proto_rawDescGZIP(), []int{0} +} + +func (x *UploadInputDataRequest) GetFilename() string { + if x != nil { + return x.Filename + } + return "" +} + +func (x *UploadInputDataRequest) GetActionId() string { + if x != nil { + return x.ActionId + } + return "" +} + +func (x *UploadInputDataRequest) GetDataHash() string { + if x != nil { + return x.DataHash + } + return "" +} + +func (x *UploadInputDataRequest) GetRqIc() int32 { + if x != nil { + return x.RqIc + } + return 0 +} + +func (x *UploadInputDataRequest) GetRqMax() int32 { + if x != nil { + return x.RqMax + } + return 0 +} + +type UploadInputDataResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UploadInputDataResponse) Reset() { + *x = UploadInputDataResponse{} + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UploadInputDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UploadInputDataResponse) ProtoMessage() {} + +func (x *UploadInputDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_action_cascade_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UploadInputDataResponse.ProtoReflect.Descriptor instead. 
+func (*UploadInputDataResponse) Descriptor() ([]byte, []int) { + return file_proto_supernode_action_cascade_service_proto_rawDescGZIP(), []int{1} +} + +func (x *UploadInputDataResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *UploadInputDataResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +var File_proto_supernode_action_cascade_service_proto protoreflect.FileDescriptor + +var file_proto_supernode_action_cascade_service_proto_rawDesc = string([]byte{ + 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, + 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x64, 0x61, 0x74, 0x61, 0x48, 0x61, 0x73, 0x68, 0x12, 0x13, 0x0a, 0x05, 0x72, 0x71, 0x5f, 0x69, + 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x72, 0x71, 0x49, 0x63, 0x12, 0x15, 0x0a, + 0x06, 0x72, 0x71, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x72, + 0x71, 0x4d, 0x61, 0x78, 0x22, 0x4d, 0x0a, 0x17, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x32, 0x66, 0x0a, 0x0e, 0x43, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, + 0x64, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x61, 0x73, 0x63, + 0x61, 0x64, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, + 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, + 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_proto_supernode_action_cascade_service_proto_rawDescOnce sync.Once + file_proto_supernode_action_cascade_service_proto_rawDescData []byte +) + +func file_proto_supernode_action_cascade_service_proto_rawDescGZIP() []byte { + file_proto_supernode_action_cascade_service_proto_rawDescOnce.Do(func() { + file_proto_supernode_action_cascade_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_supernode_action_cascade_service_proto_rawDesc), len(file_proto_supernode_action_cascade_service_proto_rawDesc))) 
+ }) + return file_proto_supernode_action_cascade_service_proto_rawDescData +} + +var file_proto_supernode_action_cascade_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_proto_supernode_action_cascade_service_proto_goTypes = []any{ + (*UploadInputDataRequest)(nil), // 0: cascade.UploadInputDataRequest + (*UploadInputDataResponse)(nil), // 1: cascade.UploadInputDataResponse +} +var file_proto_supernode_action_cascade_service_proto_depIdxs = []int32{ + 0, // 0: cascade.CascadeService.UploadInputData:input_type -> cascade.UploadInputDataRequest + 1, // 1: cascade.CascadeService.UploadInputData:output_type -> cascade.UploadInputDataResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_proto_supernode_action_cascade_service_proto_init() } +func file_proto_supernode_action_cascade_service_proto_init() { + if File_proto_supernode_action_cascade_service_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_supernode_action_cascade_service_proto_rawDesc), len(file_proto_supernode_action_cascade_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_supernode_action_cascade_service_proto_goTypes, + DependencyIndexes: file_proto_supernode_action_cascade_service_proto_depIdxs, + MessageInfos: file_proto_supernode_action_cascade_service_proto_msgTypes, + }.Build() + File_proto_supernode_action_cascade_service_proto = out.File + file_proto_supernode_action_cascade_service_proto_goTypes = nil + file_proto_supernode_action_cascade_service_proto_depIdxs = nil +} diff --git 
a/gen/supernode/action/cascade/service_grpc.pb.go b/gen/supernode/action/cascade/service_grpc.pb.go new file mode 100644 index 00000000..f2bd9030 --- /dev/null +++ b/gen/supernode/action/cascade/service_grpc.pb.go @@ -0,0 +1,121 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.29.3 +// source: proto/supernode/action/cascade/service.proto + +package cascade + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + CascadeService_UploadInputData_FullMethodName = "/cascade.CascadeService/UploadInputData" +) + +// CascadeServiceClient is the client API for CascadeService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type CascadeServiceClient interface { + UploadInputData(ctx context.Context, in *UploadInputDataRequest, opts ...grpc.CallOption) (*UploadInputDataResponse, error) +} + +type cascadeServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewCascadeServiceClient(cc grpc.ClientConnInterface) CascadeServiceClient { + return &cascadeServiceClient{cc} +} + +func (c *cascadeServiceClient) UploadInputData(ctx context.Context, in *UploadInputDataRequest, opts ...grpc.CallOption) (*UploadInputDataResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(UploadInputDataResponse) + err := c.cc.Invoke(ctx, CascadeService_UploadInputData_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CascadeServiceServer is the server API for CascadeService service. 
+// All implementations must embed UnimplementedCascadeServiceServer +// for forward compatibility. +type CascadeServiceServer interface { + UploadInputData(context.Context, *UploadInputDataRequest) (*UploadInputDataResponse, error) + mustEmbedUnimplementedCascadeServiceServer() +} + +// UnimplementedCascadeServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedCascadeServiceServer struct{} + +func (UnimplementedCascadeServiceServer) UploadInputData(context.Context, *UploadInputDataRequest) (*UploadInputDataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UploadInputData not implemented") +} +func (UnimplementedCascadeServiceServer) mustEmbedUnimplementedCascadeServiceServer() {} +func (UnimplementedCascadeServiceServer) testEmbeddedByValue() {} + +// UnsafeCascadeServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CascadeServiceServer will +// result in compilation errors. +type UnsafeCascadeServiceServer interface { + mustEmbedUnimplementedCascadeServiceServer() +} + +func RegisterCascadeServiceServer(s grpc.ServiceRegistrar, srv CascadeServiceServer) { + // If the following call pancis, it indicates UnimplementedCascadeServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&CascadeService_ServiceDesc, srv) +} + +func _CascadeService_UploadInputData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UploadInputDataRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CascadeServiceServer).UploadInputData(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CascadeService_UploadInputData_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CascadeServiceServer).UploadInputData(ctx, req.(*UploadInputDataRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// CascadeService_ServiceDesc is the grpc.ServiceDesc for CascadeService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CascadeService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "cascade.CascadeService", + HandlerType: (*CascadeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UploadInputData", + Handler: _CascadeService_UploadInputData_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "proto/supernode/action/cascade/service.proto", +} diff --git a/gen/supernode/supernode/cascade_service.pb.go b/gen/supernode/supernode/cascade_service.pb.go new file mode 100644 index 00000000..0b47a6dc --- /dev/null +++ b/gen/supernode/supernode/cascade_service.pb.go @@ -0,0 +1,370 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.5 +// protoc v5.29.3 +// source: proto/supernode/supernode/cascade_service.proto + +package supernode + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SessionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NodeID string `protobuf:"bytes,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SessionRequest) Reset() { + *x = SessionRequest{} + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SessionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionRequest) ProtoMessage() {} + +func (x *SessionRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionRequest.ProtoReflect.Descriptor instead. 
+func (*SessionRequest) Descriptor() ([]byte, []int) { + return file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP(), []int{0} +} + +func (x *SessionRequest) GetNodeID() string { + if x != nil { + return x.NodeID + } + return "" +} + +type SessionReply struct { + state protoimpl.MessageState `protogen:"open.v1"` + SessID string `protobuf:"bytes,1,opt,name=sessID,proto3" json:"sessID,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SessionReply) Reset() { + *x = SessionReply{} + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SessionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionReply) ProtoMessage() {} + +func (x *SessionReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionReply.ProtoReflect.Descriptor instead. 
+func (*SessionReply) Descriptor() ([]byte, []int) { + return file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP(), []int{1} +} + +func (x *SessionReply) GetSessID() string { + if x != nil { + return x.SessID + } + return "" +} + +type SendTicketSignatureRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NodeID string `protobuf:"bytes,1,opt,name=nodeID,proto3" json:"nodeID,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + RqFile []byte `protobuf:"bytes,4,opt,name=rqFile,proto3" json:"rqFile,omitempty"` + RqEncodeParams *EncoderParameters `protobuf:"bytes,5,opt,name=rqEncodeParams,proto3" json:"rqEncodeParams,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendTicketSignatureRequest) Reset() { + *x = SendTicketSignatureRequest{} + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendTicketSignatureRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendTicketSignatureRequest) ProtoMessage() {} + +func (x *SendTicketSignatureRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendTicketSignatureRequest.ProtoReflect.Descriptor instead. 
+func (*SendTicketSignatureRequest) Descriptor() ([]byte, []int) { + return file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP(), []int{2} +} + +func (x *SendTicketSignatureRequest) GetNodeID() string { + if x != nil { + return x.NodeID + } + return "" +} + +func (x *SendTicketSignatureRequest) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *SendTicketSignatureRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *SendTicketSignatureRequest) GetRqFile() []byte { + if x != nil { + return x.RqFile + } + return nil +} + +func (x *SendTicketSignatureRequest) GetRqEncodeParams() *EncoderParameters { + if x != nil { + return x.RqEncodeParams + } + return nil +} + +type SendTicketSignatureReply struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendTicketSignatureReply) Reset() { + *x = SendTicketSignatureReply{} + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendTicketSignatureReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendTicketSignatureReply) ProtoMessage() {} + +func (x *SendTicketSignatureReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendTicketSignatureReply.ProtoReflect.Descriptor instead. 
+func (*SendTicketSignatureReply) Descriptor() ([]byte, []int) { + return file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP(), []int{3} +} + +type EncoderParameters struct { + state protoimpl.MessageState `protogen:"open.v1"` + Oti []byte `protobuf:"bytes,1,opt,name=Oti,proto3" json:"Oti,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EncoderParameters) Reset() { + *x = EncoderParameters{} + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EncoderParameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EncoderParameters) ProtoMessage() {} + +func (x *EncoderParameters) ProtoReflect() protoreflect.Message { + mi := &file_proto_supernode_supernode_cascade_service_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EncoderParameters.ProtoReflect.Descriptor instead. 
+func (*EncoderParameters) Descriptor() ([]byte, []int) { + return file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP(), []int{4} +} + +func (x *EncoderParameters) GetOti() []byte { + if x != nil { + return x.Oti + } + return nil +} + +var File_proto_supernode_supernode_cascade_service_proto protoreflect.FileDescriptor + +var file_proto_supernode_supernode_cascade_service_proto_rawDesc = string([]byte{ + 0x0a, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, + 0x65, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x63, 0x61, 0x73, 0x63, + 0x61, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x09, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x28, 0x0a, 0x0e, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x22, 0x26, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x73, 0x73, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x73, 0x73, 0x49, 0x44, 0x22, 0xc4, + 0x01, 0x0a, 0x1a, 0x53, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, + 0x6f, 0x64, 0x65, 0x49, 0x44, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x71, 0x46, 0x69, 0x6c, + 0x65, 
0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x71, 0x46, 0x69, 0x6c, 0x65, 0x12, + 0x44, 0x0a, 0x0e, 0x72, 0x71, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0e, 0x72, 0x71, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x63, + 0x6b, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x22, 0x25, 0x0a, 0x11, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x4f, 0x74, 0x69, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x03, 0x4f, 0x74, 0x69, 0x32, 0xbd, 0x01, 0x0a, 0x0e, 0x43, 0x61, 0x73, + 0x63, 0x61, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x17, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x28, 0x01, 0x30, 0x01, 0x12, 0x68, + 0x0a, 0x1a, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x54, 0x69, 0x63, + 0x6b, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x25, 0x2e, 0x73, + 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x63, + 0x6b, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2e, + 0x53, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x63, 
0x6b, 0x65, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x3d, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x75, 0x6d, 0x65, 0x72, 0x61, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, + 0x67, 0x65, 0x6e, 0x2f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x75, + 0x70, 0x65, 0x72, 0x6e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_proto_supernode_supernode_cascade_service_proto_rawDescOnce sync.Once + file_proto_supernode_supernode_cascade_service_proto_rawDescData []byte +) + +func file_proto_supernode_supernode_cascade_service_proto_rawDescGZIP() []byte { + file_proto_supernode_supernode_cascade_service_proto_rawDescOnce.Do(func() { + file_proto_supernode_supernode_cascade_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_supernode_supernode_cascade_service_proto_rawDesc), len(file_proto_supernode_supernode_cascade_service_proto_rawDesc))) + }) + return file_proto_supernode_supernode_cascade_service_proto_rawDescData +} + +var file_proto_supernode_supernode_cascade_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_proto_supernode_supernode_cascade_service_proto_goTypes = []any{ + (*SessionRequest)(nil), // 0: supernode.SessionRequest + (*SessionReply)(nil), // 1: supernode.SessionReply + (*SendTicketSignatureRequest)(nil), // 2: supernode.SendTicketSignatureRequest + (*SendTicketSignatureReply)(nil), // 3: supernode.SendTicketSignatureReply + (*EncoderParameters)(nil), // 4: supernode.EncoderParameters +} +var file_proto_supernode_supernode_cascade_service_proto_depIdxs = []int32{ + 4, // 0: supernode.SendTicketSignatureRequest.rqEncodeParams:type_name -> supernode.EncoderParameters + 0, // 1: supernode.CascadeService.Session:input_type -> supernode.SessionRequest + 2, // 2: 
supernode.CascadeService.SendCascadeTicketSignature:input_type -> supernode.SendTicketSignatureRequest + 1, // 3: supernode.CascadeService.Session:output_type -> supernode.SessionReply + 3, // 4: supernode.CascadeService.SendCascadeTicketSignature:output_type -> supernode.SendTicketSignatureReply + 3, // [3:5] is the sub-list for method output_type + 1, // [1:3] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_proto_supernode_supernode_cascade_service_proto_init() } +func file_proto_supernode_supernode_cascade_service_proto_init() { + if File_proto_supernode_supernode_cascade_service_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_supernode_supernode_cascade_service_proto_rawDesc), len(file_proto_supernode_supernode_cascade_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_supernode_supernode_cascade_service_proto_goTypes, + DependencyIndexes: file_proto_supernode_supernode_cascade_service_proto_depIdxs, + MessageInfos: file_proto_supernode_supernode_cascade_service_proto_msgTypes, + }.Build() + File_proto_supernode_supernode_cascade_service_proto = out.File + file_proto_supernode_supernode_cascade_service_proto_goTypes = nil + file_proto_supernode_supernode_cascade_service_proto_depIdxs = nil +} diff --git a/gen/supernode/supernode/cascade_service_grpc.pb.go b/gen/supernode/supernode/cascade_service_grpc.pb.go new file mode 100644 index 00000000..453a3f89 --- /dev/null +++ b/gen/supernode/supernode/cascade_service_grpc.pb.go @@ -0,0 +1,160 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.29.3 +// source: proto/supernode/supernode/cascade_service.proto + +package supernode + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + CascadeService_Session_FullMethodName = "/supernode.CascadeService/Session" + CascadeService_SendCascadeTicketSignature_FullMethodName = "/supernode.CascadeService/SendCascadeTicketSignature" +) + +// CascadeServiceClient is the client API for CascadeService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type CascadeServiceClient interface { + // Session informs primary supernode about its `nodeID` and `sessID` it wants to connect to. + // The stream is used by the parties to inform each other about the cancellation of the task. + Session(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SessionRequest, SessionReply], error) + // SendSenseTicketSignature send signature from supernodes mn2/mn3 for given reg NFT session id to primary supernode + SendCascadeTicketSignature(ctx context.Context, in *SendTicketSignatureRequest, opts ...grpc.CallOption) (*SendTicketSignatureReply, error) +} + +type cascadeServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewCascadeServiceClient(cc grpc.ClientConnInterface) CascadeServiceClient { + return &cascadeServiceClient{cc} +} + +func (c *cascadeServiceClient) Session(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[SessionRequest, SessionReply], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ stream, err := c.cc.NewStream(ctx, &CascadeService_ServiceDesc.Streams[0], CascadeService_Session_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[SessionRequest, SessionReply]{ClientStream: stream} + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type CascadeService_SessionClient = grpc.BidiStreamingClient[SessionRequest, SessionReply] + +func (c *cascadeServiceClient) SendCascadeTicketSignature(ctx context.Context, in *SendTicketSignatureRequest, opts ...grpc.CallOption) (*SendTicketSignatureReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SendTicketSignatureReply) + err := c.cc.Invoke(ctx, CascadeService_SendCascadeTicketSignature_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CascadeServiceServer is the server API for CascadeService service. +// All implementations must embed UnimplementedCascadeServiceServer +// for forward compatibility. +type CascadeServiceServer interface { + // Session informs primary supernode about its `nodeID` and `sessID` it wants to connect to. + // The stream is used by the parties to inform each other about the cancellation of the task. + Session(grpc.BidiStreamingServer[SessionRequest, SessionReply]) error + // SendSenseTicketSignature send signature from supernodes mn2/mn3 for given reg NFT session id to primary supernode + SendCascadeTicketSignature(context.Context, *SendTicketSignatureRequest) (*SendTicketSignatureReply, error) + mustEmbedUnimplementedCascadeServiceServer() +} + +// UnimplementedCascadeServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedCascadeServiceServer struct{} + +func (UnimplementedCascadeServiceServer) Session(grpc.BidiStreamingServer[SessionRequest, SessionReply]) error { + return status.Errorf(codes.Unimplemented, "method Session not implemented") +} +func (UnimplementedCascadeServiceServer) SendCascadeTicketSignature(context.Context, *SendTicketSignatureRequest) (*SendTicketSignatureReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendCascadeTicketSignature not implemented") +} +func (UnimplementedCascadeServiceServer) mustEmbedUnimplementedCascadeServiceServer() {} +func (UnimplementedCascadeServiceServer) testEmbeddedByValue() {} + +// UnsafeCascadeServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CascadeServiceServer will +// result in compilation errors. +type UnsafeCascadeServiceServer interface { + mustEmbedUnimplementedCascadeServiceServer() +} + +func RegisterCascadeServiceServer(s grpc.ServiceRegistrar, srv CascadeServiceServer) { + // If the following call pancis, it indicates UnimplementedCascadeServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&CascadeService_ServiceDesc, srv) +} + +func _CascadeService_Session_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(CascadeServiceServer).Session(&grpc.GenericServerStream[SessionRequest, SessionReply]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type CascadeService_SessionServer = grpc.BidiStreamingServer[SessionRequest, SessionReply] + +func _CascadeService_SendCascadeTicketSignature_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendTicketSignatureRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CascadeServiceServer).SendCascadeTicketSignature(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CascadeService_SendCascadeTicketSignature_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CascadeServiceServer).SendCascadeTicketSignature(ctx, req.(*SendTicketSignatureRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// CascadeService_ServiceDesc is the grpc.ServiceDesc for CascadeService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CascadeService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "supernode.CascadeService", + HandlerType: (*CascadeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SendCascadeTicketSignature", + Handler: _CascadeService_SendCascadeTicketSignature_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Session", + Handler: _CascadeService_Session_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "proto/supernode/supernode/cascade_service.proto", +} diff --git a/go.mod b/go.mod index f4047d77..b1875190 100644 --- a/go.mod +++ b/go.mod @@ -3,24 +3,28 @@ module github.com/LumeraProtocol/supernode go 1.24.0 require ( - cosmossdk.io/api v0.7.6 - github.com/LumeraProtocol/lumera v0.4.2 + github.com/LumeraProtocol/lumera v0.4.3 github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce + github.com/cenkalti/backoff v2.2.1+incompatible github.com/cenkalti/backoff/v4 v4.3.0 
github.com/cosmos/btcutil v1.0.5 github.com/cosmos/cosmos-sdk v0.50.12 github.com/cosmos/go-bip39 v1.0.0 github.com/cosmos/gogoproto v1.7.0 + github.com/disintegration/imaging v1.6.2 github.com/go-errors/errors v1.5.1 github.com/golang/mock v1.6.0 github.com/google/uuid v1.6.0 github.com/jmoiron/sqlx v1.4.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.11 + github.com/kolesa-team/go-webp v1.0.4 github.com/mattn/go-sqlite3 v1.14.24 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 + github.com/spf13/viper v1.19.0 + github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.10.0 github.com/x-cray/logrus-prefixed-formatter v0.5.2 go.uber.org/ratelimit v0.3.1 @@ -30,9 +34,11 @@ require ( google.golang.org/grpc v1.70.0 google.golang.org/protobuf v1.36.5 gopkg.in/natefinch/lumberjack.v2 v2.2.1 + gopkg.in/yaml.v3 v3.0.1 ) require ( + cosmossdk.io/api v0.7.6 // indirect cosmossdk.io/collections v0.4.0 // indirect cosmossdk.io/core v0.11.1 // indirect cosmossdk.io/depinject v1.1.0 // indirect @@ -143,9 +149,7 @@ require ( github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.19.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect @@ -156,6 +160,7 @@ require ( go.etcd.io/bbolt v1.3.10 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect + golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d // indirect golang.org/x/net v0.35.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/text v0.22.0 // indirect @@ -163,7 +168,6 @@ require ( google.golang.org/genproto/googleapis/api 
v0.0.0-20241202173237-19429a94021a // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.1 // indirect lukechampine.com/uint128 v1.3.0 // indirect nhooyr.io/websocket v1.8.6 // indirect diff --git a/go.sum b/go.sum index 7b8e02be..ea8281cd 100644 --- a/go.sum +++ b/go.sum @@ -61,8 +61,8 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3 github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v0.4.2 h1:yW7mwoYiBCcFLFNs9AgmaLc0DVkir95NGFtR2j/VYsw= -github.com/LumeraProtocol/lumera v0.4.2/go.mod h1:MRqVY+f8edEBkDvpr4z2nJpglp3Qj1OUvjeWvrvIUSM= +github.com/LumeraProtocol/lumera v0.4.3 h1:q/FuT+JOLIpYdlunczRUr6K85r9Sn0lKvGltSrj4r6s= +github.com/LumeraProtocol/lumera v0.4.3/go.mod h1:MRqVY+f8edEBkDvpr4z2nJpglp3Qj1OUvjeWvrvIUSM= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -123,6 +123,7 @@ github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46f github.com/bufbuild/protocompile v0.14.0 h1:z3DW4IvXE5G/uTOnSQn+qwQQxvhckkTWLS/0No/o7KU= github.com/bufbuild/protocompile v0.14.0/go.mod h1:N6J1NYzkspJo3ZwyL4Xjvli86XOj1xq4qAasUFxGups= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod 
h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -229,6 +230,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= +github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -515,6 +518,8 @@ github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/kolesa-team/go-webp v1.0.4 h1:wQvU4PLG/X7RS0vAeyhiivhLRoxfLVRlDq4I3frdxIQ= +github.com/kolesa-team/go-webp v1.0.4/go.mod h1:oMvdivD6K+Q5qIIkVC2w4k2ZUnI1H+MyP7inwgWq9aA= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt 
v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -872,6 +877,9 @@ golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0J golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d h1:RNPAfi2nHY7C2srAV8A49jpsYr0ADedCk1wq6fTMTvs= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= diff --git a/p2p/DEVDOCS.md b/p2p/DEVDOCS.md new file mode 100644 index 00000000..1e3bf608 --- /dev/null +++ b/p2p/DEVDOCS.md @@ -0,0 +1,135 @@ +# Lumera P2P Service + +A Kademlia-based distributed hash table (DHT) implementation that provides decentralized storage and retrieval capabilities for the Lumera network. 
+ +## Overview + +The P2P service enables supernodes to: +- Store and retrieve data in a distributed network +- Auto-discover other nodes via the Lumera blockchain +- Securely communicate using ALTS (Application Layer Transport Security) +- Replicate data across the network for redundancy + +## Architecture + +``` +┌─────────────┐ ┌─────────────┐ ┌─────────────┐ +│ P2P API │────▶│ DHT │────▶│ Network │ +└─────────────┘ └─────────────┘ └─────────────┘ + │ │ │ + │ │ │ + ▼ ▼ ▼ +┌─────────────┐ ┌─────────────┐ ┌─────────────┐ +│ Local Store │ │ Hash Table │ │ Conn Pool │ +└─────────────┘ └─────────────┘ └─────────────┘ +``` + +- **P2P API**: Public interface for store/retrieve operations +- **DHT**: Core DHT implementation using Kademlia algorithm +- **Network**: Handles peer connections, messaging, and encryption +- **Local Store**: SQLite database for persistent storage +- **Hash Table**: Manages routing table of known peers +- **Conn Pool**: Manages network connections to peers + +## Configuration + +Key configuration parameters in the YAML config: + +```yaml +p2p: + listen_address: "0.0.0.0" # Network interface to listen on + port: 4445 # Port for P2P communication + data_dir: "~/.lumera/p2p" # Directory for DHT data storage + bootstrap_nodes: "" # Optional comma-separated list of bootstrap nodes + external_ip: "" # Optional override for auto-detected external IP +``` + +### Configuration Field Details + +| Field | Description | Default | Required | +|-------|-------------|---------|----------| +| `listen_address` | Network interface to bind to | `0.0.0.0` | Yes | +| `port` | Port to listen for P2P connections | `4445` | Yes | +| `data_dir` | Storage directory for P2P data | N/A | Yes | +| `bootstrap_nodes` | Format: `identity@host:port,identity2@host2:port2` | Auto-fetched from blockchain | No | +| `external_ip` | Public IP address for the node | Auto-detected | No | + +The **Node ID** is derived from the Lumera account in your keyring specified by the `key_name` 
in the supernode config. + +## Usage Example + +Initializing the P2P service: + +```go +// Create P2P configuration +p2pConfig := &p2p.Config{ + ListenAddress: "0.0.0.0", + Port: 4445, + DataDir: "/path/to/data", + ID: supernodeAddress, // Lumera account address +} + +// Initialize P2P service +p2pService, err := p2p.New(ctx, p2pConfig, lumeraClient, keyring, rqStore, nil, nil) +if err != nil { + return err +} + +// Start P2P service +if err := p2pService.Run(ctx); err != nil { + return err +} +``` + +Storing and retrieving data: + +```go +// Store data - returns base58-encoded key +key, err := p2pService.Store(ctx, []byte("Hello, world!"), 0) +if err != nil { + return err +} + +// Retrieve data +data, err := p2pService.Retrieve(ctx, key) +if err != nil { + return err +} +``` + +## Key Components + +### Keyring Integration + +The P2P service uses the Cosmos SDK keyring for: +- Secure node identity (derived from your Lumera account) +- Cryptographic signatures for secure communication +- Authentication between peers + +### Bootstrap Process + +When a node starts: +1. It checks for configured bootstrap nodes +2. If none provided, queries the Lumera blockchain for active supernodes +3. Connects to bootstrap nodes and performs iterative `FIND_NODE` queries +4. Builds its routing table based on responses +5. Becomes a full participant in the network + +### Data Replication + +Data stored in the network is: +1. Stored locally in SQLite +2. Replicated to the closest `Alpha` (6) nodes in the DHT +3. 
Periodically checked and re-replicated as nodes come and go + +## Troubleshooting + +- **Can't connect to network**: Verify your `external_ip` is correct or remove it to use auto-detection +- **Bootstrap fails**: Ensure the Lumera client is connected or specify manual bootstrap nodes +- **Storage issues**: Check `data_dir` path and permissions + +## Development Notes + +- Use `localOnly: true` with `Retrieve()` to only check local storage +- DHT operations use a modified Kademlia with `Alpha=6` for parallelism +- Key format is base58-encoded SHA-256 hash of the data \ No newline at end of file diff --git a/p2p/kademlia/bootstrap.go b/p2p/kademlia/bootstrap.go index 96330598..06d97e52 100644 --- a/p2p/kademlia/bootstrap.go +++ b/p2p/kademlia/bootstrap.go @@ -11,7 +11,6 @@ import ( "github.com/LumeraProtocol/supernode/pkg/errors" "github.com/LumeraProtocol/supernode/pkg/log" - "github.com/LumeraProtocol/supernode/pkg/lumera" ltc "github.com/LumeraProtocol/supernode/pkg/net/credentials" ) @@ -77,7 +76,7 @@ func (s *DHT) setBootstrapNodesFromConfigVar(ctx context.Context, bootstrapNodes } nodes = append(nodes, &Node{ - ID: []byte(lumeraAddress.Identity), + ID: []byte(lumeraAddress.Identity), IP: lumeraAddress.Host, Port: lumeraAddress.Port, }) @@ -88,6 +87,7 @@ func (s *DHT) setBootstrapNodesFromConfigVar(ctx context.Context, bootstrapNodes return nil } +// ConfigureBootstrapNodes connects with lumera client & gets p2p boostrap ip & port // ConfigureBootstrapNodes connects with lumera client & gets p2p boostrap ip & port func (s *DHT) ConfigureBootstrapNodes(ctx context.Context, bootstrapNodes string) error { if bootstrapNodes != "" { @@ -100,53 +100,77 @@ func (s *DHT) ConfigureBootstrapNodes(ctx context.Context, bootstrapNodes string } selfAddress = fmt.Sprintf("%s:%d", selfAddress, s.options.Port) - get := func(ctx context.Context, f func(context.Context) (lumera.SuperNodeAddressInfos, error)) ([]*Node, error) { - mns, err := f(ctx) + var boostrapNodes []*Node + + 
if s.options.LumeraClient != nil { + // Get the latest block to determine height + latestBlockResp, err := s.options.LumeraClient.Node().GetLatestBlock(ctx) + if err != nil { + return fmt.Errorf("failed to get latest block: %w", err) + } + + // Get the block height + blockHeight := uint64(latestBlockResp.SdkBlock.Header.Height) + + // Get top supernodes for this block + supernodeResp, err := s.options.LumeraClient.SuperNode().GetTopSuperNodesForBlock(ctx, blockHeight) if err != nil { - return []*Node{}, err + return fmt.Errorf("failed to get top supernodes: %w", err) } mapNodes := map[string]*Node{} - for _, mn := range mns { - node, err := s.parseNode(mn.ExtP2P, selfAddress) + + for _, supernode := range supernodeResp.Supernodes { + // Find the latest IP address (with highest block height) + var latestIP string + var maxHeight int64 = -1 + + for _, ipHistory := range supernode.PrevIpAddresses { + if ipHistory.Height > maxHeight { + maxHeight = ipHistory.Height + latestIP = ipHistory.Address + } + } + + if latestIP == "" { + log.P2P().WithContext(ctx). + WithField("supernode", supernode.SupernodeAccount). + Warn("No valid IP address found for supernode") + continue + } + + // Parse the node from the IP address + node, err := s.parseNode(latestIP, selfAddress) if err != nil { - log.P2P().WithContext(ctx).WithError(err).WithField("extP2P", mn.ExtP2P).Warn("Skip Bad Boostrap Address") + log.P2P().WithContext(ctx).WithError(err). + WithField("address", latestIP). + WithField("supernode", supernode.SupernodeAccount). 
+ Warn("Skip Bad Bootstrap Address") continue } - mapNodes[mn.ExtP2P] = node + // Store the supernode account as the node ID + node.ID = []byte(supernode.SupernodeAccount) + mapNodes[latestIP] = node } - nodes := []*Node{} + // Convert the map to a slice for _, node := range mapNodes { - nodes = append(nodes, node) + boostrapNodes = append(boostrapNodes, node) } - - return nodes, nil } - var boostrapNodes []*Node - if s.options.LumeraNetwork != nil { - boostrapNodes, err := get(ctx, s.options.LumeraNetwork.MasterNodesExtra) - if err != nil { - return fmt.Errorf("masternodesTop failed: %s", err) - } else if len(boostrapNodes) == 0 { - boostrapNodes, err = get(ctx, s.options.LumeraNetwork.MasterNodesTop) - if err != nil { - return fmt.Errorf("masternodesExtra failed: %s", err) - } else if len(boostrapNodes) == 0 { - log.P2P().WithContext(ctx).Error("unable to fetch bootstrap ip. Missing extP2P") - - return nil - } - } + if len(boostrapNodes) == 0 { + log.P2P().WithContext(ctx).Error("unable to fetch bootstrap IP addresses. No valid supernodes found.") + return nil } for _, node := range boostrapNodes { log.P2P().WithContext(ctx).WithFields(log.Fields{ "bootstap_ip": node.IP, "bootstrap_port": node.Port, - }).Info("adding p2p bootstap node") + "node_id": string(node.ID), + }).Info("adding p2p bootstrap node") } s.options.BootstrapNodes = append(s.options.BootstrapNodes, boostrapNodes...) 
diff --git a/p2p/kademlia/dht.go b/p2p/kademlia/dht.go index 0b0a138b..96ff5083 100644 --- a/p2p/kademlia/dht.go +++ b/p2p/kademlia/dht.go @@ -12,33 +12,34 @@ import ( "github.com/btcsuite/btcutil/base58" "github.com/cenkalti/backoff/v4" + "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" "github.com/LumeraProtocol/supernode/pkg/errors" "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/lumera" ltc "github.com/LumeraProtocol/supernode/pkg/net/credentials" "github.com/LumeraProtocol/supernode/pkg/storage" "github.com/LumeraProtocol/supernode/pkg/storage/memory" "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/pkg/utils" - "github.com/LumeraProtocol/supernode/pkg/lumera" ) const ( - defaultNetworkPort uint16 = 4445 - defaultNetworkAddr = "0.0.0.0" - defaultRefreshTime = time.Second * 3600 - defaultPingTime = time.Second * 10 - defaultCleanupInterval = time.Minute * 2 - defaultDisabledKeyExpirationInterval = time.Minute * 30 - defaultRedundantDataCleanupInterval = 12 * time.Hour - defaultDeleteDataInterval = 11 * time.Hour - delKeysCountThreshold = 10 - lowSpaceThreshold = 50 // GB - batchStoreSize = 2500 - storeSameSymbolsBatchConcurrency = 1 - storeSymbolsBatchConcurrency = 2.0 - minimumDataStoreSuccessRate = 75.0 + defaultNetworkPort uint16 = 4445 + defaultNetworkAddr = "0.0.0.0" + defaultRefreshTime = time.Second * 3600 + defaultPingTime = time.Second * 10 + defaultCleanupInterval = time.Minute * 2 + defaultDisabledKeyExpirationInterval = time.Minute * 30 + defaultRedundantDataCleanupInterval = 12 * time.Hour + defaultDeleteDataInterval = 11 * time.Hour + delKeysCountThreshold = 10 + lowSpaceThreshold = 50 // GB + batchStoreSize = 2500 + storeSameSymbolsBatchConcurrency = 1 + storeSymbolsBatchConcurrency = 2.0 + minimumDataStoreSuccessRate = 75.0 maxIterations = 4 ) @@ -74,13 +75,16 @@ type Options struct { // node there is no 
way to connect to the network BootstrapNodes []*Node - LumeraClient *lumera.Client + // Lumera client for interacting with the blockchain + LumeraClient lumera.Client - LumeraNetwork *lumera.LumeraNetwork + // Keyring for credentials + Keyring keyring.Keyring ExternalIP string } +// NewDHT returns a new DHT node // NewDHT returns a new DHT node func NewDHT(ctx context.Context, store Store, metaStore MetaStore, options *Options, rqstore rqstore.Store) (*DHT, error) { // validate the options, if it's invalid, set them to default value @@ -107,13 +111,15 @@ func NewDHT(ctx context.Context, store Store, metaStore MetaStore, options *Opti s.externalIP = options.ExternalIP } - kr := options.LumeraClient.GetKeyring() - if kr == nil { - return nil, fmt.Errorf("keyring is not initialized in lumera client context") + // Check that keyring is provided + if options.Keyring == nil { + return nil, fmt.Errorf("keyring is required but not provided") } + + // Initialize client credentials with the provided keyring clientCreds, err := ltc.NewClientCreds(<c.ClientOptions{ CommonOptions: ltc.CommonOptions{ - Keyring: kr, + Keyring: options.Keyring, LocalIdentity: string(options.ID), PeerType: securekeyx.Supernode, }, diff --git a/p2p/p2p.go b/p2p/p2p.go index 43392eed..dcb99d02 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -6,17 +6,17 @@ import ( "fmt" "time" + "github.com/LumeraProtocol/supernode/p2p/kademlia" "github.com/LumeraProtocol/supernode/p2p/kademlia/store/cloud.go" "github.com/LumeraProtocol/supernode/p2p/kademlia/store/meta" - + "github.com/LumeraProtocol/supernode/p2p/kademlia/store/sqlite" "github.com/LumeraProtocol/supernode/pkg/errors" "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/lumera" "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/pkg/utils" - "github.com/LumeraProtocol/supernode/p2p/kademlia" - "github.com/LumeraProtocol/supernode/p2p/kademlia/store/sqlite" - 
"github.com/LumeraProtocol/supernode/pkg/lumera" "github.com/btcsuite/btcutil/base58" + "github.com/cosmos/cosmos-sdk/crypto/keyring" ) const ( @@ -45,7 +45,8 @@ type p2p struct { dht *kademlia.DHT // the kademlia network config *Config // the service configuration running bool // if the kademlia network is ready - lumeraClient *lumera.Client + lumeraClient lumera.Client + keyring keyring.Keyring // Add the keyring field rqstore rqstore.Store } @@ -231,7 +232,8 @@ func (s *p2p) NClosestNodesWithIncludingNodeList(ctx context.Context, n int, key func (s *p2p) configure(ctx context.Context) error { // new the queries storage kadOpts := &kademlia.Options{ - LumeraClient: s.lumeraClient, + LumeraClient: s.lumeraClient, + Keyring: s.keyring, // Pass the keyring BootstrapNodes: []*kademlia.Node{}, IP: s.config.ListenAddress, Port: s.config.Port, @@ -259,7 +261,7 @@ func (s *p2p) configure(ctx context.Context) error { } // New returns a new p2p instance. -func New(ctx context.Context, config *Config, lumeraClient *lumera.Client, rqstore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (P2P, error) { +func New(ctx context.Context, config *Config, lumeraClient lumera.Client, kr keyring.Keyring, rqstore rqstore.Store, cloud cloud.Storage, mst *sqlite.MigrationMetaStore) (P2P, error) { store, err := sqlite.NewStore(ctx, config.DataDir, cloud, mst) if err != nil { return nil, errors.Errorf("new kademlia store: %w", err) @@ -275,6 +277,7 @@ func New(ctx context.Context, config *Config, lumeraClient *lumera.Client, rqsto metaStore: meta, config: config, lumeraClient: lumeraClient, + keyring: kr, // Store the keyring rqstore: rqstore, }, nil } diff --git a/pkg/common/blocktracker/block_tracker.go b/pkg/common/blocktracker/block_tracker.go new file mode 100644 index 00000000..13a5c5fd --- /dev/null +++ b/pkg/common/blocktracker/block_tracker.go @@ -0,0 +1,121 @@ +package blocktracker + +import ( + "context" + "sync" + "time" + + 
"github.com/LumeraProtocol/supernode/pkg/errors" +) + +const ( + defaultRetries = 3 + defaultDelayDurationBetweenRetries = 5 * time.Second + defaultRPCConnectTimeout = 15 * time.Second + // Update duration in case last update was success + defaultSuccessUpdateDuration = 10 * time.Second + // Update duration in case last update was failed - prevent too much call to Lumera + defaultFailedUpdateDuration = 5 * time.Second + defaultNextBlockTimeout = 30 * time.Minute +) + +// LumeraClient defines interface functions BlockCntTracker expects from Lumera +type LumeraClient interface { + // GetBlockCount returns block height of blockchain + GetBlockCount(ctx context.Context) (int32, error) +} + +// BlockCntTracker defines a block tracker - that will keep current block height +type BlockCntTracker struct { + mtx sync.Mutex + LumeraClient LumeraClient + curBlockCnt int32 + lastSuccess time.Time + lastRetried time.Time + lastErr error + delayBetweenRetries time.Duration + retries int +} + +// New returns an instance of BlockCntTracker +func New(LumeraClient LumeraClient) *BlockCntTracker { + return &BlockCntTracker{ + LumeraClient: LumeraClient, + curBlockCnt: 0, + delayBetweenRetries: defaultDelayDurationBetweenRetries, + retries: defaultRetries, + } +} + +func (tracker *BlockCntTracker) refreshBlockCount(retries int) { + tracker.lastRetried = time.Now().UTC() + for i := 0; i < retries; i = i + 1 { + ctx, cancel := context.WithTimeout(context.Background(), defaultRPCConnectTimeout) + blockCnt, err := tracker.LumeraClient.GetBlockCount(ctx) + if err == nil { + tracker.curBlockCnt = blockCnt + tracker.lastSuccess = time.Now().UTC() + cancel() + tracker.lastErr = nil + return + } + cancel() + + tracker.lastErr = err + // delay between retries + time.Sleep(tracker.delayBetweenRetries) + } + +} + +// GetBlockCount return current block count +// it will get from cache if last refresh is small than defaultSuccessUpdateDuration +// or will refresh it by call from Lumera daemon to get 
the latest one if defaultSuccessUpdateDuration expired +func (tracker *BlockCntTracker) GetBlockCount() (int32, error) { + tracker.mtx.Lock() + defer tracker.mtx.Unlock() + + shouldRefresh := false + + if tracker.lastSuccess.After(tracker.lastRetried) { + if time.Now().UTC().After(tracker.lastSuccess.Add(defaultSuccessUpdateDuration)) { + shouldRefresh = true + } + } else { + // prevent update too much + if time.Now().UTC().After(tracker.lastRetried.Add(defaultFailedUpdateDuration)) { + shouldRefresh = true + } + } + + if shouldRefresh { + tracker.refreshBlockCount(tracker.retries) + } + + if tracker.curBlockCnt == 0 { + return 0, errors.Errorf("failed to get blockcount: %w", tracker.lastErr) + } + + return tracker.curBlockCnt, nil +} + +// WaitTillNextBlock will wait until next block height is greater than blockCnt +func (tracker *BlockCntTracker) WaitTillNextBlock(ctx context.Context, blockCnt int32) error { + for { + select { + case <-ctx.Done(): + return errors.Errorf("context done: %w", ctx.Err()) + case <-time.After(defaultNextBlockTimeout): + return errors.Errorf("timeout waiting for next block") + case <-time.After(defaultSuccessUpdateDuration): + curBlockCnt, err := tracker.GetBlockCount() + if err != nil { + return errors.Errorf("failed to get blockcount: %w", err) + } + + if curBlockCnt > blockCnt { + return nil + } + } + } +} diff --git a/pkg/common/blocktracker/block_tracker_test.go b/pkg/common/blocktracker/block_tracker_test.go new file mode 100644 index 00000000..b070a4b7 --- /dev/null +++ b/pkg/common/blocktracker/block_tracker_test.go @@ -0,0 +1,97 @@ +package blocktracker + +import ( + "context" + "errors" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +type fakePastelClient struct { + retBlockCnt int32 + retErr error +} + +func (fake *fakePastelClient) GetBlockCount(_ context.Context) (int32, error) { + return fake.retBlockCnt, fake.retErr +} + +func TestGetCountFirstTime(t *testing.T) { + tests := []struct { + 
name string + pastelClient *fakePastelClient + expectErr bool + }{ + { + name: "success", + pastelClient: &fakePastelClient{ + retBlockCnt: 10, + retErr: nil, + }, + expectErr: false, + }, + { + name: "fail", + pastelClient: &fakePastelClient{ + retBlockCnt: 0, + retErr: errors.New("error"), + }, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tracker := New(tt.pastelClient) + tracker.retries = 1 + blkCnt, err := tracker.GetBlockCount() + assert.Equal(t, tt.pastelClient.retBlockCnt, blkCnt) + if tt.expectErr { + assert.True(t, strings.Contains(err.Error(), tt.pastelClient.retErr.Error())) + } else { + assert.Nil(t, err) + } + }) + } +} + +func TestGetBlockCountNoRefresh(t *testing.T) { + pastelClient := &fakePastelClient{ + retBlockCnt: 10, + retErr: errors.New("error"), + } + + expectedBlk := int32(1) + tracker := New(pastelClient) + tracker.retries = 1 + tracker.curBlockCnt = expectedBlk + tracker.lastRetried = time.Now().UTC() + tracker.lastSuccess = time.Now().UTC() + + blkCnt, err := tracker.GetBlockCount() + assert.Equal(t, expectedBlk, blkCnt) + + assert.Nil(t, err) +} + +func TestGetBlockCountRefresh(t *testing.T) { + expectedBlk := int32(10) + pastelClient := &fakePastelClient{ + retBlockCnt: expectedBlk, + retErr: nil, + } + + tracker := New(pastelClient) + tracker.retries = 1 + tracker.curBlockCnt = 1 + tracker.lastRetried = time.Now().UTC().Add(-defaultSuccessUpdateDuration) + tracker.lastSuccess = time.Now().UTC().Add(-defaultSuccessUpdateDuration) + + blkCnt, err := tracker.GetBlockCount() + assert.Equal(t, expectedBlk, blkCnt) + + assert.Nil(t, err) +} diff --git a/pkg/common/task/action.go b/pkg/common/task/action.go new file mode 100644 index 00000000..227ebe35 --- /dev/null +++ b/pkg/common/task/action.go @@ -0,0 +1,20 @@ +package task + +import "context" + +// ActionFn represents a function that is run inside a goroutine. 
+type ActionFn func(ctx context.Context) error + +// Action represents the action of the task. +type Action struct { + fn ActionFn + doneCh chan struct{} +} + +// NewAction returns a new Action instance. +func NewAction(fn ActionFn) *Action { + return &Action{ + fn: fn, + doneCh: make(chan struct{}), + } +} diff --git a/pkg/common/task/state/state.go b/pkg/common/task/state/state.go new file mode 100644 index 00000000..e1eed5fd --- /dev/null +++ b/pkg/common/task/state/state.go @@ -0,0 +1,174 @@ +//go:generate mockery --name=State + +package state + +import ( + "context" + "sync" + "time" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/storage/queries" + "github.com/LumeraProtocol/supernode/pkg/types" +) + +// State represents a state of the task. +type State interface { + // Status returns the current status. + Status() *Status + + // SetStatusNotifyFunc sets a function to be called after the state is updated. + SetStatusNotifyFunc(fn func(status *Status)) + + // RequiredStatus returns an error if the current status doen't match the given one. + RequiredStatus(subStatus SubStatus) error + + // StatusHistory returns all history from the very beginning. + StatusHistory() []*Status + + // UpdateStatus updates the status of the state by creating a new status with the given `status`. + UpdateStatus(subStatus SubStatus) + + // SubscribeStatus returns a new subscription of the state. 
+ SubscribeStatus() func() <-chan *Status + + //SetStateLog set the wallet node task status log to the state status log + SetStateLog(statusLog types.Fields) + + //InitialiseHistoryDB sets the connection to historyDB + InitialiseHistoryDB(store queries.LocalStoreInterface) +} + +type state struct { + status *Status + history []*Status + + notifyFn func(status *Status) + sync.RWMutex + subsCh []chan *Status + taskID string + statusLog types.Fields + historyDBStore queries.LocalStoreInterface +} + +// Status implements State.Status() +func (state *state) Status() *Status { + return state.status +} + +// SetStatusNotifyFunc implements State.SetStatusNotifyFunc() +func (state *state) SetStatusNotifyFunc(fn func(status *Status)) { + state.notifyFn = fn +} + +// RequiredStatus implements State.RequiredStatus() +func (state *state) RequiredStatus(subStatus SubStatus) error { + if state.status.Is(subStatus) { + return nil + } + return errors.Errorf("required status %q, current %q", subStatus, state.status) +} + +// StatusHistory implements State.StatusHistory() +func (state *state) StatusHistory() []*Status { + state.RLock() + defer state.RUnlock() + + return append(state.history, state.status) +} + +// UpdateStatus implements State.UpdateStatus() +func (state *state) UpdateStatus(subStatus SubStatus) { + state.Lock() + defer state.Unlock() + + status := NewStatus(subStatus) + state.history = append(state.history, state.status) + state.status = status + + history := types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: state.taskID, Status: status.String()} + if state.statusLog.IsValid() { + history.Details = types.NewDetails(status.String(), state.statusLog) + } + + if state.historyDBStore != nil { + if _, err := state.historyDBStore.InsertTaskHistory(history); err != nil { + log.WithError(err).Error("unable to store task status") + } + } else { + store, err := queries.OpenHistoryDB() + if err != nil { + log.WithError(err).Error("error opening history db") + } + + if 
store != nil { + defer store.CloseHistoryDB(context.Background()) + if _, err := store.InsertTaskHistory(history); err != nil { + log.WithError(err).Error("unable to store task status") + } + } + } + + if state.notifyFn != nil { + state.notifyFn(status) + } + + for _, subCh := range state.subsCh { + subCh := subCh + go func() { + subCh <- status + }() + } +} + +// SubscribeStatus implements State.SubscribeStatus() +func (state *state) SubscribeStatus() func() <-chan *Status { + state.RLock() + defer state.RUnlock() + + subCh := make(chan *Status) + state.subsCh = append(state.subsCh, subCh) + + for _, status := range append(state.history, state.status) { + status := status + go func() { + subCh <- status + }() + } + + sub := func() <-chan *Status { + return subCh + } + return sub +} + +func (state *state) SetStateLog(statusLog types.Fields) { + state.statusLog = statusLog +} + +func (state *state) InitialiseHistoryDB(storeInterface queries.LocalStoreInterface) { + state.historyDBStore = storeInterface +} + +// New returns a new state instance. +func New(subStatus SubStatus, taskID string) State { + store, err := queries.OpenHistoryDB() + if err != nil { + log.WithError(err).Error("error opening history db") + } + + if store != nil { + defer store.CloseHistoryDB(context.Background()) + + if _, err := store.InsertTaskHistory(types.TaskHistory{CreatedAt: time.Now().UTC(), TaskID: taskID, + Status: subStatus.String()}); err != nil { + log.WithError(err).Error("unable to store task status") + } + } + + return &state{ + status: NewStatus(subStatus), + taskID: taskID, + } +} diff --git a/pkg/common/task/state/status.go b/pkg/common/task/state/status.go new file mode 100644 index 00000000..b1b00da6 --- /dev/null +++ b/pkg/common/task/state/status.go @@ -0,0 +1,34 @@ +//go:generate mockery --name=SubStatus + +package state + +import ( + "fmt" + "time" +) + +// SubStatus represents a sub-status that contains a description of the status. 
+type SubStatus interface { + fmt.Stringer + IsFinal() bool + IsFailure() bool +} + +// Status represents a state of the task. +type Status struct { + CreatedAt time.Time + SubStatus +} + +// Is returns true if the current `Status` matches to the given `statuses`. +func (status *Status) Is(subStatus SubStatus) bool { + return status.SubStatus == subStatus +} + +// NewStatus returns a new Status instance. +func NewStatus(subStatus SubStatus) *Status { + return &Status{ + CreatedAt: time.Now().UTC(), + SubStatus: subStatus, + } +} diff --git a/pkg/common/task/task.go b/pkg/common/task/task.go new file mode 100644 index 00000000..88a64add --- /dev/null +++ b/pkg/common/task/task.go @@ -0,0 +1,143 @@ +//go:generate mockery --name=Task + +package task + +import ( + "context" + "sync" + + "github.com/LumeraProtocol/supernode/pkg/common/task/state" + "github.com/LumeraProtocol/supernode/pkg/errgroup" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/random" +) + +// Task represent a worker task. +type Task interface { + state.State + + // ID returns id of the task. + ID() string + + // Run starts the task. + Run(ctx context.Context) error + + // Cancel tells a task to abandon its work. + // Cancel may be called by multiple goroutines simultaneously. + // After the first call, subsequent calls to a Cancel do nothing. + Cancel() + + // Done returns a channel when the task is canceled. + Done() <-chan struct{} + + // RunAction waits for new actions, starts handling each of them in a new goroutine. + RunAction(ctx context.Context) error + + // NewAction creates a new action and passes for the execution. + // It is used when it is necessary to run an action in the context of `Tasks` rather than the one who was called. 
+ NewAction(fn ActionFn) <-chan struct{} + + // CloseActionCh closes action ch + CloseActionCh() +} + +type task struct { + state.State + + id string + + actionCh chan *Action + + doneMu sync.Mutex + doneCh chan struct{} + closeOnce sync.Once +} + +// ID implements Task.ID +func (task *task) ID() string { + return task.id +} + +// Run implements Task.Run +func (task *task) Run(_ context.Context) error { + return errors.New("task default run func not implemented") +} + +// Cancel implements Task.Cancel +func (task *task) Cancel() { + task.doneMu.Lock() + defer task.doneMu.Unlock() + + select { + case <-task.Done(): + log.Debugf("task %s cancelled", task.ID()) + return + default: + close(task.doneCh) + } +} + +// Done implements Task.Done +func (task *task) Done() <-chan struct{} { + return task.doneCh +} + +// RunAction implements Task.RunAction +func (task *task) RunAction(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + group, ctx := errgroup.WithContext(ctx) + for { + select { + case <-ctx.Done(): + log.WithContext(ctx).WithField("task", task.ID()).Info("context done") + case <-task.Done(): + log.WithContext(ctx).Infof("task %s done", task.ID()) + cancel() + case action, ok := <-task.actionCh: + if !ok { + log.WithContext(ctx).Info("action channel closed") + return group.Wait() + } + + currAction := action + group.Go(func() error { + defer close(currAction.doneCh) + + return currAction.fn(ctx) + }) + continue + } + break + } + + return group.Wait() +} + +// CloseActionCh safely closes the action channel +func (task *task) CloseActionCh() { + task.closeOnce.Do(func() { + close(task.actionCh) + }) +} + +// NewAction implements Task.NewAction +func (task *task) NewAction(fn ActionFn) <-chan struct{} { + act := NewAction(fn) + task.actionCh <- act + return act.doneCh +} + +// New returns a new task instance. 
+func New(status state.SubStatus) Task { + taskID, _ := random.String(8, random.Base62Chars) + + return &task{ + State: state.New(status, taskID), + id: taskID, + doneCh: make(chan struct{}), + actionCh: make(chan *Action), + } +} diff --git a/pkg/common/task/ticket.go b/pkg/common/task/ticket.go new file mode 100644 index 00000000..561b8f0b --- /dev/null +++ b/pkg/common/task/ticket.go @@ -0,0 +1,13 @@ +package task + +type CascadeTicket struct { + Creator string `json:"creator"` + CreatorSignature []byte `json:"creator_signature"` + DataHash string `json:"data_hash"` + ActionID string `json:"action_id"` + BlockHeight int64 `json:"block_height"` + BlockHash []byte `json:"block_hash"` + RQIDsIC uint32 `json:"rqids_ic"` + RQIDsMax int32 `json:"rqids_max"` + RQIDs []string `json:"rq_ids"` +} diff --git a/pkg/common/task/worker.go b/pkg/common/task/worker.go new file mode 100644 index 00000000..724d74c5 --- /dev/null +++ b/pkg/common/task/worker.go @@ -0,0 +1,90 @@ +package task + +import ( + "context" + "sync" + + "github.com/LumeraProtocol/supernode/pkg/errgroup" + "github.com/LumeraProtocol/supernode/pkg/log" +) + +// Worker represents a pool of the task. +type Worker struct { + sync.Mutex + + tasks []Task + taskCh chan Task +} + +// Tasks returns all tasks. +func (worker *Worker) Tasks() []Task { + return worker.tasks +} + +// Task returns the task by the given id. +func (worker *Worker) Task(taskID string) Task { + worker.Lock() + defer worker.Unlock() + + for _, task := range worker.tasks { + if task.ID() == taskID { + return task + } + } + return nil +} + +// AddTask adds the new task. +func (worker *Worker) AddTask(task Task) { + worker.Lock() + defer worker.Unlock() + + worker.tasks = append(worker.tasks, task) + worker.taskCh <- task +} + +// RemoveTask removes the task. 
+func (worker *Worker) RemoveTask(subTask Task) { + worker.Lock() + defer worker.Unlock() + + for i, task := range worker.tasks { + if task == subTask { + worker.tasks = append(worker.tasks[:i], worker.tasks[i+1:]...) + return + } + } +} + +// Run waits for new tasks, starts handling each of them in a new goroutine. +func (worker *Worker) Run(ctx context.Context) error { + group, _ := errgroup.WithContext(ctx) // Create an error group but ignore the derived context + for { + select { + case <-ctx.Done(): + log.WithContext(ctx).Warn("worker run stopping : %w", ctx.Err()) + return group.Wait() + case t := <-worker.taskCh: // Rename here + currentTask := t // Capture the loop variable + group.Go(func() error { + defer func() { + if r := recover(); r != nil { + log.WithContext(ctx).Errorf("Recovered from panic in common task's worker run: %v", r) + } + + log.WithContext(ctx).WithField("task", currentTask.ID()).Info("Task Removed") + worker.RemoveTask(currentTask) + }() + + return currentTask.Run(ctx) // Use the captured variable + }) + } + } +} + +// NewWorker returns a new Worker instance. 
+func NewWorker() *Worker { + return &Worker{ + taskCh: make(chan Task), + } +} diff --git a/pkg/common/task/worker_test.go b/pkg/common/task/worker_test.go new file mode 100644 index 00000000..4c5f21ac --- /dev/null +++ b/pkg/common/task/worker_test.go @@ -0,0 +1,147 @@ +package task + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWorkerTasks(t *testing.T) { + t.Parallel() + + type fields struct { + tasks []Task + } + tests := []struct { + name string + fields fields + want []Task + }{ + { + name: "retrieve tasks", + fields: fields{ + tasks: []Task{&task{id: "1"}, &task{id: "2"}}, + }, + want: []Task{&task{id: "1"}, &task{id: "2"}}, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + worker := &Worker{ + tasks: tt.fields.tasks, + } + assert.Equal(t, tt.want, worker.Tasks()) + }) + } +} + +func TestWorkerTask(t *testing.T) { + t.Parallel() + + type fields struct { + tasks []Task + } + type args struct { + taskID string + } + tests := []struct { + name string + fields fields + args args + want Task + }{ + { + name: "get task with id 1", + fields: fields{ + tasks: []Task{&task{id: "1"}, &task{id: "2"}}, + }, + args: args{"2"}, + want: &task{id: "2"}, + }, + { + name: "get not exist task", + fields: fields{ + tasks: []Task{&task{id: "1"}, &task{id: "2"}}, + }, + args: args{"3"}, + want: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + worker := &Worker{ + tasks: tt.fields.tasks, + } + assert.Equal(t, tt.want, worker.Task(tt.args.taskID)) + }) + } +} + +func TestWorkerAddTask(t *testing.T) { + t.Parallel() + + type args struct { + task Task + } + tests := []struct { + name string + args args + want []Task + }{ + { + name: "add task", + args: args{&task{id: "1"}}, + want: []Task{&task{id: "1"}}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + worker := &Worker{ + taskCh: make(chan Task), + } + + go func() { + 
worker.AddTask(tt.args.task) + }() + + <-worker.taskCh + tasks := worker.tasks + assert.Equal(t, tt.want, tasks) + + }) + } +} + +func TestWorkerRemoveTask(t *testing.T) { + t.Parallel() + + type args struct { + subTask Task + } + tests := []struct { + name string + args args + want []Task + }{ + { + name: "removed task", + args: args{&task{id: "1"}}, + want: []Task{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + worker := &Worker{ + tasks: []Task{tt.args.subTask}, + } + + worker.RemoveTask(tt.args.subTask) + assert.Equal(t, tt.want, worker.tasks) + }) + } +} diff --git a/pkg/configurer/file.go b/pkg/configurer/file.go new file mode 100644 index 00000000..49fab2bc --- /dev/null +++ b/pkg/configurer/file.go @@ -0,0 +1,58 @@ +package configurer + +import ( + "path/filepath" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/spf13/viper" +) + +// SetDefaultConfigPaths sets default paths for Viper to search for the config file in. +func SetDefaultConfigPaths(paths ...string) { + defaultConfigPaths = paths +} + +// ParseFile parses the config file from the given path `filename`, and assign it to the struct `config`. +func ParseFile(filename string, config interface{}) error { + var configType string + + switch filepath.Ext(filename) { + case ".conf": + configType = "env" + } + + return parseFile(filename, configType, config) +} + +// ParseJSONFile parses json config file from the given path `filename`, and assign it to the struct `config`. 
+func ParseJSONFile(filename string, config interface{}) error { + return parseFile(filename, "json", config) +} + +func parseFile(filename, configType string, config interface{}) error { + conf := viper.New() + + for _, configPath := range defaultConfigPaths { + conf.AddConfigPath(filepath.FromSlash(configPath)) + } + + if dir, _ := filepath.Split(filename); dir != "" { + conf.SetConfigFile(filename) + } else { + conf.SetConfigName(filename) + } + + if configType != "" { + conf.SetConfigType(configType) + } + + if err := conf.ReadInConfig(); err != nil { + return errors.Errorf("could not read config file: %w", err) + } + + if err := conf.Unmarshal(&config); err != nil { + return errors.Errorf("unable to decode into struct, %w", err) + } + + return nil +} diff --git a/pkg/configurer/path_darwin.go b/pkg/configurer/path_darwin.go new file mode 100644 index 00000000..3be74f09 --- /dev/null +++ b/pkg/configurer/path_darwin.go @@ -0,0 +1,20 @@ +//go:build darwin +// +build darwin + +package configurer + +import ( + "os" + "path/filepath" +) + +var defaultConfigPaths = []string{ + "$HOME/Library/Application Support/Pastel", + ".", +} + +// DefaultPath returns the default config path for darwin OS. +func DefaultPath() string { + homeDir, _ := os.UserConfigDir() + return filepath.Join(homeDir, "Pastel") +} diff --git a/pkg/configurer/path_linux.go b/pkg/configurer/path_linux.go new file mode 100644 index 00000000..efdb75da --- /dev/null +++ b/pkg/configurer/path_linux.go @@ -0,0 +1,20 @@ +//go:build linux +// +build linux + +package configurer + +import ( + "os" + "path/filepath" +) + +var defaultConfigPaths = []string{ + "$HOME/.pastel", + ".", +} + +// DefaultPath returns the default config path for Linux OS. 
+func DefaultPath() string { + homeDir, _ := os.UserHomeDir() + return filepath.Join(homeDir, ".pastel") +} diff --git a/pkg/configurer/path_windows.go b/pkg/configurer/path_windows.go new file mode 100644 index 00000000..9d313a6f --- /dev/null +++ b/pkg/configurer/path_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +package configurer + +import ( + "os" + "path" + "path/filepath" + "syscall" +) + +const ( + beforeVistaAppDir = "Application Data" + sinceVistaAppDir = "AppData/Roaming" +) + +var defaultConfigPaths = []string{ + path.Join("$HOME", beforeVistaAppDir, "Pastel"), + path.Join("$HOME", sinceVistaAppDir, "Pastel"), + ".", +} + +// DefaultPath returns the default config path for Windows OS. +func DefaultPath() string { + homeDir, _ := os.UserHomeDir() + appDir := beforeVistaAppDir + + v, _ := syscall.GetVersion() + if v&0xff > 5 { + appDir = sinceVistaAppDir + } + return filepath.Join(homeDir, filepath.FromSlash(appDir), "Pastel") +} diff --git a/pkg/errgroup/errgroup.go b/pkg/errgroup/errgroup.go new file mode 100644 index 00000000..ca2b3fe8 --- /dev/null +++ b/pkg/errgroup/errgroup.go @@ -0,0 +1,37 @@ +package errgroup + +import ( + "context" + "runtime/debug" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + + "golang.org/x/sync/errgroup" +) + +// A Group is a collection of goroutines working on subtasks that are part of the same overall task. +type Group struct { + *errgroup.Group +} + +// Go calls the given function in a new goroutine and tries to recover from panics. 
+func (group *Group) Go(fn func() error) { + group.Group.Go(func() (err error) { + defer errors.Recover(func(recErr error) { + fields := logtrace.Fields{ + logtrace.FieldError: recErr.Error(), + logtrace.FieldStackTrace: debug.Stack(), + } + logtrace.Error(context.Background(), "errgroup panic", fields) + err = recErr + }) + return fn() + }) +} + +// WithContext returns a new Group and an associated Context derived from ctx. +func WithContext(ctx context.Context) (*Group, context.Context) { + group, ctx := errgroup.WithContext(ctx) + return &Group{group}, ctx +} diff --git a/pkg/keyring/keyring.go b/pkg/keyring/keyring.go new file mode 100644 index 00000000..bcfa9443 --- /dev/null +++ b/pkg/keyring/keyring.go @@ -0,0 +1,155 @@ +package keyring + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/go-bip39" +) + +const ( + // Default BIP39 passphrase + DefaultBIP39Passphrase = "" + + // Default HD path for Cosmos accounts + DefaultHDPath = "m/44'/118'/0'/0/0" // Cosmos HD path + + // Lumera address prefixes + AccountAddressPrefix = "lumera" + Name = "lumera" + + // Default keyring name + KeyringServiceName = "lumera-keyring" +) + +// InitSDKConfig initializes the SDK configuration with Lumera-specific settings +func InitSDKConfig() { + // Set prefixes + accountPubKeyPrefix := AccountAddressPrefix + "pub" + validatorAddressPrefix := AccountAddressPrefix + "valoper" + validatorPubKeyPrefix := AccountAddressPrefix + "valoperpub" + consNodeAddressPrefix := AccountAddressPrefix + "valcons" + consNodePubKeyPrefix := AccountAddressPrefix + "valconspub" + + // Set and seal config + config := sdk.GetConfig() + 
config.SetBech32PrefixForAccount(AccountAddressPrefix, accountPubKeyPrefix) + config.SetBech32PrefixForValidator(validatorAddressPrefix, validatorPubKeyPrefix) + config.SetBech32PrefixForConsensusNode(consNodeAddressPrefix, consNodePubKeyPrefix) + config.Seal() +} + +// InitKeyring initializes the keyring +func InitKeyring(backend, dir string) (keyring.Keyring, error) { + // Determine keyring backend type + var backendType string + switch backend { + case "file": + backendType = "file" + case "os": + backendType = "os" + case "test": + backendType = "test" + case "memory", "mem": + backendType = "memory" + default: + return nil, fmt.Errorf("unsupported keyring backend: %s", backend) + } + + // Create interface registry and codec + interfaceRegistry := codectypes.NewInterfaceRegistry() + cryptocodec.RegisterInterfaces(interfaceRegistry) + cdc := codec.NewProtoCodec(interfaceRegistry) + + // Create keyring + kr, err := keyring.New( + KeyringServiceName, + backendType, + dir, + nil, //TODO : Fix this, Using nil for stdin to avoid interactive prompts when using test backend + cdc, + ) + if err != nil { + return nil, fmt.Errorf("failed to initialize keyring: %w", err) + } + + return kr, nil +} + +// GenerateMnemonic generates a new BIP39 mnemonic +func GenerateMnemonic(entropySize int) (string, error) { + entropy, err := bip39.NewEntropy(entropySize) + if err != nil { + return "", fmt.Errorf("failed to generate entropy: %w", err) + } + + mnemonic, err := bip39.NewMnemonic(entropy) + if err != nil { + return "", fmt.Errorf("failed to generate mnemonic: %w", err) + } + + return mnemonic, nil +} + +// CreateNewAccount creates a new account in the keyring +func CreateNewAccount(kr keyring.Keyring, name string, entropySize int) (string, *keyring.Record, error) { + // Generate a new mnemonic + mnemonic, err := GenerateMnemonic(entropySize) + if err != nil { + return "", nil, fmt.Errorf("failed to generate mnemonic: %w", err) + } + + // Create a new account with the generated 
mnemonic + info, err := kr.NewAccount( + name, + mnemonic, + DefaultBIP39Passphrase, + DefaultHDPath, + hd.Secp256k1, + ) + if err != nil { + return "", nil, fmt.Errorf("failed to create new account: %w", err) + } + + return mnemonic, info, nil +} + +// RecoverAccountFromMnemonic recovers an account from a mnemonic +func RecoverAccountFromMnemonic(kr keyring.Keyring, name, mnemonic string) (*keyring.Record, error) { + // Import account from mnemonic + info, err := kr.NewAccount( + name, + mnemonic, + DefaultBIP39Passphrase, + DefaultHDPath, + hd.Secp256k1, + ) + if err != nil { + return nil, fmt.Errorf("failed to recover account from mnemonic: %w", err) + } + + return info, nil +} + +// DerivePrivKeyFromMnemonic derives a private key directly from a mnemonic +func DerivePrivKeyFromMnemonic(mnemonic, hdPath string) (*secp256k1.PrivKey, error) { + if hdPath == "" { + hdPath = DefaultHDPath + } + + seed := bip39.NewSeed(mnemonic, DefaultBIP39Passphrase) + master, ch := hd.ComputeMastersFromSeed(seed) + + derivedKey, err := hd.DerivePrivateKeyForPath(master, ch, hdPath) + if err != nil { + return nil, fmt.Errorf("failed to derive private key: %w", err) + } + + return &secp256k1.PrivKey{Key: derivedKey}, nil +} diff --git a/pkg/logtrace/fields.go b/pkg/logtrace/fields.go index 9833af8c..9b28ac85 100644 --- a/pkg/logtrace/fields.go +++ b/pkg/logtrace/fields.go @@ -4,15 +4,18 @@ package logtrace type Fields map[string]interface{} const ( - FieldCorrelationID = "correlation_id" - FieldMethod = "method" - FieldModule = "module" - FieldError = "error" - FieldStatus = "status" - FieldBlockHeight = "block_height" - FieldLimit = "limit" - FieldSupernodeState = "supernode_state" - FieldRequest = "request" + FieldCorrelationID = "correlation_id" + FieldMethod = "method" + FieldModule = "module" + FieldError = "error" + FieldStatus = "status" + FieldBlockHeight = "block_height" + FieldLimit = "limit" + FieldSupernodeState = "supernode_state" + FieldRequest = "request" + 
FieldSupernodeAccountAddress = "supernode_account_address" + FieldIsPrimary = "is_primary" + FieldStackTrace = "stack_trace" ValueLumeraSDK = "lumera-sdk" ValueActionSDK = "action-sdk" diff --git a/pkg/lumera/client.go b/pkg/lumera/client.go index edccfb54..a2cc142a 100644 --- a/pkg/lumera/client.go +++ b/pkg/lumera/client.go @@ -53,7 +53,7 @@ func newClient(ctx context.Context, opts ...Option) (Client, error) { return nil, err } - nodeModule, err := node.NewModule(conn.GetConn()) + nodeModule, err := node.NewModule(conn.GetConn(), cfg.keyring) if err != nil { conn.Close() return nil, err diff --git a/pkg/lumera/config.go b/pkg/lumera/config.go index 6a370bf0..9c9208bc 100644 --- a/pkg/lumera/config.go +++ b/pkg/lumera/config.go @@ -1,5 +1,7 @@ package lumera +import "github.com/cosmos/cosmos-sdk/crypto/keyring" + // Config holds all the configuration needed for the client type Config struct { // GRPCAddr is the gRPC endpoint address @@ -10,6 +12,9 @@ type Config struct { // Timeout is the default request timeout in seconds Timeout int + + // keyring is the keyring conf for the node sign & verify + keyring keyring.Keyring } // DefaultConfig returns a default configuration diff --git a/pkg/lumera/interface.go b/pkg/lumera/interface.go index 1e1d8737..06be0759 100644 --- a/pkg/lumera/interface.go +++ b/pkg/lumera/interface.go @@ -11,7 +11,6 @@ import ( // Client defines the main interface for interacting with Lumera blockchain type Client interface { - // Module accessors Action() action.Module SuperNode() supernode.Module Tx() tx.Module diff --git a/pkg/lumera/modules/action/impl.go b/pkg/lumera/modules/action/impl.go index 507028d9..2ead8f03 100644 --- a/pkg/lumera/modules/action/impl.go +++ b/pkg/lumera/modules/action/impl.go @@ -36,14 +36,16 @@ func (m *module) GetAction(ctx context.Context, actionID string) (*types.QueryGe return resp, nil } -//// GetActionFee calculates fee for processing data with given size -//func (m *module) GetActionFee(ctx 
context.Context, dataSize string) (*types.QueryGetActionFeeResponse, error) { -// resp, err := m.client.GetActionFee(ctx, &types.QueryGetActionFeeRequest{ -// DataSize: dataSize, -// }) -// if err != nil { -// return nil, fmt.Errorf("failed to get action fee: %w", err) -// } -// -// return resp, nil -//} + +// GetActionFee calculates fee for processing data with given size +func (m *module) GetActionFee(ctx context.Context, dataSize string) (*types.QueryGetActionFeeResponse, error) { + resp, err := m.client.GetActionFee(ctx, &types.QueryGetActionFeeRequest{ + DataSize: dataSize, + }) + if err != nil { + return nil, fmt.Errorf("failed to get action fee: %w", err) + } + + return resp, nil +} + diff --git a/pkg/lumera/modules/action/interface.go b/pkg/lumera/modules/action/interface.go index 2aa2c7d7..844d80c3 100644 --- a/pkg/lumera/modules/action/interface.go +++ b/pkg/lumera/modules/action/interface.go @@ -10,7 +10,7 @@ import ( // Module defines the interface for interacting with the action module type Module interface { GetAction(ctx context.Context, actionID string) (*types.QueryGetActionResponse, error) - //GetActionFee(ctx context.Context, dataSize string) (*types.QueryGetActionFeeResponse, error) + GetActionFee(ctx context.Context, dataSize string) (*types.QueryGetActionFeeResponse, error) } // NewModule creates a new Action module client diff --git a/pkg/lumera/modules/node/impl.go b/pkg/lumera/modules/node/impl.go index e1d9deea..c32bffeb 100644 --- a/pkg/lumera/modules/node/impl.go +++ b/pkg/lumera/modules/node/impl.go @@ -3,6 +3,9 @@ package node import ( "context" "fmt" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/types" + signingtypes "github.com/cosmos/cosmos-sdk/types/tx/signing" cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" "google.golang.org/grpc" @@ -10,17 +13,19 @@ import ( // module implements the Module interface type module struct { + kr keyring.Keyring client cmtservice.ServiceClient } // 
newModule creates a new Node module client -func newModule(conn *grpc.ClientConn) (Module, error) { +func newModule(conn *grpc.ClientConn, keyring keyring.Keyring) (Module, error) { if conn == nil { return nil, fmt.Errorf("connection cannot be nil") } return &module{ client: cmtservice.NewServiceClient(conn), + kr: keyring, }, nil } @@ -87,3 +92,45 @@ func (m *module) GetValidatorSetByHeight(ctx context.Context, height int64) (*cm return resp, nil } + +func (m *module) Sign(snAccAddress string, data []byte) (signature []byte, err error) { + accAddr, err := types.AccAddressFromBech32(snAccAddress) + if err != nil { + return signature, fmt.Errorf("invalid address: %w", err) + } + + _, err = m.kr.KeyByAddress(accAddr) + if err != nil { + return signature, fmt.Errorf("address not found in keyring: %w", err) + } + + signature, _, err = m.kr.SignByAddress(accAddr, data, signingtypes.SignMode_SIGN_MODE_DIRECT) + if err != nil { + return nil, fmt.Errorf("failed to sign data: %w", err) + } + + return signature, nil +} + +func (m *module) Verify(accAddress string, data, signature []byte) (err error) { + addr, err := types.AccAddressFromBech32(accAddress) + if err != nil { + return fmt.Errorf("invalid address: %w", err) + } + + keyInfo, err := m.kr.KeyByAddress(addr) + if err != nil { + return fmt.Errorf("address not found in keyring: %w", err) + } + + pubKey, err := keyInfo.GetPubKey() + if err != nil { + return fmt.Errorf("failed to get public key: %w", err) + } + + if !pubKey.VerifySignature(data, signature) { + return fmt.Errorf("invalid signature") + } + + return nil +} diff --git a/pkg/lumera/modules/node/interface.go b/pkg/lumera/modules/node/interface.go index 0694e2af..60ef53c3 100644 --- a/pkg/lumera/modules/node/interface.go +++ b/pkg/lumera/modules/node/interface.go @@ -2,6 +2,7 @@ package node import ( "context" + "github.com/cosmos/cosmos-sdk/crypto/keyring" cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" "google.golang.org/grpc" @@ -26,9 +27,15 
@@ type Module interface { // GetValidatorSetByHeight gets the validator set at a specific height GetValidatorSetByHeight(ctx context.Context, height int64) (*cmtservice.GetValidatorSetByHeightResponse, error) + + // Sign signs the given bytes with the supernodeAccountAddress and returns the signature + Sign(snAccAddress string, data []byte) (signature []byte, err error) + + // Verify verifies the given bytes with given supernodeAccAddress public key and returns the error + Verify(accAddress string, data, signature []byte) (err error) } // NewModule creates a new Node module client -func NewModule(conn *grpc.ClientConn) (Module, error) { - return newModule(conn) +func NewModule(conn *grpc.ClientConn, kr keyring.Keyring) (Module, error) { + return newModule(conn, kr) } diff --git a/pkg/lumera/modules/supernode/impl.go b/pkg/lumera/modules/supernode/impl.go index b31d52c6..cc12f430 100644 --- a/pkg/lumera/modules/supernode/impl.go +++ b/pkg/lumera/modules/supernode/impl.go @@ -3,9 +3,12 @@ package supernode import ( "context" "fmt" + "github.com/LumeraProtocol/supernode/pkg/errors" "github.com/LumeraProtocol/lumera/x/supernode/types" + "google.golang.org/grpc" + "sort" ) // module implements the Module interface @@ -47,3 +50,34 @@ func (m *module) GetSuperNode(ctx context.Context, address string) (*types.Query return resp, nil } + +func (m *module) GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*types.SuperNode, error) { + resp, err := m.client.GetSuperNodeBySuperNodeAddress(ctx, &types.QueryGetSuperNodeBySuperNodeAddressRequest{ + SupernodeAddress: address, + }) + if err != nil { + fmt.Errorf("failed to get supernode: %w", err) + } + + return resp.Supernode, nil +} + +func Exists(nodes []*types.SuperNode, snAccAddress string) bool { + for _, sn := range nodes { + if sn.SupernodeAccount == snAccAddress { + return true + } + } + return false +} + +func GetLatestIP(supernode *types.SuperNode) (string, error) { + if len(supernode.PrevIpAddresses) 
== 0 { + return "", errors.Errorf("no ip history exists for the supernode") + } + sort.Slice(supernode.PrevIpAddresses, func(i, j int) bool { + return supernode.PrevIpAddresses[i].GetHeight() > supernode.PrevIpAddresses[j].GetHeight() + }) + + return supernode.PrevIpAddresses[0].Address, nil +} diff --git a/pkg/lumera/modules/supernode/interface.go b/pkg/lumera/modules/supernode/interface.go index 37e57b12..ed830f36 100644 --- a/pkg/lumera/modules/supernode/interface.go +++ b/pkg/lumera/modules/supernode/interface.go @@ -11,6 +11,7 @@ import ( type Module interface { GetTopSuperNodesForBlock(ctx context.Context, blockHeight uint64) (*types.QueryGetTopSuperNodesForBlockResponse, error) GetSuperNode(ctx context.Context, address string) (*types.QueryGetSuperNodeResponse, error) + GetSupernodeBySupernodeAddress(ctx context.Context, address string) (*types.SuperNode, error) } // NewModule creates a new SuperNode module client diff --git a/pkg/lumera/options.go b/pkg/lumera/options.go index 862194ac..7bc5220e 100644 --- a/pkg/lumera/options.go +++ b/pkg/lumera/options.go @@ -1,5 +1,7 @@ package lumera +import "github.com/cosmos/cosmos-sdk/crypto/keyring" + // Option is a function that applies a change to Config type Option func(*Config) @@ -23,3 +25,10 @@ func WithTimeout(seconds int) Option { c.Timeout = seconds } } + +// WithKeyring sets the keyring conf for the node +func WithKeyring(k keyring.Keyring) Option { + return func(c *Config) { + c.keyring = k + } +} diff --git a/pkg/raptorq/config.go b/pkg/raptorq/config.go index 92203643..ad6d0dd3 100644 --- a/pkg/raptorq/config.go +++ b/pkg/raptorq/config.go @@ -15,6 +15,8 @@ type Config struct { // the queries port to listen for connections on Port int `mapstructure:"port" json:"port,omitempty"` + + RqFilesDir string `mapstructure:"rqfiles_dir" json:"rqfiles_dir,omitempty"` } // NewConfig returns a new Config instance. 
diff --git a/pkg/raptorq/gen_rq_identifier_files.go b/pkg/raptorq/gen_rq_identifier_files.go
new file mode 100644
index 00000000..3b851856
--- /dev/null
+++ b/pkg/raptorq/gen_rq_identifier_files.go
@@ -0,0 +1,44 @@
+package raptorq
+
+import (
+	"context"
+
+	"github.com/LumeraProtocol/supernode/pkg/logtrace"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// GenRQIdentifiersFiles generates the RaptorQ identifiers file for the given data and
+// returns the initial counter, the identifier list, the compressed IDs file, the
+// encoder parameters and the signature over the raw identifiers file.
+func (s *raptorQServerClient) GenRQIdentifiersFiles(ctx context.Context, fields logtrace.Fields, data []byte, operationBlockHash string, pastelID string, rqMax uint32) (RQIDsIc uint32, RQIDs []string, RQIDsFile []byte, RQEncodeParams EncoderParameters, signature []byte, err error) {
+	encodeInfo, err := s.encodeInfo(ctx, data, rqMax, operationBlockHash, pastelID)
+	if err != nil {
+		return RQIDsIc, RQIDs, RQIDsFile, RQEncodeParams, signature, status.Errorf(codes.Internal, "generate RaptorQ symbols identifiers: %v", err)
+	}
+
+	// encodeInfo has already verified that exactly rqMax symbol-identifier files were
+	// produced; only the first one is needed to build the RQIDs file.
+	var generated bool
+	for i := range encodeInfo.SymbolIDFiles {
+		if len(encodeInfo.SymbolIDFiles[i].SymbolIdentifiers) == 0 {
+			return RQIDsIc, RQIDs, RQIDsFile, RQEncodeParams, signature, status.Errorf(codes.Internal, "empty symbol identifiers - rawFile")
+		}
+
+		// plain assignment (not ':=') so the named results are not shadowed by loop-local variables
+		RQIDsIc, RQIDs, RQIDsFile, signature, err = s.generateRQIDs(ctx, encodeInfo.SymbolIDFiles[i], pastelID, rqMax)
+		if err != nil {
+			return RQIDsIc, RQIDs, RQIDsFile, RQEncodeParams, signature, status.Errorf(codes.Internal, "create RQIDs file: %v", err)
+		}
+		generated = true
+		break
+	}
+	if !generated {
+		return RQIDsIc, RQIDs, RQIDsFile, RQEncodeParams, signature, status.Errorf(codes.Internal, "no RaptorQ symbol identifiers files were produced, most probably old version of rq-services is installed")
+	}
+
+	RQEncodeParams = encodeInfo.EncoderParam
+
+	return RQIDsIc, RQIDs, RQIDsFile, RQEncodeParams, signature, nil
+}
diff --git a/pkg/raptorq/helper.go b/pkg/raptorq/helper.go
new file mode 100644
index 00000000..3d8f95dc
--- /dev/null
+++ b/pkg/raptorq/helper.go
@@ -0,0 +1,158
@@ +package raptorq + +import ( + "bytes" + "context" + "encoding/json" + "math/rand/v2" + "os" + "strconv" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + "github.com/LumeraProtocol/supernode/pkg/utils" + "github.com/cosmos/btcutil/base58" +) + +const ( + InputEncodeFileName = "input.data" + SeparatorByte byte = 46 // separator in dd_and_fingerprints.signature i.e. '.' +) + +// EncoderParameters represents the encoding params used by raptorq services +type EncoderParameters struct { + Oti []byte +} + +// EncodeInfo represents the response returns by encodeInfo method +type EncodeInfo struct { + SymbolIDFiles map[string]RawSymbolIDFile + EncoderParam EncoderParameters +} + +// Encode represents the response returns by Encode method +type Encode struct { + Symbols map[string][]byte + EncoderParam EncoderParameters +} + +// Decode represents the response returns by Decode method +type Decode struct { + File []byte +} + +func (s *raptorQServerClient) encodeInfo(ctx context.Context, data []byte, copies uint32, blockHash string, pastelID string) (*EncodeInfo, error) { + s.semaphore <- struct{}{} // Acquire slot + defer func() { + <-s.semaphore // Release the semaphore slot + }() + + if data == nil { + return nil, errors.Errorf("invalid data") + } + + _, inputPath, err := createInputEncodeFile(s.config.RqFilesDir, data) + if err != nil { + return nil, errors.Errorf("create input file: %w", err) + } + res, err := s.EncodeMetaData(ctx, EncodeMetadataRequest{ + FilesNumber: copies, + BlockHash: blockHash, + PastelId: pastelID, + Path: inputPath, + }) + if err != nil { + return nil, errors.Errorf("encode metadata %s: %w", res.Path, err) + } + + filesMap, err := scanSymbolIDFiles(res.Path) + if err != nil { + return nil, errors.Errorf("scan symbol id files folder %s: %w", res.Path, err) + } + + if len(filesMap) != int(copies) { + return nil, errors.Errorf("symbol id files count not match: expect %d, output %d", 
copies, len(filesMap)) + } + + output := &EncodeInfo{ + SymbolIDFiles: filesMap, + EncoderParam: EncoderParameters{ + Oti: res.EncoderParameters, + }, + } + + if err := os.Remove(inputPath); err != nil { + logtrace.Error(ctx, "encode info: error removing input file", logtrace.Fields{"Path": inputPath}) + } + + return output, nil +} + +func (s *raptorQServerClient) generateRQIDs(ctx context.Context, rawFile RawSymbolIDFile, snAccAddress string, maxFiles uint32) (RQIDsIc uint32, RQIDs []string, RQIDsFile []byte, signature []byte, err error) { + rqIDsfile, err := json.Marshal(rawFile) + if err != nil { + return RQIDsIc, RQIDs, RQIDsFile, signature, errors.Errorf("marshal rqID file") + } + + // FIXME : msgs param + signature, err = s.lumeraClient.Node().Sign(snAccAddress, rqIDsfile) // FIXME : confirm the data + if err != nil { + return RQIDsIc, RQIDs, RQIDsFile, signature, errors.Errorf("sign identifiers file: %w", err) + } + + encRqIDsfile := utils.B64Encode(rqIDsfile) + + var buffer bytes.Buffer + buffer.Write(encRqIDsfile) + buffer.WriteString(".") + buffer.Write(signature) + rqIDFile := buffer.Bytes() + + RQIDsIc = rand.Uint32() + RQIDs, _, err = GetIDFiles(ctx, rqIDFile, RQIDsIc, maxFiles) + if err != nil { + return RQIDsIc, RQIDs, RQIDsFile, signature, errors.Errorf("get ID Files: %w", err) + } + + comp, err := utils.HighCompress(ctx, rqIDFile) + if err != nil { + return RQIDsIc, RQIDs, RQIDsFile, signature, errors.Errorf("compress: %w", err) + } + RQIDsFile = utils.B64Encode(comp) + + return RQIDsIc, RQIDs, RQIDsFile, signature, nil +} + +// GetIDFiles generates ID Files for dd_and_fingerprints files and rq_id files +// file is b64 encoded file appended with signatures and compressed, ic is the initial counter +// and max is the number of ids to generate +func GetIDFiles(ctx context.Context, file []byte, ic uint32, max uint32) (ids []string, files [][]byte, err error) { + idFiles := make([][]byte, 0, max) + ids = make([]string, 0, max) + var buffer bytes.Buffer 
+ + for i := uint32(0); i < max; i++ { + buffer.Reset() + counter := ic + i + + buffer.Write(file) + buffer.WriteByte(SeparatorByte) + buffer.WriteString(strconv.Itoa(int(counter))) // Using the string representation to maintain backward compatibility + + compressedData, err := utils.HighCompress(ctx, buffer.Bytes()) // Ensure you're using the same compression level + if err != nil { + return ids, idFiles, errors.Errorf("compress identifiers file: %w", err) + } + + idFiles = append(idFiles, compressedData) + + hash, err := utils.Sha3256hash(compressedData) + if err != nil { + return ids, idFiles, errors.Errorf("sha3-256-hash error getting an id file: %w", err) + } + + ids = append(ids, base58.Encode(hash)) + } + + return ids, idFiles, nil +} diff --git a/pkg/raptorq/interfaces.go b/pkg/raptorq/interfaces.go index 4993d2e5..b6862e87 100644 --- a/pkg/raptorq/interfaces.go +++ b/pkg/raptorq/interfaces.go @@ -4,6 +4,8 @@ package raptorq import ( "context" + + "github.com/LumeraProtocol/supernode/pkg/logtrace" ) // ClientInterface represents a base connection interface. 
@@ -33,4 +35,6 @@ type RaptorQ interface { Decode(ctx context.Context, req DecodeRequest) (DecodeResponse, error) // EncodeMetaData Get encode info(include encode parameters + symbol id files) EncodeMetaData(ctx context.Context, req EncodeMetadataRequest) (EncodeResponse, error) + // GenRQIdentifiersFiles generates the RQ identifier files + GenRQIdentifiersFiles(ctx context.Context, fields logtrace.Fields, data []byte, operationBlockHash string, pastelID string, rqMax uint32) (RQIDsIc uint32, RQIDs []string, RQIDsFile []byte, RQEncodeParams EncoderParameters, signature []byte, err error) } diff --git a/pkg/raptorq/rq_server_client.go b/pkg/raptorq/rq_server_client.go index 1d0341ab..877fc7f5 100644 --- a/pkg/raptorq/rq_server_client.go +++ b/pkg/raptorq/rq_server_client.go @@ -1,6 +1,7 @@ package raptorq import ( + "github.com/LumeraProtocol/supernode/pkg/lumera" "time" rq "github.com/LumeraProtocol/supernode/gen/raptorq" @@ -12,10 +13,11 @@ const ( ) type raptorQServerClient struct { - config *Config - conn *clientConn - rqService rq.RaptorQClient - semaphore chan struct{} // Semaphore to control concurrency + config *Config + conn *clientConn + rqService rq.RaptorQClient + lumeraClient lumera.Client + semaphore chan struct{} // Semaphore to control concurrency } func newRaptorQServerClient(conn *clientConn, config *Config) RaptorQ { diff --git a/pkg/storage/file_storage_interface.go b/pkg/storage/file_storage_interface.go new file mode 100644 index 00000000..faa1b0ca --- /dev/null +++ b/pkg/storage/file_storage_interface.go @@ -0,0 +1,45 @@ +//go:generate mockery --name=FileStorageInterface +//go:generate mockery --name=FileInterface + +package storage + +import ( + "io" + + "github.com/LumeraProtocol/supernode/pkg/errors" +) + +var ( + // ErrFileNotFound is returned when file isn't found. + ErrFileNotFound = errors.New("file not found") + // ErrFileExists is returned when file already exists. 
+ ErrFileExists = errors.New("file exists") +) + +// FileStorageInterface represents a file storage. +type FileStorageInterface interface { + // Open opens a file and returns file descriptor. + // If name is not found, ErrFileNotFound is returned. + Open(name string) (file FileInterface, err error) + + // Create creates a new file with the given name and returns file descriptor. + Create(name string) (file FileInterface, err error) + + // Remove removes a file by the given name. + Remove(name string) error + + // Rename renames oldname to newname. + Rename(oldname, newname string) error +} + +// FileInterface represents a file. +type FileInterface interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string +} diff --git a/pkg/storage/files/file.go b/pkg/storage/files/file.go new file mode 100644 index 00000000..d304abe5 --- /dev/null +++ b/pkg/storage/files/file.go @@ -0,0 +1,382 @@ +package files + +import ( + "bytes" + "fmt" + "image" + "image/gif" + "image/jpeg" + "image/png" + "io" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/storage" + + "github.com/disintegration/imaging" + "github.com/kolesa-team/go-webp/decoder" + "github.com/kolesa-team/go-webp/encoder" + "github.com/kolesa-team/go-webp/webp" +) + +// File represents a file. +type File struct { + fmt.Stringer + sync.Mutex + + storage.FileInterface + storage *Storage + + // if a file was created during the process, it should be deleted at the end. + isCreated bool + + // unique name within the storage. + name string + + // file format, png, jpg, etc. + format Format +} + +// Name returns filename. 
+func (file *File) Name() string { + return file.name +} + +func (file *File) String() string { + return file.name +} + +// SetFormatFromExtension parses and sets image format from filename extension: +// "jpg" (or "jpeg"), "png", "gif" are supported. +func (file *File) SetFormatFromExtension(ext string) error { + if format, ok := formatExts[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok { + return file.SetFormat(format) + } + return ErrUnsupportedFormat +} + +// SetFormat sets file extension. +func (file *File) SetFormat(format Format) error { + file.format = format + + newname := fmt.Sprintf("%s.%s", strings.TrimSuffix(file.name, filepath.Ext(file.name)), format) + oldname := file.name + file.name = newname + + if err := file.storage.Update(oldname, newname, file); err != nil { + return err + } + + if !file.isCreated { + return nil + } + return file.storage.Rename(oldname, newname) +} + +// Format returns file extension. +func (file *File) Format() Format { + return file.format +} + +// Open opens a file and returns file descriptor. +// If file is not found, storage.ErrFileNotFound is returned. +func (file *File) Open() (storage.FileInterface, error) { + file.Lock() + defer file.Unlock() + + return file.storage.Open(file.Name()) +} + +// Create creates a file and returns file descriptor. +func (file *File) Create() (storage.FileInterface, error) { + file.Lock() + defer file.Unlock() + + fl, err := file.storage.Create(file.name) + if err != nil { + return nil, err + } + + file.isCreated = true + return fl, nil +} + +// Remove removes the file. +func (file *File) Remove() error { + file.Lock() + defer file.Unlock() + + delete(file.storage.filesMap, file.name) + + if !file.isCreated { + return nil + } + file.isCreated = false + + return file.storage.Remove(file.name) +} + +// Copy creates a copy of the current file. 
+func (file *File) Copy() (*File, error) { + src, err := file.Open() + if err != nil { + return nil, err + } + defer src.Close() + + newFile := file.storage.NewFile() + if err := newFile.SetFormat(file.format); err != nil { + return nil, err + } + + dst, err := newFile.Create() + if err != nil { + return nil, err + } + defer dst.Close() + + if _, err := io.Copy(dst, src); err != nil { + return nil, errors.Errorf("copy file: %w", err) + } + return newFile, nil +} + +// Bytes returns the contents of the file by bytes. +func (file *File) Bytes() ([]byte, error) { + f, err := file.Open() + if err != nil { + return nil, err + } + defer f.Close() + + buf := new(bytes.Buffer) + if _, err := buf.ReadFrom(f); err != nil { + return nil, errors.Errorf("read file: %w", err) + } + + return buf.Bytes(), nil +} + +// Write writes data to the file. +func (file *File) Write(data []byte) (n int, err error) { + f, err := file.Create() + if err != nil { + return 0, errors.Errorf("create file: %w", err) + } + defer f.Close() + + n, err = f.Write(data) + if err != nil { + return n, errors.Errorf("write file: %w", err) + } + + return +} + +// ResizeImage resizes image. +func (file *File) ResizeImage(width, height int) error { + src, err := file.LoadImage() + if err != nil { + return err + } + + dst := imaging.Resize(src, width, height, imaging.Lanczos) + + return file.SaveImage(dst) +} + +// RemoveAfter removes the file after the specified duration. +func (file *File) RemoveAfter(d time.Duration) { + go func() { + time.AfterFunc(d, func() { file.Remove() }) + }() +} + +// LoadImage opens images from the file. 
+func (file *File) LoadImage() (image.Image, error) { + f, err := file.Open() + if err != nil { + return nil, err + } + defer f.Close() + + img, _, err := image.Decode(f) + if err != nil { + // Reset the reader to the beginning of the file + _, errSeek := f.Seek(0, io.SeekStart) + if errSeek != nil { + return nil, errors.Errorf("reset file reader: %w", errSeek) + } + + var errWebp error + img, errWebp = webp.Decode(f, &decoder.Options{}) + if errWebp != nil { + return nil, errors.Errorf("decode image(%s) - %w - tried webp as well: %w", f.Name(), err, errWebp) + } + } + + return img, nil +} + +// SaveImage saves image to the file. +func (file *File) SaveImage(img image.Image) error { + f, err := file.Create() + if err != nil { + return err + } + defer f.Close() + + switch file.format { + case JPEG: + if nrgba, ok := img.(*image.NRGBA); ok && nrgba.Opaque() { + rgba := &image.RGBA{ + Pix: nrgba.Pix, + Stride: nrgba.Stride, + Rect: nrgba.Rect, + } + if err := jpeg.Encode(f, rgba, nil); err != nil { + return errors.Errorf("encode jpeg rgba(%s): %w", f.Name(), err) + } + return nil + } + if err := jpeg.Encode(f, img, nil); err != nil { + return errors.Errorf("encode jpeg(%s): %w", f.Name(), err) + } + return nil + + case PNG: + encoder := png.Encoder{CompressionLevel: png.DefaultCompression} + if err := encoder.Encode(f, img); err != nil { + return errors.Errorf("encode png(%s): %w", f.Name(), err) + } + return nil + + case GIF: + if err := gif.Encode(f, img, nil); err != nil { + return errors.Errorf("encode gif(%s): %w", f.Name(), err) + } + return nil + case WEBP: + opts, err := encoder.NewLosslessEncoderOptions(encoder.PresetDefault, 0) + if err != nil { + return errors.Errorf("create lossless encoder option %w", err) + } + if err := webp.Encode(f, img, opts); err != nil { + return errors.Errorf("encode webp(%s): %w", f.Name(), err) + } + return nil + + } + + return ErrUnsupportedFormat +} + +// Thumbnail creates a thumbnail file from the NFT file and store in to 
storage layer +func (file *File) Thumbnail(coordinate ThumbnailCoordinate) (*File, error) { + f := NewFile(file.storage, "thumbnail-of-"+file.name) + if f == nil { + return nil, errors.Errorf("create new file for thumbnail-of-%q", file.Name()) + } + if err := f.SetFormat(file.Format()); err != nil { + return nil, errors.Errorf("set format for thumbnail-of-%q", file.Name()) + } + + img, err := file.LoadImage() + if err != nil { + return nil, errors.Errorf("load image from file(%s): %w", file.Name(), err) + } + + rect := image.Rect(int(coordinate.TopLeftX), int(coordinate.TopLeftY), int(coordinate.BottomRightX), int(coordinate.BottomRightY)) + thumbnail := imaging.Crop(img, rect) + if thumbnail == nil { + return nil, errors.Errorf("generate thumbnail(%s): %w", file.Name(), err) + } + + if err := f.SaveImage(thumbnail); err != nil { + return nil, errors.Errorf("save thumbnail(%s): %w", file.Name(), err) + } + + return f, nil +} + +// UpdateFormat updates file format +func (file *File) UpdateFormat() error { + f, err := file.Open() + if err != nil { + return err + } + defer f.Close() + + // Try decoding with the standard library first + _, format, err := image.Decode(f) + if err != nil { + // If standard decoding fails, reset the reader and try WebP decoding + _, errSeek := f.Seek(0, io.SeekStart) + if errSeek != nil { + return errors.Errorf("reset file reader: %w", errSeek) + } + + _, errWebp := webp.Decode(f, &decoder.Options{}) + if errWebp != nil { + return errors.Errorf("decode image(%s) in updateFormat - tried webp as well: %w", f.Name(), errWebp) + } + format = "webp" + } + + err = file.SetFormatFromExtension(format) + if err != nil { + log.WithError(err).Error(fmt.Sprintf("not able to set extension:%s", err.Error())) + return errors.Errorf("set file format(%s): %w", file.Name(), err) + } + + return nil +} + +// Encoder represents an image encoder. 
+type Encoder interface { + Encode(img image.Image) (image.Image, error) +} + +// Encode encodes the image by the given encoder. +func (file *File) Encode(enc Encoder) error { + img, err := file.LoadImage() + if err != nil { + return fmt.Errorf("load image: %w", err) + } + + encImg, err := enc.Encode(img) + if err != nil { + return fmt.Errorf("common encode image: %w", err) + } + return file.SaveImage(encImg) +} + +// Decoder represents an image decoder. +type Decoder interface { + Decode(img image.Image) error +} + +// Decode decodes the image by the given decoder. +func (file *File) Decode(dec Decoder) error { + img, err := file.LoadImage() + if err != nil { + return err + } + if err := dec.Decode(img); err != nil { + return fmt.Errorf("common decode image: %w", err) + } + + return nil +} + +// NewFile returns a newFile File instance. +func NewFile(storage *Storage, name string) *File { + return &File{ + storage: storage, + name: name, + } +} diff --git a/pkg/storage/files/format.go b/pkg/storage/files/format.go new file mode 100644 index 00000000..5a54fc23 --- /dev/null +++ b/pkg/storage/files/format.go @@ -0,0 +1,36 @@ +package files + +import ( + "github.com/LumeraProtocol/supernode/pkg/errors" +) + +// ErrUnsupportedFormat means the given image format is not supported. +var ErrUnsupportedFormat = errors.New("imaging: unsupported image format") + +// Image file formats. +const ( + JPEG Format = iota + PNG + GIF + WEBP +) + +var formatExts = map[string]Format{ + "jpg": JPEG, + "jpeg": JPEG, + "png": PNG, + "webp": WEBP, +} + +var formatNames = map[Format]string{ + JPEG: "jpeg", + PNG: "png", + WEBP: "webp", +} + +// Format is an image file format. 
+type Format int + +func (f Format) String() string { + return formatNames[f] +} diff --git a/pkg/storage/files/storage.go b/pkg/storage/files/storage.go new file mode 100644 index 00000000..9ce1d4e7 --- /dev/null +++ b/pkg/storage/files/storage.go @@ -0,0 +1,82 @@ +package files + +import ( + "context" + "fmt" + "sync/atomic" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/random" + "github.com/LumeraProtocol/supernode/pkg/storage" +) + +// Storage represents a file storage. +type Storage struct { + storage.FileStorageInterface + + idCounter int64 + prefix string + filesMap map[string]*File +} + +// Run removes all files when the context is canceled. +func (storage *Storage) Run(ctx context.Context) error { + <-ctx.Done() + + var errs error + for _, file := range storage.filesMap { + if err := file.Remove(); err != nil { + errs = errors.Append(errs, err) + } + } + + return errs +} + +// NewFile returns a new File instance with a unique name. +func (storage *Storage) NewFile() *File { + id := atomic.AddInt64(&storage.idCounter, 1) + name := fmt.Sprintf("%s-%d", storage.prefix, id) + + file := NewFile(storage, name) + storage.filesMap[name] = file + + return file +} + +// File returns File by the given name. +func (storage *Storage) File(name string) (*File, error) { + file, ok := storage.filesMap[name] + if !ok { + return nil, errors.New("image not found") + } + return file, nil +} + +// Update changes the key to identify a *File to a new key +func (storage *Storage) Update(oldname, newname string, file *File) error { + f, ok := storage.filesMap[oldname] + if !ok { + return errors.New("file not found") + } + + if f != file { + return errors.New("not the same file") + } + + delete(storage.filesMap, oldname) + storage.filesMap[newname] = file + return nil +} + +// NewStorage returns a new Storage instance. 
// NewStorage returns a new Storage instance backed by the given
// FileStorageInterface. Names produced by Storage.NewFile are prefixed
// with a random 8-character Base62 string so that names from separate
// Storage instances are unlikely to collide.
func NewStorage(storage storage.FileStorageInterface) *Storage {
	// NOTE(review): the error from random.String is deliberately ignored;
	// if it ever failed, prefix would be empty and generated names would
	// degrade to "-1", "-2", … — confirm this best-effort behavior is OK.
	prefix, _ := random.String(8, random.Base62Chars)

	return &Storage{
		FileStorageInterface: storage,

		prefix:   prefix,
		filesMap: make(map[string]*File),
	}
}
+const ( + logPrefix = "storage-fs" +) + +// FS represents file system storage. +type FS struct { + dir string +} + +// Open implements storage.FileStorageInterface.Open +func (fs *FS) Open(filename string) (storage.FileInterface, error) { + filename = filepath.Join(fs.dir, filename) + + if _, err := os.Stat(filename); os.IsNotExist(err) { + return nil, storage.ErrFileNotFound + } + + file, err := os.Open(filename) + if err != nil { + return nil, errors.Errorf("open file %q: %w", filename, err) + } + return file, nil +} + +// Create implements storage.FileStorageInterface.Create +func (fs *FS) Create(filename string) (storage.FileInterface, error) { + filename = filepath.Join(fs.dir, filename) + + if _, err := os.Stat(filename); !os.IsNotExist(err) { + log.WithPrefix(logPrefix).Debugf("Rewrite file %q", filename) + } else { + log.WithPrefix(logPrefix).Debugf("Create file %q", filename) + } + + file, err := os.Create(filename) + if err != nil { + return nil, errors.Errorf("create file %q: %w", filename, err) + } + return file, nil +} + +// Remove implements storage.FileStorageInterface.Remove +func (fs *FS) Remove(filename string) error { + filename = filepath.Join(fs.dir, filename) + + log.WithPrefix(logPrefix).Debugf("Remove file %q", filename) + + if err := os.Remove(filename); err != nil { + return errors.Errorf("remove file %q: %w", filename, err) + } + return nil +} + +// Rename renames oldName to newName. +func (fs *FS) Rename(oldname, newname string) error { + if oldname == newname { + return nil + } + + oldname = filepath.Join(fs.dir, oldname) + newname = filepath.Join(fs.dir, newname) + + log.WithPrefix(logPrefix).Debugf("Rename file %q to %q", oldname, newname) + + if err := os.Rename(oldname, newname); err != nil { + return errors.Errorf("rename file %q to %q: %w", oldname, newname, err) + } + return nil +} + +// NewFileStorage returns new FS instance. Where `dir` is the path for storing files. 
+func NewFileStorage(dir string) storage.FileStorageInterface { + return &FS{ + dir: dir, + } +} diff --git a/pkg/storage/fs/file_test.go b/pkg/storage/fs/file_test.go new file mode 100644 index 00000000..955e590f --- /dev/null +++ b/pkg/storage/fs/file_test.go @@ -0,0 +1,168 @@ +package fs + +import ( + "fmt" + "os" + "testing" + + "github.com/LumeraProtocol/supernode/pkg/storage" + + "github.com/stretchr/testify/assert" +) + +func TestFSOpen(t *testing.T) { + t.Parallel() + + type args struct { + filename string + } + + type handleFunc func(dir string, t assert.TestingT) + + testCases := []struct { + args args + createfunc handleFunc + assertion assert.ErrorAssertionFunc + valueAssert assert.ValueAssertionFunc + }{ + { + args: args{"test.txt"}, + assertion: assert.NoError, + valueAssert: assert.NotNil, + createfunc: func(dir string, t assert.TestingT) { + fs := &FS{ + dir: dir, + } + + _, err := fs.Create("test.txt") + assert.NoError(t, err) + }, + }, { + args: args{"non-exit.txt"}, + assertion: assert.Error, + valueAssert: assert.Nil, + createfunc: func(dir string, t assert.TestingT) {}, + }, + } + + for i, testCase := range testCases { + testCase := testCase + + t.Run("group", func(t *testing.T) { + dir, _ := os.MkdirTemp("", "*") + defer os.RemoveAll(dir) + + t.Run(fmt.Sprintf("testCase-%d", i), func(t *testing.T) { + testCase.createfunc(dir, t) + fs := &FS{dir: dir} + + got, err := fs.Open(testCase.args.filename) + testCase.assertion(t, err) + testCase.valueAssert(t, got) + }) + }) + + } +} + +func TestFSCreate(t *testing.T) { + t.Parallel() + + type args struct { + filename string + } + + testCases := []struct { + args args + assertion assert.ErrorAssertionFunc + }{ + { + args: args{"test-1.txt"}, + assertion: assert.NoError, + }, + } + + t.Run("group", func(t *testing.T) { + dir, _ := os.MkdirTemp("", "*") + defer os.RemoveAll(dir) + + for i, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("testCase-%d", i), func(t *testing.T) { + 
// HealthCheckChallengeQueries groups the SQLite operations used to store,
// broadcast, and aggregate health-check challenge messages and metrics.
type HealthCheckChallengeQueries interface {
	// InsertHealthCheckChallengeMessage persists a single challenge message.
	InsertHealthCheckChallengeMessage(challenge types.HealthCheckChallengeLogMessage) error
	// InsertBroadcastHealthCheckMessage persists a broadcast challenge message.
	InsertBroadcastHealthCheckMessage(challenge types.BroadcastHealthCheckLogMessage) error
	// QueryHCChallengeMessage fetches one message by challenge ID and message type.
	QueryHCChallengeMessage(challengeID string, messageType int) (challengeMessage types.HealthCheckChallengeLogMessage, err error)
	// GetHealthCheckChallengeMetricsByChallengeID lists all metric rows for a challenge.
	GetHealthCheckChallengeMetricsByChallengeID(challengeID string) ([]types.HealthCheckChallengeLogMessage, error)

	// GetHCMetricsByChallengeIDAndMessageType lists metric rows filtered by message type.
	GetHCMetricsByChallengeIDAndMessageType(challengeID string, messageType types.HealthCheckMessageType) ([]types.HealthCheckChallengeLogMessage, error)
	// BatchInsertHCMetrics inserts many metric rows inside one transaction.
	BatchInsertHCMetrics(metrics []types.HealthCheckChallengeLogMessage) error
	// HealthCheckChallengeMetrics returns metric rows created after the timestamp.
	HealthCheckChallengeMetrics(timestamp time.Time) ([]types.HealthCheckChallengeLogMessage, error)
	// InsertHealthCheckChallengeMetric inserts a single metric row.
	InsertHealthCheckChallengeMetric(metric types.HealthCheckChallengeMetric) error
	// GetHCSummaryStats aggregates challenge counts and observer outcomes since `from`.
	GetHCSummaryStats(from time.Time) (hcMetrics metrics.HCMetrics, err error)
	// GetTotalHCGeneratedAndProcessedAndEvaluated counts generated/processed/evaluated challenges since `from`.
	GetTotalHCGeneratedAndProcessedAndEvaluated(from time.Time) (metrics.HCMetrics, error)
	// GetMetricsDataByHealthCheckChallengeID decodes all metric rows of a challenge into messages.
	GetMetricsDataByHealthCheckChallengeID(ctx context.Context, challengeID string) ([]types.HealthCheckMessage, error)
	// GetLastNHCMetrics returns the most recent metric rows (count defined by the implementation).
	GetLastNHCMetrics() ([]types.NHcMetric, error)

	// GetDistinctHCChallengeIDsCountForScoreAggregation counts distinct challenge IDs in [after, before).
	GetDistinctHCChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error)
	// GetDistinctHCChallengeIDs pages distinct challenge IDs in [after, before) by batch number.
	GetDistinctHCChallengeIDs(after, before time.Time, batchNumber int) ([]string, error)
}
+ err = s.db.QueryRow(totalChallengesProcessedQuery, from).Scan(&metrics.TotalChallengesProcessed) + if err != nil { + return metrics, err + } + + totalChallengesEvaluatedQuery := "SELECT COUNT(DISTINCT challenge_id) FROM healthcheck_challenge_metrics WHERE message_type = 3 AND created_at > ?" + err = s.db.QueryRow(totalChallengesEvaluatedQuery, from).Scan(&metrics.TotalChallengesEvaluatedByChallenger) + if err != nil { + return metrics, err + } + + return metrics, nil +} + +// GetHCObserversEvaluations retrieves the observer's evaluations +func (s *SQLiteStore) GetHCObserversEvaluations(from time.Time) ([]types.HealthCheckChallengeLogMessage, error) { + var messages []types.HealthCheckChallengeLogMessage + + query := "SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at FROM healthcheck_challenge_metrics WHERE message_type = 4 and created_at > ?" + rows, err := s.db.Query(query, from) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var msg types.HealthCheckChallengeLogMessage + err := rows.Scan(&msg.ID, &msg.ChallengeID, &msg.MessageType, &msg.Data, &msg.Sender, &msg.CreatedAt, &msg.UpdatedAt) + if err != nil { + return nil, err + } + messages = append(messages, msg) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return messages, nil +} + +// GetHCSummaryStats get health-check summary stats +func (s *SQLiteStore) GetHCSummaryStats(from time.Time) (hcMetrics metrics.HCMetrics, err error) { + hcStats := metrics.HCMetrics{} + hcMetrics, err = s.GetTotalHCGeneratedAndProcessedAndEvaluated(from) + if err != nil { + return hcMetrics, err + } + hcStats.TotalChallenges = hcMetrics.TotalChallenges + hcStats.TotalChallengesProcessed = hcMetrics.TotalChallengesProcessed + hcStats.TotalChallengesEvaluatedByChallenger = hcMetrics.TotalChallengesEvaluatedByChallenger + + hcObserversEvaluations, err := s.GetHCObserversEvaluations(from) + if err != nil { + return hcMetrics, err + } + 
log.WithField("observer_evaluations", len(hcObserversEvaluations)).Info("observer evaluations retrieved") + + observerEvaluationMetrics := processHCObserverEvaluations(hcObserversEvaluations) + log.WithField("observer_evaluation_metrics", len(observerEvaluationMetrics)).Info("observer evaluation metrics retrieved") + + for _, obMetrics := range observerEvaluationMetrics { + if obMetrics.ChallengesVerified >= 3 { + hcMetrics.TotalChallengesVerified++ + } else { + if obMetrics.FailedByInvalidTimestamps > 0 { + hcMetrics.SlowResponsesObservedByObservers++ + } + if obMetrics.FailedByInvalidSignatures > 0 { + hcMetrics.InvalidSignaturesObservedByObservers++ + } + if obMetrics.FailedByInvalidEvaluation > 0 { + hcMetrics.InvalidEvaluationObservedByObservers++ + } + } + } + + return hcMetrics, nil +} + +// GetHealthCheckChallengeMetricsByChallengeID gets the health-check challenge by ID +func (s *SQLiteStore) GetHealthCheckChallengeMetricsByChallengeID(challengeID string) ([]types.HealthCheckChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM healthcheck_challenge_metrics + WHERE challenge_id = ?;` + + rows, err := s.db.Query(query, challengeID) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.HealthCheckChallengeLogMessage + for rows.Next() { + var m types.HealthCheckChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetMetricsDataByHealthCheckChallengeID gets the metrics data by health-check challenge id +func (s *SQLiteStore) GetMetricsDataByHealthCheckChallengeID(ctx context.Context, challengeID string) (healthCheckChallengeMessages []types.HealthCheckMessage, err error) { + hcMetrics, err := s.GetHealthCheckChallengeMetricsByChallengeID(challengeID) + if err != 
nil { + return healthCheckChallengeMessages, err + } + log.WithContext(ctx).WithField("rows", len(hcMetrics)).Info("health-check-challenge metrics row count") + + for _, hcMetric := range hcMetrics { + msg := types.HealthCheckMessageData{} + if err := json.Unmarshal(hcMetric.Data, &msg); err != nil { + return healthCheckChallengeMessages, fmt.Errorf("cannot unmarshal health check challenge data: %w", err) + } + + healthCheckChallengeMessages = append(healthCheckChallengeMessages, types.HealthCheckMessage{ + ChallengeID: hcMetric.ChallengeID, + MessageType: types.HealthCheckMessageType(hcMetric.MessageType), + Sender: hcMetric.Sender, + SenderSignature: hcMetric.SenderSignature, + Data: msg, + }) + } + + return healthCheckChallengeMessages, nil +} + +// InsertHealthCheckChallengeMessage inserts failed healthcheck challenge to db +func (s *SQLiteStore) InsertHealthCheckChallengeMessage(challenge types.HealthCheckChallengeLogMessage) error { + now := time.Now().UTC() + const insertQuery = "INSERT INTO healthcheck_challenge_messages(id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?);" + _, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.MessageType, challenge.Data, challenge.Sender, challenge.SenderSignature, now, now) + + if err != nil { + return err + } + + return nil +} + +// InsertHealthCheckChallengeMetric inserts the health-check challenge metrics +func (s *SQLiteStore) InsertHealthCheckChallengeMetric(m types.HealthCheckChallengeMetric) error { + now := time.Now().UTC() + + const metricsQuery = "INSERT INTO healthcheck_challenge_metrics(id, challenge_id, message_type, data, sender_id, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?) 
ON CONFLICT DO NOTHING;" + _, err := s.db.Exec(metricsQuery, m.ChallengeID, m.MessageType, m.Data, m.SenderID, now, now) + if err != nil { + return err + } + + return nil +} + +// BatchInsertHCMetrics inserts the health-check challenges in a batch +func (s *SQLiteStore) BatchInsertHCMetrics(metrics []types.HealthCheckChallengeLogMessage) error { + tx, err := s.db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(` + INSERT OR IGNORE INTO healthcheck_challenge_metrics + (id, challenge_id, message_type, data, sender_id, created_at, updated_at) + VALUES (NULL,?,?,?,?,?,?) + `) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + for _, metric := range metrics { + now := time.Now().UTC() + + _, err = stmt.Exec(metric.ChallengeID, metric.MessageType, metric.Data, metric.Sender, now, now) + if err != nil { + tx.Rollback() + return err + } + } + + // Commit the transaction + return tx.Commit() +} + +// HealthCheckChallengeMetrics retrieves all the metrics needs to be broadcast +func (s *SQLiteStore) HealthCheckChallengeMetrics(timestamp time.Time) ([]types.HealthCheckChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM healthcheck_challenge_metrics + WHERE created_at > ? 
+ ` + + rows, err := s.db.Query(query, timestamp) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.HealthCheckChallengeLogMessage + for rows.Next() { + var m types.HealthCheckChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// InsertBroadcastHealthCheckMessage inserts healthcheck healthcheck challenge msg to db +func (s *SQLiteStore) InsertBroadcastHealthCheckMessage(challenge types.BroadcastHealthCheckLogMessage) error { + now := time.Now().UTC() + const insertQuery = "INSERT INTO broadcast_healthcheck_challenge_messages(id, challenge_id, data, challenger, recipient, observers, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?);" + _, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.Data, challenge.Challenger, challenge.Recipient, challenge.Observers, now, now) + if err != nil { + return err + } + + return nil +} + +// QueryHCChallengeMessage retrieves healthcheck challenge message against challengeID and messageType +func (s *SQLiteStore) QueryHCChallengeMessage(challengeID string, messageType int) (challengeMessage types.HealthCheckChallengeLogMessage, err error) { + const selectQuery = "SELECT * FROM healthcheck_challenge_messages WHERE challenge_id=? AND message_type=?" 
+ err = s.db.QueryRow(selectQuery, challengeID, messageType).Scan( + &challengeMessage.ID, &challengeMessage.ChallengeID, &challengeMessage.MessageType, &challengeMessage.Data, + &challengeMessage.Sender, &challengeMessage.SenderSignature, &challengeMessage.CreatedAt, &challengeMessage.UpdatedAt) + + if err != nil { + return challengeMessage, err + } + + return challengeMessage, nil +} + +// GetHCMetricsByChallengeIDAndMessageType retrieves all the metrics by challengeID and messageType +func (s *SQLiteStore) GetHCMetricsByChallengeIDAndMessageType(challengeID string, messageType types.HealthCheckMessageType) ([]types.HealthCheckChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM healthcheck_challenge_metrics + WHERE challenge_id = ? + AND message_type = ?;` + + rows, err := s.db.Query(query, challengeID, int(messageType)) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.HealthCheckChallengeLogMessage + for rows.Next() { + var m types.HealthCheckChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +func processHCObserverEvaluations(observersEvaluations []types.HealthCheckChallengeLogMessage) map[string]HCObserverEvaluationMetrics { + evaluationMap := make(map[string]HCObserverEvaluationMetrics) + + for _, observerEvaluation := range observersEvaluations { + var oe types.HealthCheckMessageData + if err := json.Unmarshal(observerEvaluation.Data, &oe); err != nil { + continue + } + + oem, exists := evaluationMap[observerEvaluation.ChallengeID] + if !exists { + oem = HCObserverEvaluationMetrics{} // Initialize if not exists + } + + if isHCObserverEvaluationVerified(oe.ObserverEvaluation) { + oem.ChallengesVerified++ + } else { + if 
!oe.ObserverEvaluation.IsChallengeTimestampOK || + !oe.ObserverEvaluation.IsProcessTimestampOK || + !oe.ObserverEvaluation.IsEvaluationTimestampOK { + oem.FailedByInvalidTimestamps++ + } + + if !oe.ObserverEvaluation.IsChallengerSignatureOK || + !oe.ObserverEvaluation.IsRecipientSignatureOK { + oem.FailedByInvalidSignatures++ + } + + if !oe.ObserverEvaluation.IsEvaluationResultOK { + oem.FailedByInvalidEvaluation++ + } + } + + evaluationMap[observerEvaluation.ChallengeID] = oem + } + + return evaluationMap +} + +func isHCObserverEvaluationVerified(observerEvaluation types.HealthCheckObserverEvaluationData) bool { + if !observerEvaluation.IsEvaluationResultOK { + return false + } + + if !observerEvaluation.IsChallengerSignatureOK { + return false + } + + if !observerEvaluation.IsRecipientSignatureOK { + return false + } + + if !observerEvaluation.IsChallengeTimestampOK { + return false + } + + if !observerEvaluation.IsProcessTimestampOK { + return false + } + + if !observerEvaluation.IsEvaluationTimestampOK { + return false + } + + return true +} + +// GetDistinctHCChallengeIDsCountForScoreAggregation gets the count of distinct challenge ids for score aggregation +func (s *SQLiteStore) GetDistinctHCChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error) { + query := ` + SELECT COUNT(DISTINCT challenge_id) + FROM healthcheck_challenge_metrics + WHERE message_type = 4 AND created_at >= ? AND created_at < ? 
// GetDistinctHCChallengeIDs retrieves the distinct challenge ids for score aggregation.
//
// Results are paginated: batchNumber selects the page, with the page size
// taken from the package-level batchSizeForChallengeIDsRetrieval constant.
// Only rows with message_type = 4 created in [after, before) are considered.
func (s *SQLiteStore) GetDistinctHCChallengeIDs(after, before time.Time, batchNumber int) ([]string, error) {
	offset := batchNumber * batchSizeForChallengeIDsRetrieval

	query := `
	SELECT DISTINCT challenge_id
	FROM healthcheck_challenge_metrics
	WHERE message_type = 4 AND created_at >= ? AND created_at < ?
	LIMIT ? OFFSET ?
	`

	rows, err := s.db.Query(query, after, before, batchSizeForChallengeIDsRetrieval, offset)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var challengeIDs []string
	for rows.Next() {
		var challengeID string
		if err := rows.Scan(&challengeID); err != nil {
			return nil, err
		}
		challengeIDs = append(challengeIDs, challengeID)
	}

	if err = rows.Err(); err != nil {
		return nil, err
	}

	return challengeIDs, nil
}
// UpsertPingHistory inserts/update ping information into the ping_history table.
//
// supernode_id is the conflict key: an existing row for the same supernode
// is updated in place (all counters, flags, last_seen and timings taken
// from the excluded row); otherwise a new row is inserted. created_at is
// only set on first insert; updated_at is refreshed on every upsert.
func (s *SQLiteStore) UpsertPingHistory(pingInfo types.PingInfo) error {
	now := time.Now().UTC()

	const upsertQuery = `
        INSERT INTO ping_history (
            supernode_id, ip_address, total_pings, total_successful_pings,
            avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time,
            created_at, updated_at
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(supernode_id)
        DO UPDATE SET
            total_pings = excluded.total_pings,
            total_successful_pings = excluded.total_successful_pings,
            avg_ping_response_time = excluded.avg_ping_response_time,
            is_online = excluded.is_online,
            is_on_watchlist = excluded.is_on_watchlist,
            is_adjusted = excluded.is_adjusted,
            last_seen = excluded.last_seen,
            cumulative_response_time = excluded.cumulative_response_time,
            updated_at = excluded.updated_at;`

	// last_seen is passed as its inner Time value (pingInfo.LastSeen.Time),
	// not the wrapper — presumably a nullable-time type; confirm with types.PingInfo.
	_, err := s.db.Exec(upsertQuery,
		pingInfo.SupernodeID, pingInfo.IPAddress, pingInfo.TotalPings,
		pingInfo.TotalSuccessfulPings, pingInfo.AvgPingResponseTime,
		pingInfo.IsOnline, pingInfo.IsOnWatchlist, pingInfo.IsAdjusted, pingInfo.LastSeen.Time, pingInfo.CumulativeResponseTime, now, now)
	if err != nil {
		return err
	}

	return nil
}
total_pings, total_successful_pings, + avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time, + created_at, updated_at + FROM ping_history + WHERE is_on_watchlist = true AND is_adjusted = false;` + + rows, err := s.db.Query(selectQuery) + if err != nil { + return nil, err + } + defer rows.Close() + + var pingInfos types.PingInfos + for rows.Next() { + var pingInfo types.PingInfo + if err := rows.Scan( + &pingInfo.ID, &pingInfo.SupernodeID, &pingInfo.IPAddress, &pingInfo.TotalPings, + &pingInfo.TotalSuccessfulPings, &pingInfo.AvgPingResponseTime, + &pingInfo.IsOnline, &pingInfo.IsOnWatchlist, &pingInfo.IsAdjusted, &pingInfo.LastSeen, &pingInfo.CumulativeResponseTime, + &pingInfo.CreatedAt, &pingInfo.UpdatedAt, + ); err != nil { + return nil, err + } + pingInfos = append(pingInfos, pingInfo) + } + + if err = rows.Err(); err != nil { + return nil, err + } + + return pingInfos, nil +} + +// UpdatePingInfo updates the ping info +func (s *SQLiteStore) UpdatePingInfo(supernodeID string, isOnWatchlist, isAdjusted bool) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET is_adjusted = ?, is_on_watchlist = ? +WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, isAdjusted, isOnWatchlist, supernodeID) + if err != nil { + return err + } + + return nil +} + +// UpdateMetricsBroadcastTimestamp updates the ping info metrics_last_broadcast_at +func (s *SQLiteStore) UpdateMetricsBroadcastTimestamp(nodeID string) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET metrics_last_broadcast_at = ? 
+WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, time.Now().UTC(), nodeID) + if err != nil { + return err + } + + return nil +} + +// UpdateGenerationMetricsBroadcastTimestamp updates the ping info generation_metrics_last_broadcast_at +func (s *SQLiteStore) UpdateGenerationMetricsBroadcastTimestamp(nodeID string) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET generation_metrics_last_broadcast_at = ? +WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, time.Now().Add(-180*time.Minute).UTC(), nodeID) + if err != nil { + return err + } + + return nil +} + +// UpdateExecutionMetricsBroadcastTimestamp updates the ping info execution_metrics_last_broadcast_at +func (s *SQLiteStore) UpdateExecutionMetricsBroadcastTimestamp(nodeID string) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET execution_metrics_last_broadcast_at = ? +WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, time.Now().Add(-180*time.Minute).UTC(), nodeID) + if err != nil { + return err + } + + return nil +} + +// UpdateSCMetricsBroadcastTimestamp updates the SC metrics last broadcast at timestamp +func (s *SQLiteStore) UpdateSCMetricsBroadcastTimestamp(nodeID string, updatedAt time.Time) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET metrics_last_broadcast_at = ? 
+WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, time.Now().UTC().Add(-180*time.Minute), nodeID) + if err != nil { + return err + } + + return nil +} + +// GetAllPingInfos retrieves all ping infos +func (s *SQLiteStore) GetAllPingInfos() (types.PingInfos, error) { + const selectQuery = ` + SELECT id, supernode_id, ip_address, total_pings, total_successful_pings, + avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time, + created_at, updated_at + FROM ping_history + ` + rows, err := s.db.Query(selectQuery) + if err != nil { + return nil, err + } + defer rows.Close() + + var pingInfos types.PingInfos + for rows.Next() { + + var pingInfo types.PingInfo + if err := rows.Scan( + &pingInfo.ID, &pingInfo.SupernodeID, &pingInfo.IPAddress, &pingInfo.TotalPings, + &pingInfo.TotalSuccessfulPings, &pingInfo.AvgPingResponseTime, + &pingInfo.IsOnline, &pingInfo.IsOnWatchlist, &pingInfo.IsAdjusted, &pingInfo.LastSeen, &pingInfo.CumulativeResponseTime, + &pingInfo.CreatedAt, &pingInfo.UpdatedAt, + ); err != nil { + return nil, err + } + pingInfos = append(pingInfos, pingInfo) + } + + if err = rows.Err(); err != nil { + return nil, err + } + + return pingInfos, nil +} + +// GetAllPingInfoForOnlineNodes retrieves all ping infos for nodes that are online +func (s *SQLiteStore) GetAllPingInfoForOnlineNodes() (types.PingInfos, error) { + const selectQuery = ` + SELECT id, supernode_id, ip_address, total_pings, total_successful_pings, + avg_ping_response_time, is_online, is_on_watchlist, is_adjusted, last_seen, cumulative_response_time, + metrics_last_broadcast_at, generation_metrics_last_broadcast_at, execution_metrics_last_broadcast_at, + created_at, updated_at + FROM ping_history + WHERE is_online = true` + rows, err := s.db.Query(selectQuery) + if err != nil { + return nil, err + } + defer rows.Close() + + var pingInfos types.PingInfos + for rows.Next() { + + var pingInfo types.PingInfo + if 
err := rows.Scan( + &pingInfo.ID, &pingInfo.SupernodeID, &pingInfo.IPAddress, &pingInfo.TotalPings, + &pingInfo.TotalSuccessfulPings, &pingInfo.AvgPingResponseTime, + &pingInfo.IsOnline, &pingInfo.IsOnWatchlist, &pingInfo.IsAdjusted, &pingInfo.LastSeen, &pingInfo.CumulativeResponseTime, + &pingInfo.MetricsLastBroadcastAt, &pingInfo.GenerationMetricsLastBroadcastAt, &pingInfo.ExecutionMetricsLastBroadcastAt, + &pingInfo.CreatedAt, &pingInfo.UpdatedAt, + ); err != nil { + return nil, err + } + pingInfos = append(pingInfos, pingInfo) + } + + if err = rows.Err(); err != nil { + return nil, err + } + + return pingInfos, nil +} + +// UpdateHCMetricsBroadcastTimestamp updates health-check challenges last broadcast at +func (s *SQLiteStore) UpdateHCMetricsBroadcastTimestamp(nodeID string, updatedAt time.Time) error { + // Update query + const updateQuery = ` +UPDATE ping_history +SET health_check_metrics_last_broadcast_at = ? +WHERE supernode_id = ?;` + + // Execute the update query + _, err := s.db.Exec(updateQuery, time.Now().UTC().Add(-180*time.Minute), nodeID) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/storage/queries/self_healing.go b/pkg/storage/queries/self_healing.go new file mode 100644 index 00000000..5a4731f3 --- /dev/null +++ b/pkg/storage/queries/self_healing.go @@ -0,0 +1,644 @@ +package queries + +import ( + "context" + "fmt" + "time" + + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/types" + "github.com/LumeraProtocol/supernode/pkg/utils/metrics" + json "github.com/json-iterator/go" +) + +type SelfHealingQueries interface { + BatchInsertSelfHealingChallengeEvents(ctx context.Context, event []types.SelfHealingChallengeEvent) error + UpdateSHChallengeEventProcessed(challengeID string, isProcessed bool) error + GetSelfHealingChallengeEvents() ([]types.SelfHealingChallengeEvent, error) + CleanupSelfHealingChallenges() (err error) + QuerySelfHealingChallenges() (challenges 
[]types.SelfHealingChallenge, err error) + + QueryMetrics(ctx context.Context, from time.Time, to *time.Time) (m metrics.Metrics, err error) + InsertSelfHealingGenerationMetrics(metrics types.SelfHealingGenerationMetric) error + InsertSelfHealingExecutionMetrics(metrics types.SelfHealingExecutionMetric) error + BatchInsertExecutionMetrics(metrics []types.SelfHealingExecutionMetric) error + GetSelfHealingGenerationMetrics(timestamp time.Time) ([]types.SelfHealingGenerationMetric, error) + GetSelfHealingExecutionMetrics(timestamp time.Time) ([]types.SelfHealingExecutionMetric, error) + GetLastNSHChallenges(ctx context.Context, n int) (types.SelfHealingReports, error) + GetSHChallengeReport(ctx context.Context, challengeID string) (types.SelfHealingReports, error) + GetSHExecutionMetrics(ctx context.Context, from time.Time) (metrics.SHExecutionMetrics, error) +} + +var ( + oneYearAgo = time.Now().AddDate(-1, 0, 0) +) + +// SHChallengeMetric represents the self-healing challenge metric +type SHChallengeMetric struct { + ChallengeID string + + // healer node + IsAck bool + IsAccepted bool + IsRejected bool + + // verifier nodes + HasMinVerifications bool + IsVerified bool + IsReconstructionRequiredVerified bool + IsReconstructionNotRequiredVerified bool + IsUnverified bool + IsReconstructionRequiredNotVerified bool + IsReconstructionNotRequiredNotVerified bool + IsReconstructionRequiredHashMismatch bool + + IsHealed bool +} + +type HCObserverEvaluationMetrics struct { + ChallengesVerified int + FailedByInvalidTimestamps int + FailedByInvalidSignatures int + FailedByInvalidEvaluation int +} + +type ObserverEvaluationMetrics struct { + ChallengesVerified int + FailedByInvalidTimestamps int + FailedByInvalidSignatures int + FailedByInvalidEvaluation int +} + +// InsertSelfHealingGenerationMetrics inserts self-healing generation metrics +func (s *SQLiteStore) InsertSelfHealingGenerationMetrics(metrics types.SelfHealingGenerationMetric) error { + now := time.Now().UTC() + 
const insertQuery = "INSERT INTO self_healing_generation_metrics(id, trigger_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?) ON CONFLICT DO NOTHING;" + _, err := s.db.Exec(insertQuery, metrics.TriggerID, metrics.MessageType, metrics.Data, metrics.SenderID, metrics.SenderSignature, now, now) + if err != nil { + return err + } + + return nil +} + +// InsertSelfHealingExecutionMetrics inserts self-healing execution metrics +func (s *SQLiteStore) InsertSelfHealingExecutionMetrics(metrics types.SelfHealingExecutionMetric) error { + now := time.Now().UTC() + const insertQuery = "INSERT INTO self_healing_execution_metrics(id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?,?) ON CONFLICT DO NOTHING;" + + _, err := s.db.Exec(insertQuery, metrics.TriggerID, metrics.ChallengeID, metrics.MessageType, metrics.Data, metrics.SenderID, metrics.SenderSignature, now, now) + if err != nil { + return err + } + + return nil +} + +// BatchInsertExecutionMetrics inserts execution metrics in a batch +func (s *SQLiteStore) BatchInsertExecutionMetrics(metrics []types.SelfHealingExecutionMetric) error { + tx, err := s.db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(` + INSERT OR IGNORE INTO self_healing_execution_metrics + (id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) + VALUES (NULL,?,?,?,?,?,?,?,?) 
+ `) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + for _, metric := range metrics { + now := time.Now().UTC() + + _, err = stmt.Exec(metric.TriggerID, metric.ChallengeID, metric.MessageType, metric.Data, metric.SenderID, metric.SenderSignature, now, now) + if err != nil { + tx.Rollback() + return err + } + } + + // Commit the transaction + return tx.Commit() +} + +// GetSelfHealingExecutionMetrics retrieves all self_healing_execution_metrics records created after the specified timestamp. +func (s *SQLiteStore) GetSelfHealingExecutionMetrics(timestamp time.Time) ([]types.SelfHealingExecutionMetric, error) { + const query = ` + SELECT id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at + FROM self_healing_execution_metrics + WHERE created_at > ? + ` + + rows, err := s.db.Query(query, timestamp) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.SelfHealingExecutionMetric + for rows.Next() { + var m types.SelfHealingExecutionMetric + if err := rows.Scan(&m.ID, &m.TriggerID, &m.ChallengeID, &m.MessageType, &m.Data, &m.SenderID, &m.SenderSignature, &m.CreatedAt, &m.UpdatedAt); err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetSelfHealingGenerationMetrics retrieves all self_healing_generation_metrics records created after the specified timestamp. +func (s *SQLiteStore) GetSelfHealingGenerationMetrics(timestamp time.Time) ([]types.SelfHealingGenerationMetric, error) { + const query = ` + SELECT id, trigger_id, message_type, data, sender_id, sender_signature, created_at, updated_at + FROM self_healing_generation_metrics + WHERE created_at > ? 
+ ` + + rows, err := s.db.Query(query, timestamp) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.SelfHealingGenerationMetric + for rows.Next() { + var m types.SelfHealingGenerationMetric + if err := rows.Scan(&m.ID, &m.TriggerID, &m.MessageType, &m.Data, &m.SenderID, &m.SenderSignature, &m.CreatedAt, &m.UpdatedAt); err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetLastNSCMetrics gets the N number of latest challenge IDs from the DB +func (s *SQLiteStore) GetLastNSCMetrics() ([]types.NScMetric, error) { + const query = ` +SELECT + count(*) AS count, + challenge_id, + MAX(created_at) AS most_recent +FROM + storage_challenge_metrics +GROUP BY + challenge_id +HAVING + count(*) > 5 +ORDER BY + most_recent DESC +LIMIT 20;` + + rows, err := s.db.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.NScMetric + for rows.Next() { + var m types.NScMetric + err := rows.Scan(&m.Count, &m.ChallengeID, &m.CreatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetLastNHCMetrics gets the N number of latest health-check challenge IDs from the DB +func (s *SQLiteStore) GetLastNHCMetrics() ([]types.NHcMetric, error) { + const query = ` +SELECT + count(*) AS count, + challenge_id, + MAX(created_at) AS most_recent +FROM + healthcheck_challenge_metrics +GROUP BY + challenge_id +HAVING + count(*) > 5 +ORDER BY + most_recent DESC +LIMIT 20;` + + rows, err := s.db.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.NHcMetric + for rows.Next() { + var m types.NHcMetric + err := rows.Scan(&m.Count, &m.ChallengeID, &m.CreatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetSHExecutionMetrics retrieves self-healing execution metrics +func (s *SQLiteStore) 
GetSHExecutionMetrics(ctx context.Context, from time.Time) (metrics.SHExecutionMetrics, error) { + m := metrics.SHExecutionMetrics{} + rows, err := s.GetSelfHealingExecutionMetrics(from) + if err != nil { + return m, err + } + log.WithContext(ctx).WithField("rows", len(rows)).Info("self-healing execution metrics row count") + + challenges := make(map[string]SHChallengeMetric) + for _, row := range rows { + if _, ok := challenges[row.ChallengeID]; !ok { + challenges[row.ChallengeID] = SHChallengeMetric{ + ChallengeID: row.ChallengeID, + } + } + + if row.MessageType == int(types.SelfHealingVerificationMessage) { + messages := types.SelfHealingMessages{} + if err := json.Unmarshal(row.Data, &messages); err != nil { + return m, fmt.Errorf("cannot unmarshal self healing execution message type 3: %w - row ID: %d", err, row.ID) + } + + if len(messages) >= minVerifications { + ch := challenges[row.ChallengeID] + ch.HasMinVerifications = true + challenges[row.ChallengeID] = ch + } + + reconReqVerified := 0 + reconNotReqVerified := 0 + reconReqUnverified := 0 + reconNotReqUnverified := 0 + reconReqHashMismatch := 0 + + for _, message := range messages { + if message.SelfHealingMessageData.Verification.VerifiedTicket.IsReconstructionRequired { + if message.SelfHealingMessageData.Verification.VerifiedTicket.IsReconstructionRequiredByHealer { + if message.SelfHealingMessageData.Verification.VerifiedTicket.IsVerified { + reconReqVerified++ + } else { + reconReqHashMismatch++ + } + } else { + reconNotReqUnverified++ + } + } else { + if message.SelfHealingMessageData.Verification.VerifiedTicket.IsReconstructionRequiredByHealer { + reconReqUnverified++ + } else { + reconNotReqVerified++ + } + } + } + + if reconReqVerified >= minVerifications { + ch := challenges[row.ChallengeID] + ch.IsVerified = true + ch.IsReconstructionRequiredVerified = true + challenges[row.ChallengeID] = ch + } else if reconNotReqVerified >= minVerifications { + ch := challenges[row.ChallengeID] + 
ch.IsVerified = true + ch.IsReconstructionNotRequiredVerified = true + challenges[row.ChallengeID] = ch + } else if reconReqUnverified >= minVerifications { + ch := challenges[row.ChallengeID] + ch.IsUnverified = true + ch.IsReconstructionRequiredNotVerified = true + challenges[row.ChallengeID] = ch + } else if reconNotReqUnverified >= minVerifications { + ch := challenges[row.ChallengeID] + ch.IsUnverified = true + ch.IsReconstructionNotRequiredNotVerified = true + challenges[row.ChallengeID] = ch + } else if reconReqHashMismatch >= minVerifications { + ch := challenges[row.ChallengeID] + ch.IsReconstructionRequiredHashMismatch = true + challenges[row.ChallengeID] = ch + } + + } else if row.MessageType == int(types.SelfHealingResponseMessage) { + messages := types.SelfHealingMessages{} + if err := json.Unmarshal(row.Data, &messages); err != nil { + return m, fmt.Errorf("cannot unmarshal self healing execution message type 3: %w - row ID: %d", err, row.ID) + } + if len(messages) == 0 { + return m, fmt.Errorf("len of selfhealing messages should not be 0 - problem with row ID %d", row.ID) + } + + data := messages[0].SelfHealingMessageData + + ch := challenges[row.ChallengeID] + if data.Response.RespondedTicket.IsReconstructionRequired { + ch.IsAccepted = true + } else { + ch.IsRejected = true + } + challenges[row.ChallengeID] = ch + + } else if row.MessageType == int(types.SelfHealingCompletionMessage) { + ch := challenges[row.ChallengeID] + ch.IsHealed = true + challenges[row.ChallengeID] = ch + } else if row.MessageType == int(types.SelfHealingAcknowledgementMessage) { + ch := challenges[row.ChallengeID] + ch.IsAck = true + challenges[row.ChallengeID] = ch + } + } + + log.WithContext(ctx).WithField("challenges", len(challenges)).Info("self-healing execution metrics challenges count") + + for _, challenge := range challenges { + log.WithContext(ctx).WithField("challenge-id", challenge.ChallengeID).WithField("is-accepted", challenge.IsAccepted). 
+ WithField("is-verified", challenge.IsVerified).WithField("is-healed", challenge.IsHealed). + Info("self-healing challenge metric") + + if challenge.IsAck { + m.TotalChallengesAcknowledged++ + } + + if challenge.IsAccepted { + m.TotalChallengesAccepted++ + } + + if challenge.IsRejected { + m.TotalChallengesRejected++ + } + + if challenge.IsVerified { + m.TotalChallengeEvaluationsVerified++ + } + + if challenge.IsReconstructionRequiredVerified { + m.TotalReconstructionsApproved++ + } + + if challenge.IsReconstructionNotRequiredVerified { + m.TotalReconstructionsNotRquiredApproved++ + } + + if challenge.IsUnverified { + m.TotalChallengeEvaluationsUnverified++ + } + + if challenge.IsReconstructionRequiredNotVerified { + m.TotalReconstructionsNotApproved++ + } + + if challenge.IsReconstructionNotRequiredNotVerified { + m.TotalReconstructionsNotRequiredEvaluationNotApproved++ + } + + if challenge.IsReconstructionRequiredHashMismatch { + m.TotalReconstructionRequiredHashMismatch++ + } + + if challenge.IsHealed { + m.TotalFilesHealed++ + } + } + + return m, nil +} + +// QueryMetrics queries the self-healing metrics +func (s *SQLiteStore) QueryMetrics(ctx context.Context, from time.Time, _ *time.Time) (m metrics.Metrics, err error) { + genMetric, err := s.GetSelfHealingGenerationMetrics(from) + if err != nil { + return metrics.Metrics{}, err + } + + te := metrics.SHTriggerMetrics{} + challengesIssued := 0 + for _, metric := range genMetric { + t := metrics.SHTriggerMetric{} + data := types.SelfHealingMessages{} + if err := json.Unmarshal(metric.Data, &data); err != nil { + return metrics.Metrics{}, fmt.Errorf("cannot unmarshal self healing generation message type 3: %w", err) + } + + if len(data) < 1 { + return metrics.Metrics{}, fmt.Errorf("len of selfhealing messages data JSON should not be 0") + } + + t.TriggerID = metric.TriggerID + t.ListOfNodes = data[0].SelfHealingMessageData.Challenge.NodesOnWatchlist + t.TotalTicketsIdentified = 
len(data[0].SelfHealingMessageData.Challenge.ChallengeTickets) + + for _, ticket := range data[0].SelfHealingMessageData.Challenge.ChallengeTickets { + t.TotalFilesIdentified += len(ticket.MissingKeys) + } + + challengesIssued += t.TotalTicketsIdentified + + te = append(te, t) + } + + em, err := s.GetSHExecutionMetrics(ctx, from) + if err != nil { + return metrics.Metrics{}, fmt.Errorf("cannot get self healing execution metrics: %w", err) + } + + em.TotalChallengesIssued = challengesIssued + em.TotalFileHealingFailed = em.TotalReconstructionsApproved - em.TotalFilesHealed + + m.SHTriggerMetrics = te + + m.SHExecutionMetrics = em + + return m, nil +} + +// GetLastNSHChallenges retrieves the latest 'N' self-healing challenges +func (s *SQLiteStore) GetLastNSHChallenges(ctx context.Context, n int) (types.SelfHealingReports, error) { + challenges := types.SelfHealingReports{} + rows, err := s.GetSelfHealingExecutionMetrics(oneYearAgo) + if err != nil { + return challenges, err + } + log.WithContext(ctx).WithField("rows", len(rows)).Info("self-healing execution metrics row count") + + challengesInserted := 0 + for _, row := range rows { + if _, ok := challenges[row.ChallengeID]; !ok { + if challengesInserted == n { + continue + } + + challenges[row.ChallengeID] = types.SelfHealingReport{} + challengesInserted++ + } + + messages := types.SelfHealingMessages{} + if err := json.Unmarshal(row.Data, &messages); err != nil { + return challenges, fmt.Errorf("cannot unmarshal self healing execution message type 3: %w", err) + } + + msgType := types.SelfHealingMessageType(row.MessageType) + challenges[row.ChallengeID][msgType.String()] = messages + } + + return challenges, nil +} + +// GetSHChallengeReport returns the self-healing report +func (s *SQLiteStore) GetSHChallengeReport(ctx context.Context, challengeID string) (types.SelfHealingReports, error) { + challenges := types.SelfHealingReports{} + rows, err := s.GetSelfHealingExecutionMetrics(oneYearAgo) + if err != nil { + 
return challenges, err
	}
	log.WithContext(ctx).WithField("rows", len(rows)).Info("self-healing execution metrics row count")

	for _, row := range rows {
		if row.ChallengeID == challengeID {
			if _, ok := challenges[row.ChallengeID]; !ok {
				challenges[row.ChallengeID] = types.SelfHealingReport{}
			}

			messages := types.SelfHealingMessages{}
			if err := json.Unmarshal(row.Data, &messages); err != nil {
				// Fix: report the actual message type instead of the copy-pasted "type 3".
				return challenges, fmt.Errorf("cannot unmarshal self healing execution message type %d: %w", row.MessageType, err)
			}

			msgType := types.SelfHealingMessageType(row.MessageType)
			challenges[row.ChallengeID][msgType.String()] = messages
		}
	}

	return challenges, nil
}

// QuerySelfHealingChallenges retrieves self-healing audit logs stored in DB for self-healing
func (s *SQLiteStore) QuerySelfHealingChallenges() (challenges []types.SelfHealingChallenge, err error) {
	const selectQuery = "SELECT * FROM self_healing_challenges"
	rows, err := s.db.Query(selectQuery)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	for rows.Next() {
		challenge := types.SelfHealingChallenge{}
		err = rows.Scan(&challenge.ID, &challenge.ChallengeID, &challenge.MerkleRoot, &challenge.FileHash,
			&challenge.ChallengingNode, &challenge.RespondingNode, &challenge.VerifyingNode, &challenge.ReconstructedFileHash,
			&challenge.Status, &challenge.CreatedAt, &challenge.UpdatedAt)
		if err != nil {
			return nil, err
		}

		challenges = append(challenges, challenge)
	}

	// Fix: surface iteration errors, consistent with the other query helpers in this file.
	if err = rows.Err(); err != nil {
		return nil, err
	}

	return challenges, nil
}

// BatchInsertSelfHealingChallengeEvents inserts self-healing-challenge events in a batch.
// Each event is written to self_healing_challenge_events and its execution metric
// to self_healing_execution_metrics within a single transaction.
func (s *SQLiteStore) BatchInsertSelfHealingChallengeEvents(ctx context.Context, eventsBatch []types.SelfHealingChallengeEvent) error {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}

	stmt, err := tx.Prepare(`
        INSERT OR IGNORE INTO self_healing_challenge_events
        (trigger_id, ticket_id, challenge_id, data, sender_id, is_processed, created_at, updated_at)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?)
        `)
	if err != nil {
		tx.Rollback()
		return err
	}
	defer stmt.Close()

	stmt2, err := tx.Prepare(`
        INSERT OR IGNORE INTO self_healing_execution_metrics(id, trigger_id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at)
        VALUES(NULL,?,?,?,?,?,?,?,?);
        `)
	if err != nil {
		tx.Rollback()
		return err
	}
	defer stmt2.Close()

	for _, event := range eventsBatch {
		now := time.Now().UTC()

		_, err = stmt.Exec(event.TriggerID, event.TicketID, event.ChallengeID, event.Data, event.SenderID, false, now, now)
		if err != nil {
			tx.Rollback()
			return err
		}

		_, err = stmt2.Exec(event.ExecMetric.TriggerID, event.ExecMetric.ChallengeID, event.ExecMetric.MessageType, event.ExecMetric.Data, event.ExecMetric.SenderID, event.ExecMetric.SenderSignature, now, now)
		if err != nil {
			tx.Rollback()
			return err
		}
	}

	return tx.Commit()
}

// GetSelfHealingChallengeEvents retrieves the challenge events from DB that
// have not yet been processed (is_processed = false).
func (s *SQLiteStore) GetSelfHealingChallengeEvents() ([]types.SelfHealingChallengeEvent, error) {
	const selectQuery = `
        SELECT trigger_id, ticket_id, challenge_id, data, sender_id, is_processed, created_at, updated_at
        FROM self_healing_challenge_events
        WHERE is_processed = false
        `
	rows, err := s.db.Query(selectQuery)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var events []types.SelfHealingChallengeEvent

	for rows.Next() {
		var event types.SelfHealingChallengeEvent
		if err := rows.Scan(
			&event.TriggerID, &event.TicketID, &event.ChallengeID, &event.Data, &event.SenderID, &event.IsProcessed,
			&event.CreatedAt, &event.UpdatedAt,
		); err != nil {
			return nil, err
		}

		events = append(events, event)
	}

	// Fix: surface iteration errors, consistent with the other query helpers in this file.
	if err = rows.Err(); err != nil {
		return nil, err
	}

	return events, nil
}

// UpdateSHChallengeEventProcessed updates the is_processed flag of an event
func (s *SQLiteStore) UpdateSHChallengeEventProcessed(challengeID string, isProcessed bool) error {
	const updateQuery = `
	UPDATE
self_healing_challenge_events + SET is_processed = ? + WHERE challenge_id = ? + ` + _, err := s.db.Exec(updateQuery, isProcessed, challengeID) + return err +} + +// CleanupSelfHealingChallenges cleans up self-healing challenges stored in DB for inspection +func (s *SQLiteStore) CleanupSelfHealingChallenges() (err error) { + const delQuery = "DELETE FROM self_healing_challenges" + _, err = s.db.Exec(delQuery) + return err +} diff --git a/pkg/storage/queries/sqlite.go b/pkg/storage/queries/sqlite.go new file mode 100644 index 00000000..c1d4cb02 --- /dev/null +++ b/pkg/storage/queries/sqlite.go @@ -0,0 +1,413 @@ +package queries + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/LumeraProtocol/supernode/pkg/configurer" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/jmoiron/sqlx" + _ "github.com/mattn/go-sqlite3" //go-sqlite3 +) + +var ( + DefaulthPath = configurer.DefaultPath() +) + +const minVerifications = 3 +const createTaskHistory string = ` + CREATE TABLE IF NOT EXISTS task_history ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + time DATETIME NOT NULL, + task_id TEXT NOT NULL, + status TEXT NOT NULL + );` + +const alterTaskHistory string = `ALTER TABLE task_history ADD COLUMN details TEXT;` + +const createStorageChallengeMessages string = ` + CREATE TABLE IF NOT EXISTS storage_challenge_messages ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + sender_signature BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createBroadcastChallengeMessages string = ` + CREATE TABLE IF NOT EXISTS broadcast_challenge_messages ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + challenger TEXT NOT NULL, + recipient TEXT NOT NULL, + observers TEXT NOT NULL, + data BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const 
createStorageChallengeMessagesUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS storage_challenge_messages_unique ON storage_challenge_messages(challenge_id, message_type, sender_id); +` + +const createSelfHealingChallenges string = ` + CREATE TABLE IF NOT EXISTS self_healing_challenges ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + merkleroot TEXT NOT NULL, + file_hash TEXT NOT NULL, + challenging_node TEXT NOT NULL, + responding_node TEXT NOT NULL, + verifying_node TEXT, + reconstructed_file_hash BLOB, + status TEXT NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL + );` + +const createPingHistory string = ` + CREATE TABLE IF NOT EXISTS ping_history ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + supernode_id TEXT UNIQUE NOT NULL, + ip_address TEXT UNIQUE NOT NULL, + total_pings INTEGER NOT NULL, + total_successful_pings INTEGER NOT NULL, + avg_ping_response_time FLOAT NOT NULL, + is_online BOOLEAN NOT NULL, + is_on_watchlist BOOLEAN NOT NULL, + is_adjusted BOOLEAN NOT NULL, + cumulative_response_time FLOAT NOT NULL, + last_seen DATETIME NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL + );` + +const createPingHistoryUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS ping_history_unique ON ping_history(supernode_id, ip_address); +` + +const createSelfHealingGenerationMetrics string = ` + CREATE TABLE IF NOT EXISTS self_healing_generation_metrics ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + trigger_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + sender_signature BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createSelfHealingGenerationMetricsUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS self_healing_generation_metrics_unique ON self_healing_generation_metrics(trigger_id); +` + +const createSelfHealingExecutionMetrics string = ` + CREATE TABLE IF 
NOT EXISTS self_healing_execution_metrics ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + trigger_id TEXT NOT NULL, + challenge_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + sender_signature BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createSelfHealingChallengeTickets string = ` + CREATE TABLE IF NOT EXISTS self_healing_challenge_events ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + trigger_id TEXT NOT NULL, + ticket_id TEXT NOT NULL, + challenge_id TEXT NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + is_processed BOOLEAN NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +); +` + +const createSelfHealingChallengeTicketsUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS self_healing_challenge_events_unique ON self_healing_challenge_events(trigger_id, ticket_id, challenge_id); +` + +const createSelfHealingExecutionMetricsUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS self_healing_execution_metrics_unique ON self_healing_execution_metrics(trigger_id, challenge_id, message_type); +` + +const alterTablePingHistory = `ALTER TABLE ping_history +ADD COLUMN metrics_last_broadcast_at DATETIME NULL;` + +const alterTablePingHistoryGenerationMetrics = `ALTER TABLE ping_history +ADD COLUMN generation_metrics_last_broadcast_at DATETIME NULL;` + +const alterTablePingHistoryExecutionMetrics = `ALTER TABLE ping_history +ADD COLUMN execution_metrics_last_broadcast_at DATETIME NULL;` + +const createStorageChallengeMetrics string = ` + CREATE TABLE IF NOT EXISTS storage_challenge_metrics ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createStorageChallengeMetricsUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS 
storage_challenge_metrics_unique ON storage_challenge_metrics(challenge_id, message_type, sender_id); +` + +const createHealthCheckChallengeMessages string = ` + CREATE TABLE IF NOT EXISTS healthcheck_challenge_messages ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + sender_signature BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createBroadcastHealthCheckChallengeMessages string = ` + CREATE TABLE IF NOT EXISTS broadcast_healthcheck_challenge_messages ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + challenger TEXT NOT NULL, + recipient TEXT NOT NULL, + observers TEXT NOT NULL, + data BLOB NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +);` + +const createHealthCheckChallengeMetrics string = ` + CREATE TABLE IF NOT EXISTS healthcheck_challenge_metrics ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + challenge_id TEXT NOT NULL, + message_type INTEGER NOT NULL, + data BLOB NOT NULL, + sender_id TEXT NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL +); +` +const createHealthCheckChallengeMetricsUniqueIndex string = ` +CREATE UNIQUE INDEX IF NOT EXISTS healthcheck_challenge_metrics_unique ON healthcheck_challenge_metrics(challenge_id, message_type, sender_id); +` +const alterTablePingHistoryHealthCheckColumn = `ALTER TABLE ping_history +ADD COLUMN health_check_metrics_last_broadcast_at DATETIME NULL;` + +const createPingHistoryWithoutUniqueIPAddress string = ` +BEGIN TRANSACTION; + +CREATE TABLE IF NOT EXISTS new_ping_history ( + id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + supernode_id TEXT UNIQUE NOT NULL, + ip_address TEXT NOT NULL, -- Removed UNIQUE constraint here + total_pings INTEGER NOT NULL, + total_successful_pings INTEGER NOT NULL, + avg_ping_response_time FLOAT NOT NULL, + is_online BOOLEAN NOT NULL, + 
is_on_watchlist BOOLEAN NOT NULL, + is_adjusted BOOLEAN NOT NULL, + cumulative_response_time FLOAT NOT NULL, + last_seen DATETIME NOT NULL, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL, + metrics_last_broadcast_at DATETIME, -- Assuming these columns already exist in the old table + generation_metrics_last_broadcast_at DATETIME, + execution_metrics_last_broadcast_at DATETIME, + health_check_metrics_last_broadcast_at DATETIME +); + +-- Step 2: Copy data including all columns from the old table +INSERT INTO new_ping_history ( + id, + supernode_id, + ip_address, + total_pings, + total_successful_pings, + avg_ping_response_time, + is_online, + is_on_watchlist, + is_adjusted, + cumulative_response_time, + last_seen, + created_at, + updated_at, + metrics_last_broadcast_at, + generation_metrics_last_broadcast_at, + execution_metrics_last_broadcast_at, + health_check_metrics_last_broadcast_at +) +SELECT + id, + supernode_id, + ip_address, + total_pings, + total_successful_pings, + avg_ping_response_time, + is_online, + is_on_watchlist, + is_adjusted, + cumulative_response_time, + last_seen, + created_at, + updated_at, + metrics_last_broadcast_at, + generation_metrics_last_broadcast_at, + execution_metrics_last_broadcast_at, + health_check_metrics_last_broadcast_at +FROM ping_history; + +-- Step 3: Drop the original table +DROP TABLE ping_history; + +-- Step 4: Rename the new table to the original table's name +ALTER TABLE new_ping_history RENAME TO ping_history; + +COMMIT; +` + +const ( + historyDBName = "history.db" + emptyString = "" +) + +// SQLiteStore handles sqlite ops +type SQLiteStore struct { + db *sqlx.DB +} + +// CloseHistoryDB closes history database +func (s *SQLiteStore) CloseHistoryDB(ctx context.Context) { + if err := s.db.Close(); err != nil { + log.WithContext(ctx).WithError(err).Error("error closing history db") + } +} + +// OpenHistoryDB opens history DB +func OpenHistoryDB() (LocalStoreInterface, error) { + dbFile := 
filepath.Join(DefaulthPath, historyDBName) + db, err := sqlx.Connect("sqlite3", dbFile) + if err != nil { + return nil, fmt.Errorf("cannot open sqlite database: %w", err) + } + + if _, err := db.Exec(createTaskHistory); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createStorageChallengeMessages); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createStorageChallengeMessagesUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot execute migration: %w", err) + } + + if _, err := db.Exec(createBroadcastChallengeMessages); err != nil { + return nil, fmt.Errorf("cannot execute migration: %w", err) + } + + if _, err := db.Exec(createSelfHealingChallenges); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createPingHistory); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createPingHistoryUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createSelfHealingGenerationMetrics); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createSelfHealingGenerationMetricsUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createSelfHealingExecutionMetrics); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createSelfHealingExecutionMetricsUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createSelfHealingChallengeTickets); err != nil { + return nil, fmt.Errorf("cannot create createSelfHealingChallengeTickets: %w", err) + } + + if _, err := db.Exec(createSelfHealingChallengeTicketsUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create 
createSelfHealingChallengeTicketsUniqueIndex: %w", err) + } + + if _, err := db.Exec(createStorageChallengeMetrics); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createStorageChallengeMetricsUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createHealthCheckChallengeMessages); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createHealthCheckChallengeMetrics); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createHealthCheckChallengeMetricsUniqueIndex); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + if _, err := db.Exec(createBroadcastHealthCheckChallengeMessages); err != nil { + return nil, fmt.Errorf("cannot create table(s): %w", err) + } + + _, _ = db.Exec(alterTaskHistory) + + _, _ = db.Exec(alterTablePingHistory) + + _, _ = db.Exec(alterTablePingHistoryGenerationMetrics) + + _, _ = db.Exec(alterTablePingHistoryExecutionMetrics) + + _, _ = db.Exec(alterTablePingHistoryHealthCheckColumn) + + _, err = db.Exec(createPingHistoryWithoutUniqueIPAddress) + if err != nil { + log.WithError(err).Error("error executing ping-history w/o unique ip-address constraint migration") + } + + pragmas := []string{ + "PRAGMA synchronous=NORMAL;", + "PRAGMA cache_size=-262144;", + "PRAGMA busy_timeout=120000;", + "PRAGMA journal_mode=WAL;", + } + + for _, pragma := range pragmas { + if _, err := db.Exec(pragma); err != nil { + return nil, fmt.Errorf("cannot set sqlite database parameter: %w", err) + } + } + + return &SQLiteStore{ + db: db, + }, nil +} diff --git a/pkg/storage/queries/storage_challenge.go b/pkg/storage/queries/storage_challenge.go new file mode 100644 index 00000000..204fd0cc --- /dev/null +++ b/pkg/storage/queries/storage_challenge.go @@ -0,0 +1,493 @@ +package queries + +import ( + "context" + "fmt" + 
"time" + + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/types" + "github.com/LumeraProtocol/supernode/pkg/utils/metrics" + + json "github.com/json-iterator/go" +) + +const batchSizeForChallengeIDsRetrieval = 500 + +type StorageChallengeQueries interface { + InsertStorageChallengeMessage(challenge types.StorageChallengeLogMessage) error + InsertBroadcastMessage(challenge types.BroadcastLogMessage) error + QueryStorageChallengeMessage(challengeID string, messageType int) (challenge types.StorageChallengeLogMessage, err error) + CleanupStorageChallenges() (err error) + GetStorageChallengeMetricsByChallengeID(challengeID string) ([]types.StorageChallengeLogMessage, error) + GetMetricsByChallengeIDAndMessageType(challengeID string, messageType types.MessageType) ([]types.StorageChallengeLogMessage, error) + + BatchInsertSCMetrics(metrics []types.StorageChallengeLogMessage) error + StorageChallengeMetrics(timestamp time.Time) ([]types.StorageChallengeLogMessage, error) + InsertStorageChallengeMetric(metric types.StorageChallengeMetric) error + GetSCSummaryStats(from time.Time) (scMetrics metrics.SCMetrics, err error) + GetTotalSCGeneratedAndProcessedAndEvaluated(from time.Time) (metrics.SCMetrics, error) + GetChallengerEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error) + GetObserversEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error) + GetMetricsDataByStorageChallengeID(ctx context.Context, challengeID string) ([]types.Message, error) + GetLastNSCMetrics() ([]types.NScMetric, error) + GetDistinctChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error) + GetDistinctChallengeIDs(after, before time.Time, batchNumber int) ([]string, error) + BatchInsertScoreAggregationChallenges(challengeIDs []string, isAggregated bool) error +} + +// InsertStorageChallengeMessage inserts failed storage challenge to db +func (s *SQLiteStore) InsertStorageChallengeMessage(challenge 
types.StorageChallengeLogMessage) error { + now := time.Now().UTC() + const insertQuery = "INSERT INTO storage_challenge_messages(id, challenge_id, message_type, data, sender_id, sender_signature, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?) ON CONFLICT DO NOTHING;" + _, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.MessageType, challenge.Data, challenge.Sender, challenge.SenderSignature, now, now) + if err != nil { + return err + } + + return nil +} + +func (s *SQLiteStore) InsertStorageChallengeMetric(m types.StorageChallengeMetric) error { + now := time.Now().UTC() + + const metricsQuery = "INSERT INTO storage_challenge_metrics(id, challenge_id, message_type, data, sender_id, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?) ON CONFLICT DO NOTHING;" + _, err := s.db.Exec(metricsQuery, m.ChallengeID, m.MessageType, m.Data, m.SenderID, now, now) + if err != nil { + return err + } + + return nil +} + +func (s *SQLiteStore) BatchInsertSCMetrics(metrics []types.StorageChallengeLogMessage) error { + tx, err := s.db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(` + INSERT OR IGNORE INTO storage_challenge_metrics + (id, challenge_id, message_type, data, sender_id, created_at, updated_at) + VALUES (NULL,?,?,?,?,?,?) 
+ `) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + for _, metric := range metrics { + now := time.Now().UTC() + + _, err = stmt.Exec(metric.ChallengeID, metric.MessageType, metric.Data, metric.Sender, now, now) + if err != nil { + tx.Rollback() + return err + } + } + + // Commit the transaction + return tx.Commit() +} + +func (s *SQLiteStore) GetMetricsDataByStorageChallengeID(ctx context.Context, challengeID string) (storageChallengeMessages []types.Message, err error) { + scMetrics, err := s.GetStorageChallengeMetricsByChallengeID(challengeID) + if err != nil { + return storageChallengeMessages, err + } + log.WithContext(ctx).WithField("rows", len(scMetrics)).Info("storage-challenge metrics row count") + + for _, scMetric := range scMetrics { + msg := types.MessageData{} + if err := json.Unmarshal(scMetric.Data, &msg); err != nil { + return storageChallengeMessages, fmt.Errorf("cannot unmarshal storage challenge data: %w", err) + } + + storageChallengeMessages = append(storageChallengeMessages, types.Message{ + ChallengeID: scMetric.ChallengeID, + MessageType: types.MessageType(scMetric.MessageType), + Sender: scMetric.Sender, + SenderSignature: scMetric.SenderSignature, + Data: msg, + }) + } + + return storageChallengeMessages, nil +} + +func (s *SQLiteStore) GetTotalSCGeneratedAndProcessedAndEvaluated(from time.Time) (metrics.SCMetrics, error) { + metrics := metrics.SCMetrics{} + + // Query for total number of challenges + totalChallengeQuery := "SELECT COUNT(DISTINCT challenge_id) FROM storage_challenge_metrics WHERE message_type = 1 AND created_at > ?" + err := s.db.QueryRow(totalChallengeQuery, from).Scan(&metrics.TotalChallenges) + if err != nil { + return metrics, err + } + + // Query for total challenges responded + totalChallengesProcessedQuery := "SELECT COUNT(DISTINCT challenge_id) FROM storage_challenge_metrics WHERE message_type = 2 AND created_at > ?" 
+ err = s.db.QueryRow(totalChallengesProcessedQuery, from).Scan(&metrics.TotalChallengesProcessed) + if err != nil { + return metrics, err + } + + totalChallengesEvaluatedQuery := "SELECT COUNT(DISTINCT challenge_id) FROM storage_challenge_metrics WHERE message_type = 3 AND created_at > ?" + err = s.db.QueryRow(totalChallengesEvaluatedQuery, from).Scan(&metrics.TotalChallengesEvaluatedByChallenger) + if err != nil { + return metrics, err + } + + return metrics, nil +} + +func (s *SQLiteStore) GetChallengerEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error) { + var messages []types.StorageChallengeLogMessage + + query := "SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at FROM storage_challenge_metrics WHERE message_type = 3 and created_at > ?" + rows, err := s.db.Query(query, from) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var msg types.StorageChallengeLogMessage + err := rows.Scan(&msg.ID, &msg.ChallengeID, &msg.MessageType, &msg.Data, &msg.Sender, &msg.CreatedAt, &msg.UpdatedAt) + if err != nil { + return nil, err + } + messages = append(messages, msg) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return messages, nil +} + +func (s *SQLiteStore) GetObserversEvaluations(from time.Time) ([]types.StorageChallengeLogMessage, error) { + var messages []types.StorageChallengeLogMessage + + query := "SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at FROM storage_challenge_metrics WHERE message_type = 4 and created_at > ?" 
+ rows, err := s.db.Query(query, from) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var msg types.StorageChallengeLogMessage + err := rows.Scan(&msg.ID, &msg.ChallengeID, &msg.MessageType, &msg.Data, &msg.Sender, &msg.CreatedAt, &msg.UpdatedAt) + if err != nil { + return nil, err + } + messages = append(messages, msg) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return messages, nil +} + +func (s *SQLiteStore) GetSCSummaryStats(from time.Time) (scMetrics metrics.SCMetrics, err error) { + scStats := metrics.SCMetrics{} + scMetrics, err = s.GetTotalSCGeneratedAndProcessedAndEvaluated(from) + if err != nil { + return scMetrics, err + } + scStats.TotalChallenges = scMetrics.TotalChallenges + scStats.TotalChallengesProcessed = scMetrics.TotalChallengesProcessed + scStats.TotalChallengesEvaluatedByChallenger = scMetrics.TotalChallengesEvaluatedByChallenger + + observersEvaluations, err := s.GetObserversEvaluations(from) + if err != nil { + return scMetrics, err + } + log.WithField("observer_evaluations", len(observersEvaluations)).Info("observer evaluations retrieved") + + observerEvaluationMetrics := processObserverEvaluations(observersEvaluations) + log.WithField("observer_evaluation_metrics", len(observerEvaluationMetrics)).Info("observer evaluation metrics retrieved") + + for _, obMetrics := range observerEvaluationMetrics { + if obMetrics.ChallengesVerified > 2 { + scMetrics.TotalChallengesVerified++ + } else { + if obMetrics.FailedByInvalidTimestamps > 0 { + scMetrics.SlowResponsesObservedByObservers++ + } + if obMetrics.FailedByInvalidSignatures > 0 { + scMetrics.InvalidSignaturesObservedByObservers++ + } + if obMetrics.FailedByInvalidEvaluation > 0 { + scMetrics.InvalidEvaluationObservedByObservers++ + } + } + } + + return scMetrics, nil +} + +// InsertBroadcastMessage inserts broadcast storage challenge msg to db +func (s *SQLiteStore) InsertBroadcastMessage(challenge types.BroadcastLogMessage) error 
{ + now := time.Now().UTC() + const insertQuery = "INSERT INTO broadcast_challenge_messages(id, challenge_id, data, challenger, recipient, observers, created_at, updated_at) VALUES(NULL,?,?,?,?,?,?,?);" + _, err := s.db.Exec(insertQuery, challenge.ChallengeID, challenge.Data, challenge.Challenger, challenge.Recipient, challenge.Observers, now, now) + if err != nil { + return err + } + + return nil +} + +// StorageChallengeMetrics retrieves all the metrics needs to be broadcast +func (s *SQLiteStore) StorageChallengeMetrics(timestamp time.Time) ([]types.StorageChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM storage_challenge_metrics + WHERE created_at > ? + ` + + rows, err := s.db.Query(query, timestamp) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.StorageChallengeLogMessage + for rows.Next() { + var m types.StorageChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// QueryStorageChallengeMessage retrieves storage challenge message against challengeID and messageType +func (s *SQLiteStore) QueryStorageChallengeMessage(challengeID string, messageType int) (challengeMessage types.StorageChallengeLogMessage, err error) { + const selectQuery = "SELECT * FROM storage_challenge_messages WHERE challenge_id=? AND message_type=?" 
+ err = s.db.QueryRow(selectQuery, challengeID, messageType).Scan( + &challengeMessage.ID, &challengeMessage.ChallengeID, &challengeMessage.MessageType, &challengeMessage.Data, + &challengeMessage.Sender, &challengeMessage.SenderSignature, &challengeMessage.CreatedAt, &challengeMessage.UpdatedAt) + + if err != nil { + return challengeMessage, err + } + + return challengeMessage, nil +} + +// CleanupStorageChallenges cleans up challenges stored in DB for self-healing +func (s *SQLiteStore) CleanupStorageChallenges() (err error) { + const delQuery = "DELETE FROM storage_challenge_messages" + _, err = s.db.Exec(delQuery) + return err +} + +// GetStorageChallengeMetricsByChallengeID retrieves all the metrics +func (s *SQLiteStore) GetStorageChallengeMetricsByChallengeID(challengeID string) ([]types.StorageChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM storage_challenge_metrics + WHERE challenge_id = ?;` + + rows, err := s.db.Query(query, challengeID) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.StorageChallengeLogMessage + for rows.Next() { + var m types.StorageChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +// GetMetricsByChallengeIDAndMessageType retrieves all the metrics by challengeID and messageType +func (s *SQLiteStore) GetMetricsByChallengeIDAndMessageType(challengeID string, messageType types.MessageType) ([]types.StorageChallengeLogMessage, error) { + const query = ` + SELECT id, challenge_id, message_type, data, sender_id, created_at, updated_at + FROM storage_challenge_metrics + WHERE challenge_id = ? 
+ AND message_type = ?;` + + rows, err := s.db.Query(query, challengeID, int(messageType)) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []types.StorageChallengeLogMessage + for rows.Next() { + var m types.StorageChallengeLogMessage + err := rows.Scan(&m.ID, &m.ChallengeID, &m.MessageType, &m.Data, &m.Sender, &m.CreatedAt, &m.UpdatedAt) + if err != nil { + return nil, err + } + metrics = append(metrics, m) + } + + return metrics, rows.Err() +} + +func processObserverEvaluations(observersEvaluations []types.StorageChallengeLogMessage) map[string]ObserverEvaluationMetrics { + evaluationMap := make(map[string]ObserverEvaluationMetrics) + + for _, observerEvaluation := range observersEvaluations { + var oe types.MessageData + if err := json.Unmarshal(observerEvaluation.Data, &oe); err != nil { + continue + } + + oem, exists := evaluationMap[observerEvaluation.ChallengeID] + if !exists { + oem = ObserverEvaluationMetrics{} // Initialize if not exists + } + + if isObserverEvaluationVerified(oe.ObserverEvaluation) { + oem.ChallengesVerified++ + } else { + if !oe.ObserverEvaluation.IsChallengeTimestampOK || + !oe.ObserverEvaluation.IsProcessTimestampOK || + !oe.ObserverEvaluation.IsEvaluationTimestampOK { + oem.FailedByInvalidTimestamps++ + } + + if !oe.ObserverEvaluation.IsChallengerSignatureOK || + !oe.ObserverEvaluation.IsRecipientSignatureOK { + oem.FailedByInvalidSignatures++ + } + + if !oe.ObserverEvaluation.IsEvaluationResultOK { + oem.FailedByInvalidEvaluation++ + } + } + + evaluationMap[observerEvaluation.ChallengeID] = oem + } + + return evaluationMap +} + +func isObserverEvaluationVerified(observerEvaluation types.ObserverEvaluationData) bool { + if !observerEvaluation.IsEvaluationResultOK { + return false + } + + if !observerEvaluation.IsChallengerSignatureOK { + return false + } + + if !observerEvaluation.IsRecipientSignatureOK { + return false + } + + if !observerEvaluation.IsChallengeTimestampOK { + return false + } + + if 
!observerEvaluation.IsProcessTimestampOK { + return false + } + + if !observerEvaluation.IsEvaluationTimestampOK { + return false + } + + return true +} + +// GetDistinctChallengeIDsCountForScoreAggregation gets the count of distinct challenge ids for score aggregation +func (s *SQLiteStore) GetDistinctChallengeIDsCountForScoreAggregation(after, before time.Time) (int, error) { + query := ` + SELECT COUNT(DISTINCT challenge_id) + FROM storage_challenge_metrics + WHERE message_type = 4 AND created_at >= ? AND created_at < ? + ` + + var challengeIDsCount int + err := s.db.QueryRow(query, after, before).Scan(&challengeIDsCount) + if err != nil { + return 0, err + } + + return challengeIDsCount, nil +} + +// GetDistinctChallengeIDs retrieves the distinct challenge ids for score aggregation +func (s *SQLiteStore) GetDistinctChallengeIDs(after, before time.Time, batchNumber int) ([]string, error) { + offset := batchNumber * batchSizeForChallengeIDsRetrieval + + query := ` + SELECT DISTINCT challenge_id + FROM storage_challenge_metrics + WHERE message_type = 4 AND created_at >= ? AND created_at < ? + LIMIT ? OFFSET ? 
+ ` + + rows, err := s.db.Query(query, after, before, batchSizeForChallengeIDsRetrieval, offset) + if err != nil { + return nil, err + } + defer rows.Close() + + var challengeIDs []string + for rows.Next() { + var challengeID string + if err := rows.Scan(&challengeID); err != nil { + return nil, err + } + challengeIDs = append(challengeIDs, challengeID) + } + + if err = rows.Err(); err != nil { + return nil, err + } + + return challengeIDs, nil +} + +// BatchInsertScoreAggregationChallenges inserts the batch of challenge ids for score aggregation +func (s *SQLiteStore) BatchInsertScoreAggregationChallenges(challengeIDs []string, isAggregated bool) error { + tx, err := s.db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(` + INSERT OR IGNORE INTO sc_score_aggregation_queue + (challenge_id, is_aggregated, created_at, updated_at) + VALUES (?,?,?,?) + `) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + for _, id := range challengeIDs { + now := time.Now().UTC() + + _, err = stmt.Exec(id, isAggregated, now, now) + if err != nil { + tx.Rollback() + return err + } + } + + // Commit the transaction + return tx.Commit() +} diff --git a/pkg/storage/queries/task_history.go b/pkg/storage/queries/task_history.go new file mode 100644 index 00000000..28a8572c --- /dev/null +++ b/pkg/storage/queries/task_history.go @@ -0,0 +1,70 @@ +package queries + +import ( + "fmt" + + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/types" + + json "github.com/json-iterator/go" +) + +type TaskHistoryQueries interface { + InsertTaskHistory(history types.TaskHistory) (int, error) + QueryTaskHistory(taskID string) (history []types.TaskHistory, err error) +} + +// InsertTaskHistory inserts task history +func (s *SQLiteStore) InsertTaskHistory(history types.TaskHistory) (hID int, err error) { + var stringifyDetails string + if history.Details != nil { + stringifyDetails = history.Details.Stringify() + } + + 
const insertQuery = "INSERT INTO task_history(id, time, task_id, status, details) VALUES(NULL,?,?,?,?);" + res, err := s.db.Exec(insertQuery, history.CreatedAt, history.TaskID, history.Status, stringifyDetails) + + if err != nil { + return 0, err + } + + var id int64 + if id, err = res.LastInsertId(); err != nil { + return 0, err + } + + return int(id), nil +} + +// QueryTaskHistory gets task history by taskID +func (s *SQLiteStore) QueryTaskHistory(taskID string) (history []types.TaskHistory, err error) { + const selectQuery = "SELECT * FROM task_history WHERE task_id = ? LIMIT 100" + rows, err := s.db.Query(selectQuery, taskID) + if err != nil { + return nil, err + } + defer rows.Close() + + var data []types.TaskHistory + for rows.Next() { + i := types.TaskHistory{} + var details string + err = rows.Scan(&i.ID, &i.CreatedAt, &i.TaskID, &i.Status, &details) + if err != nil { + return nil, err + } + + if details != emptyString { + err = json.Unmarshal([]byte(details), &i.Details) + if err != nil { + log.Info(details) + log.WithError(err).Error(fmt.Sprintf("cannot unmarshal task history details: %s", details)) + i.Details = nil + } + } + + data = append(data, i) + } + + return data, nil +} diff --git a/pkg/storage/rqstore/store.go b/pkg/storage/rqstore/store.go index f6fe52f2..bc62a40a 100644 --- a/pkg/storage/rqstore/store.go +++ b/pkg/storage/rqstore/store.go @@ -41,7 +41,7 @@ type SymbolDir struct { func NewSQLiteRQStore(file string) (*SQLiteRQStore, error) { db, err := sqlx.Connect("sqlite3", file) if err != nil { - return nil, fmt.Errorf("cannot open rq-service database: %w", err) + return nil, fmt.Errorf("cannot open rq-services database: %w", err) } // Create the rq_symbols_dir table if it doesn't exist diff --git a/pkg/testutil/accounts.go b/pkg/testutil/accounts.go index d2e08fd4..f4084f23 100644 --- a/pkg/testutil/accounts.go +++ b/pkg/testutil/accounts.go @@ -1,20 +1,20 @@ package testutil import ( - "testing" "crypto/ecdh" 
"github.com/stretchr/testify/require" + "testing" - "github.com/cosmos/go-bip39" "github.com/cosmos/cosmos-sdk/codec" codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" "github.com/cosmos/cosmos-sdk/crypto/hd" "github.com/cosmos/cosmos-sdk/crypto/keyring" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/go-bip39" "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" ) - + // setupTestKeyExchange creates a key exchange instance for testing func SetupTestKeyExchange(t *testing.T, kb keyring.Keyring, addr string, peerType securekeyx.PeerType) *securekeyx.SecureKeyExchange { ke, err := securekeyx.NewSecureKeyExchange(kb, addr, peerType, ecdh.P256()) @@ -23,7 +23,7 @@ func SetupTestKeyExchange(t *testing.T, kb keyring.Keyring, addr string, peerTyp } func generateMnemonic() (string, error) { - entropy, err := bip39.NewEntropy(128) // 128 bits for a 12-word mnemonic + entropy, err := bip39.NewEntropy(256) // 128 bits for a 12-word mnemonic if err != nil { return "", err } diff --git a/pkg/testutil/lumera.go b/pkg/testutil/lumera.go new file mode 100644 index 00000000..cdffd6da --- /dev/null +++ b/pkg/testutil/lumera.go @@ -0,0 +1,163 @@ +package testutil + +import ( + "context" + + cmtservice "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdktx "github.com/cosmos/cosmos-sdk/types/tx" + + "github.com/LumeraProtocol/lumera/x/action/types" + supernodeTypes "github.com/LumeraProtocol/lumera/x/supernode/types" + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/lumera/modules/action" + "github.com/LumeraProtocol/supernode/pkg/lumera/modules/node" + "github.com/LumeraProtocol/supernode/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/pkg/lumera/modules/tx" +) + +// MockLumeraClient implements the lumera.Client interface for testing purposes +type MockLumeraClient 
struct { + actionMod *MockActionModule + supernodeMod *MockSupernodeModule + txMod *MockTxModule + nodeMod *MockNodeModule + kr keyring.Keyring + addresses []string // Store node addresses for testing +} + +// NewMockLumeraClient creates a new mock Lumera client for testing +func NewMockLumeraClient(kr keyring.Keyring, addresses []string) (lumera.Client, error) { + actionMod := &MockActionModule{} + supernodeMod := &MockSupernodeModule{addresses: addresses} + txMod := &MockTxModule{} + nodeMod := &MockNodeModule{} + + return &MockLumeraClient{ + actionMod: actionMod, + supernodeMod: supernodeMod, + txMod: txMod, + nodeMod: nodeMod, + kr: kr, + addresses: addresses, + }, nil +} + +// Action returns the Action module client +func (c *MockLumeraClient) Action() action.Module { + return c.actionMod +} + +// SuperNode returns the SuperNode module client +func (c *MockLumeraClient) SuperNode() supernode.Module { + return c.supernodeMod +} + +// Tx returns the Transaction module client +func (c *MockLumeraClient) Tx() tx.Module { + return c.txMod +} + +// Node returns the Node module client +func (c *MockLumeraClient) Node() node.Module { + return c.nodeMod +} + +// Close closes all connections +func (c *MockLumeraClient) Close() error { + return nil +} + +// MockActionModule implements the action.Module interface for testing +type MockActionModule struct{} + +func (m *MockActionModule) GetAction(ctx context.Context, actionID string) (*types.QueryGetActionResponse, error) { + return &types.QueryGetActionResponse{}, nil +} + +func (m *MockActionModule) GetActionFee(ctx context.Context, dataSize string) (*types.QueryGetActionFeeResponse, error) { + return &types.QueryGetActionFeeResponse{}, nil +} + +// MockSupernodeModule implements the supernode.Module interface for testing +type MockSupernodeModule struct { + addresses []string +} + +func (m *MockSupernodeModule) GetTopSuperNodesForBlock(ctx context.Context, blockHeight uint64) 
(*supernodeTypes.QueryGetTopSuperNodesForBlockResponse, error) { + // Create supernodes with the actual node addresses supplied in the test + supernodes := make([]*supernodeTypes.SuperNode, 0, len(m.addresses)) + + for i, addr := range m.addresses { + if i >= 2 { // Only use first couple for bootstrap + break + } + + supernode := &supernodeTypes.SuperNode{ + SupernodeAccount: addr, // Use the real account address for testing + PrevIpAddresses: []*supernodeTypes.IPAddressHistory{ + { + Address: "127.0.0.1:900" + string('0'+i), + Height: 10, + }, + }, + } + supernodes = append(supernodes, supernode) + } + + return &supernodeTypes.QueryGetTopSuperNodesForBlockResponse{ + Supernodes: supernodes, + }, nil +} + +func (m *MockSupernodeModule) GetSuperNode(ctx context.Context, address string) (*supernodeTypes.QueryGetSuperNodeResponse, error) { + return &supernodeTypes.QueryGetSuperNodeResponse{}, nil +} + +// MockTxModule implements the tx.Module interface for testing +type MockTxModule struct{} + +func (m *MockTxModule) BroadcastTx(ctx context.Context, txBytes []byte, mode sdktx.BroadcastMode) (*sdktx.BroadcastTxResponse, error) { + return &sdktx.BroadcastTxResponse{}, nil +} + +func (m *MockTxModule) SimulateTx(ctx context.Context, txBytes []byte) (*sdktx.SimulateResponse, error) { + return &sdktx.SimulateResponse{}, nil +} + +func (m *MockTxModule) GetTx(ctx context.Context, hash string) (*sdktx.GetTxResponse, error) { + return &sdktx.GetTxResponse{}, nil +} + +// MockNodeModule implements the node.Module interface for testing +type MockNodeModule struct{} + +func (m *MockNodeModule) GetLatestBlock(ctx context.Context) (*cmtservice.GetLatestBlockResponse, error) { + return &cmtservice.GetLatestBlockResponse{ + SdkBlock: &cmtservice.Block{ + Header: cmtservice.Header{ + Height: 100, + }, + }, + }, nil +} + +func (m *MockNodeModule) GetBlockByHeight(ctx context.Context, height int64) (*cmtservice.GetBlockByHeightResponse, error) { + return 
&cmtservice.GetBlockByHeightResponse{}, nil +} + +func (m *MockNodeModule) GetNodeInfo(ctx context.Context) (*cmtservice.GetNodeInfoResponse, error) { + return &cmtservice.GetNodeInfoResponse{}, nil +} + +func (m *MockNodeModule) GetSyncing(ctx context.Context) (*cmtservice.GetSyncingResponse, error) { + return &cmtservice.GetSyncingResponse{}, nil +} + +func (m *MockNodeModule) GetLatestValidatorSet(ctx context.Context) (*cmtservice.GetLatestValidatorSetResponse, error) { + return &cmtservice.GetLatestValidatorSetResponse{}, nil +} + +func (m *MockNodeModule) GetValidatorSetByHeight(ctx context.Context, height int64) (*cmtservice.GetValidatorSetByHeightResponse, error) { + return &cmtservice.GetValidatorSetByHeightResponse{}, nil +} diff --git a/pkg/types/healthcheck_challenge.go b/pkg/types/healthcheck_challenge.go new file mode 100644 index 00000000..a171138b --- /dev/null +++ b/pkg/types/healthcheck_challenge.go @@ -0,0 +1,164 @@ +package types + +import ( + "encoding/json" + "time" + + "github.com/LumeraProtocol/supernode/pkg/utils" +) + +// HealthCheckMessageType represents the type of message sent in the health-check process +type HealthCheckMessageType int + +const ( + // HealthCheckChallengeMessageType represents the challenge message + HealthCheckChallengeMessageType HealthCheckMessageType = iota + 1 + // HealthCheckResponseMessageType represents the response message + HealthCheckResponseMessageType + // HealthCheckEvaluationMessageType represents the evaluation message + HealthCheckEvaluationMessageType + // HealthCheckAffirmationMessageType represents the affirmation message + HealthCheckAffirmationMessageType + // HealthCheckBroadcastMessageType represents the broadcast message + HealthCheckBroadcastMessageType +) + +// String returns the message string +func (hcm HealthCheckMessageType) String() string { + switch hcm { + case HealthCheckChallengeMessageType: + return "challenge" + case HealthCheckResponseMessageType: + return "response" + case 
HealthCheckEvaluationMessageType: + return "evaluation" + case HealthCheckAffirmationMessageType: + return "affirmation" + default: + return "unknown" + } +} + +// BroadcastHealthCheckMessage represents the healthcheck challenge message that needs to be broadcast after evaluation +type BroadcastHealthCheckMessage struct { + ChallengeID string + Challenger map[string][]byte + Recipient map[string][]byte + Observers map[string][]byte +} + +// BroadcastHealthCheckLogMessage represents the broadcast message log to be stored in the DB +type BroadcastHealthCheckLogMessage struct { + ChallengeID string `db:"challenge_id"` + Challenger string `db:"challenger"` + Recipient string `db:"recipient"` + Observers string `db:"observers"` + Data []byte `db:"data"` +} + +// HealthCheckChallengeData represents the data of challenge +type HealthCheckChallengeData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` +} + +// HealthCheckResponseData represents the data of response +type HealthCheckResponseData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` +} + +// HealthCheckEvaluationData represents the data of evaluation +type HealthCheckEvaluationData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + IsVerified bool `json:"is_verified"` +} + +// HealthCheckObserverEvaluationData represents the data of Observer's evaluation +type HealthCheckObserverEvaluationData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + IsChallengeTimestampOK bool `json:"is_challenge_timestamp_ok"` + IsProcessTimestampOK bool `json:"is_process_timestamp_ok"` + IsEvaluationTimestampOK bool `json:"is_evaluation_timestamp_ok"` + IsRecipientSignatureOK bool `json:"is_recipient_signature_ok"` + IsChallengerSignatureOK bool `json:"is_challenger_signature_ok"` + 
IsEvaluationResultOK bool `json:"is_evaluation_result_ok"` + Timestamp time.Time `json:"timestamp"` +} + +// HealthCheckMessageData represents the health check challenge message data +type HealthCheckMessageData struct { + ChallengerID string `json:"challenger_id"` + Challenge HealthCheckChallengeData `json:"challenge"` + Observers []string `json:"observers"` + RecipientID string `json:"recipient_id"` + Response HealthCheckResponseData `json:"response"` + ChallengerEvaluation HealthCheckEvaluationData `json:"challenger_evaluation"` + ObserverEvaluation HealthCheckObserverEvaluationData `json:"observer_evaluation"` +} + +// HealthCheckMessage represents the healthcheck challenge message +type HealthCheckMessage struct { + MessageType HealthCheckMessageType `json:"message_type"` + ChallengeID string `json:"challenge_id"` + Data HealthCheckMessageData `json:"data"` + Sender string `json:"sender"` + SenderSignature []byte `json:"sender_signature"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// HealthCheckChallengeMetric represents the metric log to be stored in the DB +type HealthCheckChallengeMetric struct { + ID int `db:"id"` + MessageType int `db:"message_type"` + ChallengeID string `db:"challenge_id"` + Data []byte `db:"data"` + SenderID string `db:"sender_id"` +} + +// HealthCheckChallengeLogMessage represents the message log to be stored in the DB +type HealthCheckChallengeLogMessage struct { + ID int `db:"id"` + MessageType int `db:"message_type"` + ChallengeID string `db:"challenge_id"` + Data []byte `db:"data"` + Sender string `db:"sender_id"` + SenderSignature []byte `db:"sender_signature"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// BroadcastHealthCheckMessageMetrics is the struct for broadcast message metrics +type BroadcastHealthCheckMessageMetrics struct { + ID int `db:"id"` + ChallengeID string `db:"challenge_id"` + Challenger string `db:"challenger"` + Recipient 
string `db:"recipient"` + Observers string `db:"observers"` + Data []byte `db:"data"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// ProcessBroadcastHealthCheckChallengeMetricsRequest represents the request for broadcasting metrics +type ProcessBroadcastHealthCheckChallengeMetricsRequest struct { + Data []byte `json:"data"` + SenderID string `json:"sender_id"` +} + +// HealthCheckChallengeMessages represents an array of health-check message +type HealthCheckChallengeMessages []HealthCheckMessage + +// Hash returns the hash of the health-check-challenge challenge log data +func (mdl HealthCheckChallengeMessages) Hash() string { + data, _ := json.Marshal(mdl) + hash, _ := utils.Sha3256hash(data) + + return string(hash) +} diff --git a/pkg/types/self_healing.go b/pkg/types/self_healing.go new file mode 100644 index 00000000..0ba13081 --- /dev/null +++ b/pkg/types/self_healing.go @@ -0,0 +1,252 @@ +package types + +import ( + "database/sql" + "encoding/json" + "time" + + "github.com/LumeraProtocol/supernode/pkg/utils" +) + +// SelfHealingMessageType represents the type of message sent in the self-healing process +type SelfHealingMessageType int + +const ( + // SelfHealingChallengeMessage represents the challenge message + SelfHealingChallengeMessage SelfHealingMessageType = iota + 1 + // SelfHealingResponseMessage represents the response message + SelfHealingResponseMessage + // SelfHealingVerificationMessage represents the verification message + SelfHealingVerificationMessage + // SelfHealingCompletionMessage represents the challenge message processed successfully + SelfHealingCompletionMessage + // SelfHealingAcknowledgementMessage represents the acknowledgement message + SelfHealingAcknowledgementMessage +) + +func (s SelfHealingMessageType) String() string { + messages := [...]string{"", "challenge", "response", "verification", "completion", "acknowledgement"} + if s < 1 || int(s) >= len(messages) { + return "unknown" + } + 
+ return messages[s] +} + +// TicketType represents the type of ticket; nft, cascade, sense +type TicketType int + +const ( + // TicketTypeCascade represents the cascade ticket type + TicketTypeCascade TicketType = iota + 1 + // TicketTypeSense represents the sense ticket type + TicketTypeSense + // TicketTypeNFT represents the NFT ticket type + TicketTypeNFT +) + +func (t TicketType) String() string { + tickets := [...]string{"", "cascade", "sense", "nft"} + if t < 1 || int(t) >= len(tickets) { + return "unknown" + } + + return tickets[t] +} + +// PingInfo represents the structure of data to be inserted into the ping_history table +type PingInfo struct { + ID int `db:"id"` + SupernodeID string `db:"supernode_id"` + IPAddress string `db:"ip_address"` + TotalPings int `db:"total_pings"` + TotalSuccessfulPings int `db:"total_successful_pings"` + AvgPingResponseTime float64 `db:"avg_ping_response_time"` + IsOnline bool `db:"is_online"` + IsOnWatchlist bool `db:"is_on_watchlist"` + IsAdjusted bool `db:"is_adjusted"` + CumulativeResponseTime float64 `db:"cumulative_response_time"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` + LastSeen sql.NullTime `db:"last_seen"` + MetricsLastBroadcastAt sql.NullTime `db:"metrics_last_broadcast_at"` + HealthCheckMetricsLastBroadcastAt sql.NullTime `db:"health_check_metrics_last_broadcast_at"` + GenerationMetricsLastBroadcastAt sql.NullTime `db:"generation_metrics_last_broadcast_at"` + ExecutionMetricsLastBroadcastAt sql.NullTime `db:"execution_metrics_last_broadcast_at"` + SCScoreLastAggregatedAt sql.NullTime `db:"sc_score_last_aggregated_at"` + LastResponseTime float64 `db:"-"` +} + +// PingInfos represents array of ping info +type PingInfos []PingInfo + +// SelfHealingReports represents the self-healing metrics for each challenge +type SelfHealingReports map[string]SelfHealingReport + +// SelfHealingReport represents the self-healing challenges +type SelfHealingReport map[string]SelfHealingMessages 
+ +// SelfHealingMessages represents the self-healing metrics for each challenge = message_type = 3 +type SelfHealingMessages []SelfHealingMessage + +// SelfHealingMessage represents the self-healing message +type SelfHealingMessage struct { + TriggerID string `json:"trigger_id"` + MessageType SelfHealingMessageType `json:"message_type"` + SelfHealingMessageData SelfHealingMessageData `json:"data"` + SenderID string `json:"sender_id"` + SenderSignature []byte `json:"sender_signature"` +} + +// SelfHealingMessageData represents the self-healing message data == message_type = 2 +type SelfHealingMessageData struct { + ChallengerID string `json:"challenger_id"` + RecipientID string `json:"recipient_id"` + Challenge SelfHealingChallengeData `json:"challenge"` + Response SelfHealingResponseData `json:"response"` + Verification SelfHealingVerificationData `json:"verification"` +} + +// SelfHealingChallengeData represents the challenge data for self-healing sent by the challenger +type SelfHealingChallengeData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + ChallengeTickets []ChallengeTicket `json:"challenge_tickets"` + NodesOnWatchlist string `json:"nodes_on_watchlist"` +} + +// ChallengeTicket represents the ticket details for self-healing challenge +type ChallengeTicket struct { + TxID string `json:"tx_id"` + TicketType TicketType `json:"ticket_type"` + MissingKeys []string `json:"missing_keys"` + DataHash []byte `json:"data_hash"` + Recipient string `json:"recipient"` +} + +// RespondedTicket represents the details of ticket responded in a self-healing challenge +type RespondedTicket struct { + TxID string `json:"tx_id"` + TicketType TicketType `json:"ticket_type"` + MissingKeys []string `json:"missing_keys"` + ReconstructedFileHash []byte `json:"reconstructed_file_hash"` + IsReconstructionRequired bool `json:"is_reconstruction_required"` + Error string `json:"error"` +} + +// 
SelfHealingResponseData represents the response data for self-healing sent by the recipient +type SelfHealingResponseData struct { + ChallengeID string `json:"challenge_id"` + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + RespondedTicket RespondedTicket `json:"responded_ticket"` + Verifiers []string `json:"verifiers"` +} + +// VerifiedTicket represents the details of ticket verified in self-healing challenge +type VerifiedTicket struct { + TxID string `json:"tx_id"` + TicketType TicketType `json:"ticket_type"` + MissingKeys []string `json:"missing_keys"` + ReconstructedFileHash []byte `json:"reconstructed_file_hash"` + IsReconstructionRequired bool `json:"is_reconstruction_required"` + IsReconstructionRequiredByHealer bool `json:"is_reconstruction_required_by_healer"` + IsVerified bool `json:"is_verified"` + Message string `json:"message"` +} + +// SelfHealingVerificationData represents the verification data for self-healing challenge +type SelfHealingVerificationData struct { + NodeID string `json:"node_id"` + NodeAddress string `json:"node_address"` + ChallengeID string `json:"challenge_id"` + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + VerifiedTicket VerifiedTicket `json:"verified_ticket"` + VerifiersData map[string][]byte `json:"verifiers_data"` +} + +// SelfHealingGenerationMetric represents the self-healing generation metrics for trigger events +type SelfHealingGenerationMetric struct { + ID int `db:"id"` + TriggerID string `db:"trigger_id"` + MessageType int `db:"message_type"` + Data []byte `db:"data"` + SenderID string `db:"sender_id"` + SenderSignature []byte `db:"sender_signature"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// CombinedSelfHealingMetrics represents the combination of generation and execution metrics +type CombinedSelfHealingMetrics struct { + GenerationMetrics 
[]SelfHealingGenerationMetric + ExecutionMetrics []SelfHealingExecutionMetric +} + +// SelfHealingExecutionMetric represents the self-healing execution metrics for trigger events +type SelfHealingExecutionMetric struct { + ID int `db:"id"` + TriggerID string `db:"trigger_id"` + ChallengeID string `db:"challenge_id"` + MessageType int `db:"message_type"` + Data []byte `db:"data"` + SenderID string `db:"sender_id"` + SenderSignature []byte `db:"sender_signature"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// SelfHealingMetricType represents the type of self-healing metric +type SelfHealingMetricType int + +const ( + // GenerationSelfHealingMetricType represents the generation metric for self-healing + GenerationSelfHealingMetricType SelfHealingMetricType = 1 + // ExecutionSelfHealingMetricType represents the execution metric for self-healing + ExecutionSelfHealingMetricType SelfHealingMetricType = 2 +) + +// ProcessBroadcastMetricsRequest represents the request for broadcasting metrics +type ProcessBroadcastMetricsRequest struct { + Data []byte `json:"data"` + Type SelfHealingMetricType `json:"type"` + SenderID string `json:"sender_id"` + SenderSignature []byte `json:"sender_signature"` +} + +// SelfHealingMetrics represents the self-healing metrics for each challenge +type SelfHealingMetrics struct { + ChallengeID string `db:"challenge_id"` + SentTicketsForSelfHealing int `db:"sent_tickets_for_self_healing"` + EstimatedMissingKeys int `db:"estimated_missing_keys"` + TicketsInProgress int `db:"tickets_in_progress"` + TicketsRequiredSelfHealing int `db:"tickets_required_self_healing"` + SuccessfullySelfHealedTickets int `db:"successfully_self_healed_tickets"` + SuccessfullyVerifiedTickets int `db:"successfully_verified_tickets"` +} + +// SelfHealingChallengeEvent represents the challenge event that needs to be healed. 
+type SelfHealingChallengeEvent struct { + ID int64 + TriggerID string + ChallengeID string + TicketID string + Data []byte + SenderID string + IsProcessed bool + ExecMetric SelfHealingExecutionMetric + CreatedAt time.Time + UpdatedAt time.Time +} + +// Hash returns the hash of the self-healing challenge reports +func (s SelfHealingReports) Hash() string { + data, _ := json.Marshal(s) + hash, _ := utils.Sha3256hash(data) + + return string(hash) +} diff --git a/pkg/types/storage_challenge.go b/pkg/types/storage_challenge.go new file mode 100644 index 00000000..2b0432e1 --- /dev/null +++ b/pkg/types/storage_challenge.go @@ -0,0 +1,249 @@ +package types + +import ( + "encoding/json" + "time" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/utils" +) + +// MessageType represents the type of message +type MessageType int + +const ( + // ChallengeMessageType represents the challenge message + ChallengeMessageType MessageType = iota + 1 + // ResponseMessageType represents the response message + ResponseMessageType + // EvaluationMessageType represents the evaluation message + EvaluationMessageType + // AffirmationMessageType represents the affirmation message + AffirmationMessageType + //BroadcastMessageType represents the message that needs to be broadcast + BroadcastMessageType +) + +// String returns the message string +func (m MessageType) String() string { + switch m { + case ChallengeMessageType: + return "challenge" + case ResponseMessageType: + return "response" + case EvaluationMessageType: + return "evaluation" + case AffirmationMessageType: + return "affirmation" + default: + return "unknown" + } +} + +// MessageTypeFromString returns the message type from string +func MessageTypeFromString(str string) (MessageType, error) { + switch str { + case "challenge": + return ChallengeMessageType, nil + case "response": + return ResponseMessageType, nil + case "evaluation": + return EvaluationMessageType, nil + case 
"affirmation": + return AffirmationMessageType, nil + default: + return 0, errors.New("invalid message type string") + } +} + +// StorageChallengeSignatures represents the signature struct for broadcasting +type StorageChallengeSignatures struct { + Challenger map[string]string `json:"challenger,omitempty"` + Recipient map[string]string `json:"recipient,omitempty"` + Obs map[string]string `json:"obs,omitempty"` +} + +// Message represents the storage challenge message +type Message struct { + MessageType MessageType `json:"message_type"` + ChallengeID string `json:"challenge_id"` + Data MessageData `json:"data"` + Sender string `json:"sender"` + SenderSignature []byte `json:"sender_signature"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// BroadcastMessage represents the storage challenge message that needs to be broadcast after evaluation +type BroadcastMessage struct { + ChallengeID string + Challenger map[string][]byte + Recipient map[string][]byte + Observers map[string][]byte +} + +type MessageDataList []MessageData + +// MessageData represents the storage challenge message data +type MessageData struct { + ChallengerID string `json:"challenger_id"` + Challenge ChallengeData `json:"challenge"` + Observers []string `json:"observers"` + RecipientID string `json:"recipient_id"` + Response ResponseData `json:"response"` + ChallengerEvaluation EvaluationData `json:"challenger_evaluation"` + ObserverEvaluation ObserverEvaluationData `json:"observer_evaluation"` +} + +// ChallengeData represents the data of challenge +type ChallengeData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + FileHash string `json:"file_hash"` + StartIndex int `json:"start_index"` + EndIndex int `json:"end_index"` +} + +// ResponseData represents the data of response +type ResponseData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Hash string 
`json:"hash"` + Timestamp time.Time `json:"timestamp"` +} + +// EvaluationData represents the data of evaluation +type EvaluationData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + Timestamp time.Time `json:"timestamp"` + Hash string `json:"hash"` + IsVerified bool `json:"is_verified"` +} + +// ObserverEvaluationData represents the data of Observer's evaluation +type ObserverEvaluationData struct { + Block int32 `json:"block"` + Merkelroot string `json:"merkelroot"` + IsChallengeTimestampOK bool `json:"is_challenge_timestamp_ok"` + IsProcessTimestampOK bool `json:"is_process_timestamp_ok"` + IsEvaluationTimestampOK bool `json:"is_evaluation_timestamp_ok"` + IsRecipientSignatureOK bool `json:"is_recipient_signature_ok"` + IsChallengerSignatureOK bool `json:"is_challenger_signature_ok"` + IsEvaluationResultOK bool `json:"is_evaluation_result_ok"` + Reason string `json:"reason"` + TrueHash string `json:"true_hash"` + Timestamp time.Time `json:"timestamp"` +} + +// StorageChallengeLogMessage represents the message log to be stored in the DB +type StorageChallengeLogMessage struct { + ID int `db:"id"` + MessageType int `db:"message_type"` + ChallengeID string `db:"challenge_id"` + Data []byte `db:"data"` + Sender string `db:"sender_id"` + SenderSignature []byte `db:"sender_signature"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// StorageChallengeMetric represents the metric log to be stored in the DB +type StorageChallengeMetric struct { + ID int `db:"id"` + MessageType int `db:"message_type"` + ChallengeID string `db:"challenge_id"` + Data []byte `db:"data"` + SenderID string `db:"sender_id"` +} + +// BroadcastLogMessage represents the broadcast message log to be stored in the DB +type BroadcastLogMessage struct { + ChallengeID string `db:"challenge_id"` + Challenger string `db:"challenger"` + Recipient string `db:"recipient"` + Observers string `db:"observers"` + Data []byte `db:"data"` +} + 
+// BroadcastMessageMetrics is the struct for broadcast message metrics +type BroadcastMessageMetrics struct { + ID int `db:"id"` + ChallengeID string `db:"challenge_id"` + Challenger string `db:"challenger"` + Recipient string `db:"recipient"` + Observers string `db:"observers"` + Data []byte `db:"data"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// ProcessBroadcastChallengeMetricsRequest represents the request for broadcasting metrics +type ProcessBroadcastChallengeMetricsRequest struct { + Data []byte `json:"data"` + SenderID string `json:"sender_id"` +} + +type StorageChallengeMessages []Message + +// Hash returns the hash of the storage-challenge challenge log data +func (mdl StorageChallengeMessages) Hash() string { + data, _ := json.Marshal(mdl) + hash, _ := utils.Sha3256hash(data) + + return string(hash) +} + +// NScMetric gets the latest challenge IDs from the DB +type NScMetric struct { + Count int + ChallengeID string + CreatedAt string +} + +// NHcMetric gets the latest health-check challenge IDs from the DB +type NHcMetric struct { + Count int + ChallengeID string + CreatedAt string +} + +type AccumulativeChallengeData struct { + NodeID string `db:"node_id"` + IPAddress string `db:"ip_address"` + TotalChallengesAsRecipients int `db:"total_challenges_as_recipients"` + TotalChallengesAsObservers int `db:"total_challenges_as_observers"` + TotalChallengesAsChallengers int `db:"total_challenges_as_challengers"` + CorrectChallengerEvaluations int `db:"correct_challenger_evaluations"` + CorrectObserverEvaluations int `db:"correct_observer_evaluations"` + CorrectRecipientEvaluations int `db:"correct_recipient_evaluations"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// AggregatedScore represents the structure of data in the aggregated_challenge_scores table +type AggregatedScore struct { + NodeID string + IPAddress string + StorageChallengeScore float64 + 
HealthCheckChallengeScore float64 + CreatedAt time.Time + UpdatedAt time.Time +} + +type AggregatedScoreList []AggregatedScore + +func (asl AggregatedScoreList) Hash() string { + data, _ := json.Marshal(asl) + hash, _ := utils.Sha3256hash(data) + + return string(hash) +} + +type ScoreAggregationEvent struct { + ChallengeID string `db:"challenge_id"` + IsAggregated bool `db:"is_aggregated"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} diff --git a/pkg/types/ticket.go b/pkg/types/ticket.go new file mode 100644 index 00000000..9698e0d1 --- /dev/null +++ b/pkg/types/ticket.go @@ -0,0 +1,89 @@ +package types + +import ( + "time" +) + +type File struct { + FileID string + UploadTimestamp time.Time + Path string + FileIndex string + BaseFileID string + TaskID string + RegTxid string + ActivationTxid string + ReqBurnTxnAmount float64 + BurnTxnID string + ReqAmount float64 + IsConcluded bool + CascadeMetadataTicketID string + UUIDKey string + HashOfOriginalBigFile string + NameOfOriginalBigFileWithExt string + SizeOfOriginalBigFile float64 + DataTypeOfOriginalBigFile string + StartBlock int32 + DoneBlock int + PastelID string + Passphrase string +} + +type Files []*File + +func (f Files) Names() []string { + names := make([]string, 0, len(f)) + for _, file := range f { + names = append(names, file.FileID) + } + return names +} + +type RegistrationAttempt struct { + ID int + FileID string + BaseFileID string + RegStartedAt time.Time + ProcessorSNS string + FinishedAt time.Time + IsSuccessful bool + IsConfirmed bool + ErrorMessage string +} + +type ActivationAttempt struct { + ID int + FileID string + BaseFileID string + ActivationAttemptAt time.Time + IsSuccessful bool + IsConfirmed bool + ErrorMessage string +} + +func (fs Files) GetUnconcludedFiles() (Files, error) { + var unconcludedFiles Files + for _, f := range fs { + if !f.IsConcluded { + unconcludedFiles = append(unconcludedFiles, f) + } + } + + return unconcludedFiles, nil +} + 
+func (fs Files) GetBase() *File { + for _, f := range fs { + if f.FileIndex == "0" { + return f + } + } + + return nil +} + +type MultiVolCascadeTicketTxIDMap struct { + ID int64 + MultiVolCascadeTicketTxid string + BaseFileID string +} diff --git a/pkg/types/types.go b/pkg/types/types.go new file mode 100644 index 00000000..09238d2c --- /dev/null +++ b/pkg/types/types.go @@ -0,0 +1,142 @@ +package types + +import ( + "time" + + "github.com/LumeraProtocol/supernode/pkg/log" + + json "github.com/json-iterator/go" +) + +// MeshedSuperNode represents meshed sn +type MeshedSuperNode struct { + SessID string + NodeID string +} + +// NftRegMetadata represents nft reg metadata +type NftRegMetadata struct { + CreatorPastelID string + BlockHash string + BlockHeight string + Timestamp string + GroupID string + CollectionTxID string +} + +// ActionRegMetadata represents action reg metadata +type ActionRegMetadata struct { + CreatorPastelID string + BlockHash string + BurnTxID string + BlockHeight string + Timestamp string + EstimatedFee int64 + GroupID string + CollectionTxID string +} + +// TaskHistory represents task history +type TaskHistory struct { + ID int + TaskID string + CreatedAt time.Time + Status string + Details *Details +} + +// StorageChallengeStatus represents possible storage challenge statuses +type StorageChallengeStatus int + +const ( + //UndefinedStorageChallengeStatus represents invalid storage challenge type + UndefinedStorageChallengeStatus StorageChallengeStatus = iota + //GeneratedStorageChallengeStatus represents when the challenge is stored after generation + GeneratedStorageChallengeStatus + //ProcessedStorageChallengeStatus represents when the challenge is stored after processing + ProcessedStorageChallengeStatus + //VerifiedStorageChallengeStatus represents when the challenge is stored after verification + VerifiedStorageChallengeStatus +) + +// StorageChallenge represents storage challenge log +type StorageChallenge struct { + ID int64 + 
ChallengeID string + FileHash string + ChallengingNode string + RespondingNode string + VerifyingNodes string + GeneratedHash string + Status StorageChallengeStatus + StartingIndex int + EndingIndex int + CreatedAt time.Time + UpdatedAt time.Time +} + +// SelfHealingStatus represents possible self-healing statuses of failed challenge +type SelfHealingStatus string + +const ( + //UndefinedSelfHealingStatus represents invalid status for self-healing operation + UndefinedSelfHealingStatus SelfHealingStatus = "Undefined" + //CreatedSelfHealingStatus represents when the failed challenge gets stored in DB + CreatedSelfHealingStatus SelfHealingStatus = "Created" + //InProgressSelfHealingStatus represents when the challenge is retrieved for self-healing + InProgressSelfHealingStatus SelfHealingStatus = "InProgress" + //FailedSelfHealingStatus represents when the reconstruction has been completed + FailedSelfHealingStatus SelfHealingStatus = "Failed" + //CompletedSelfHealingStatus represents when the reconstruction has been completed + CompletedSelfHealingStatus SelfHealingStatus = "Completed" + //ReconstructionNotRequiredSelfHealingStatus represents when the reconstruction has been completed + ReconstructionNotRequiredSelfHealingStatus SelfHealingStatus = "ReconstructionNotRequired" +) + +// SelfHealingChallenge represents self-healing challenge +type SelfHealingChallenge struct { + ID int64 + ChallengeID string + MerkleRoot string + FileHash string + ChallengingNode string + RespondingNode string + VerifyingNode string + ReconstructedFileHash []byte + Status SelfHealingStatus + CreatedAt time.Time + UpdatedAt time.Time +} + +// Fields represents status log +type Fields map[string]interface{} + +// Details represents status log details with additional fields +type Details struct { + Message string + Fields Fields +} + +// Stringify convert the Details' struct to stringify json +func (d *Details) Stringify() string { + details, err := json.Marshal(&d) + if err != nil { + 
log.WithError(err).Error("unable to marshal task history details") + return "" + } + + return string(details) +} + +// NewDetails initializes and return the valid detail object +func NewDetails(msg string, fields Fields) *Details { + return &Details{ + Message: msg, + Fields: fields, + } +} + +// IsValid checks if the status log map is not empty +func (f Fields) IsValid() bool { + return len(f) != 0 +} diff --git a/proto/proto.go b/proto/proto.go new file mode 100644 index 00000000..34045007 --- /dev/null +++ b/proto/proto.go @@ -0,0 +1,6 @@ +package proto + +const ( + // MetadataKeySessID is unique numeric for every registration process, encompasses for all connections. + MetadataKeySessID = "sessID" +) diff --git a/proto/supernode/action/cascade/service.proto b/proto/supernode/action/cascade/service.proto new file mode 100644 index 00000000..3b3aa408 --- /dev/null +++ b/proto/supernode/action/cascade/service.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package cascade; + +option go_package = "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade"; + +service CascadeService { + rpc UploadInputData (UploadInputDataRequest) returns (UploadInputDataResponse); +} + +message UploadInputDataRequest { + string filename = 1; + string action_id = 2; + string data_hash = 3; + int32 rq_ic = 4; + int32 rq_max = 5; +} + +message UploadInputDataResponse { + bool success = 1; + string message = 2; +} diff --git a/proto/supernode/agents/.gitkeep b/proto/supernode/agents/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/proto/supernode/nft/.gitkeep b/proto/supernode/nft/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/proto/supernode/supernode/cascade_service.proto b/proto/supernode/supernode/cascade_service.proto new file mode 100644 index 00000000..ff88c41c --- /dev/null +++ b/proto/supernode/supernode/cascade_service.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +option go_package = 
"github.com/LumeraProtocol/supernode/gen/supernode/supernode";

package supernode;

// This proto defines the gRPC methods that a SN uses to talk to other SNs during Cascade Registration.

service CascadeService {
  // Session informs primary supernode about its `nodeID` and `sessID` it wants to connect to.
  // The stream is used by the parties to inform each other about the cancellation of the task.
  rpc Session(stream SessionRequest) returns (stream SessionReply);

  // SendCascadeTicketSignature sends the signature from secondary supernodes for the given cascade registration session id to the primary supernode
  rpc SendCascadeTicketSignature(SendTicketSignatureRequest) returns (SendTicketSignatureReply);
}

message SessionRequest {
  string nodeID = 1;
}

message SessionReply {
  string sessID = 1;
}

message SendTicketSignatureRequest {
  string nodeID = 1;
  bytes signature = 2;
  bytes data = 3;
  bytes rqFile = 4;
  EncoderParameters rqEncodeParams = 5;
}

message SendTicketSignatureReply {}

message EncoderParameters {
  bytes Oti = 1;
}
diff --git a/sdk/action/.gitkeep b/sdk/action/.gitkeep
deleted file mode 100644
index e69de29b..00000000
diff --git a/sdk/action/client/check_health.go b/sdk/action/client/check_health.go
deleted file mode 100644
index 8bf2ae9e..00000000
--- a/sdk/action/client/check_health.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package client
-
-import (
-	"context"
-	"errors"
-
-	pb "github.com/LumeraProtocol/supernode/gen/supernode/action"
-	"github.com/LumeraProtocol/supernode/pkg/logtrace"
-	"github.com/LumeraProtocol/supernode/pkg/net"
-)
-
-type HealthCheckResponse struct {
-	Status string `json:"status"`
-}
-
-func (c *Client) CheckHealth(ctx context.Context) (HealthCheckResponse, error) {
-	ctx = net.AddCorrelationID(ctx)
-
-	fields := logtrace.Fields{
-		logtrace.FieldMethod: "CheckHealth",
-		logtrace.FieldModule: logtrace.ValueActionSDK,
-	}
-	logtrace.Info(ctx, "performing health check", fields)
-
-	res, err := c.service.CheckHealth(ctx,
&pb.GetHealthCheckRequest{}) - if err != nil { - fields[logtrace.FieldError] = err.Error() - logtrace.Error(ctx, "health check failed", fields) - return HealthCheckResponse{}, err - } - - if res == nil { - logtrace.Warn(ctx, "action server returned nil response", fields) - return HealthCheckResponse{}, errors.New("received nil response from action-server") - } - fields[logtrace.FieldStatus] = res.Status - - logtrace.Info(ctx, "health check successful", fields) - return HealthCheckResponse{Status: res.Status}, nil -} diff --git a/sdk/action/client/client.go b/sdk/action/client/client.go deleted file mode 100644 index bd614be1..00000000 --- a/sdk/action/client/client.go +++ /dev/null @@ -1,35 +0,0 @@ -package client - -import ( - "context" - - "google.golang.org/grpc" - - pb "github.com/LumeraProtocol/supernode/gen/supernode/action" -) - -type Client struct { - conn *grpc.ClientConn - service pb.ActionServiceClient -} - -func NewClient(address string, opts ...Option) (*Client, error) { - options := NewDefaultOptions() - - for _, opt := range opts { - opt(options) - } - - ctx, cancel := context.WithTimeout(context.Background(), options.Timeout) - defer cancel() - - conn, err := grpc.DialContext(ctx, address, grpc.WithInsecure()) // Example - if err != nil { - return nil, err - } - - return &Client{ - conn: conn, - service: pb.NewActionServiceClient(conn), - }, nil -} diff --git a/sdk/action/client/options.go b/sdk/action/client/options.go deleted file mode 100644 index a3d709fd..00000000 --- a/sdk/action/client/options.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import "time" - -const ( - defaultTimeout = 10 * time.Second -) - -// Options holds configuration options for the client -type Options struct { - Timeout time.Duration -} - -// Option defines the functional option signature -type Option func(options *Options) - -func NewDefaultOptions() *Options { - return &Options{ - Timeout: defaultTimeout, - } -} - -// WithTimeout sets a custom timeout for gRPC 
connection -func WithTimeout(d time.Duration) Option { - return func(o *Options) { - o.Timeout = d - } -} diff --git a/sdk/nft/.gitkeep b/sdk/nft/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/supernode/cmd/.gitkeep b/supernode/cmd/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/supernode/cmd/keys.go b/supernode/cmd/keys.go new file mode 100644 index 00000000..82cde93c --- /dev/null +++ b/supernode/cmd/keys.go @@ -0,0 +1,17 @@ +package cmd + +import ( + "github.com/spf13/cobra" +) + +// keysCmd represents the keys command +var keysCmd = &cobra.Command{ + Use: "keys", + Short: "Manage keys", + Long: `Manage keys for the Supernode. +This command provides subcommands for adding, recovering, and listing keys.`, +} + +func init() { + rootCmd.AddCommand(keysCmd) +} diff --git a/supernode/cmd/keys_add.go b/supernode/cmd/keys_add.go new file mode 100644 index 00000000..d5cb4964 --- /dev/null +++ b/supernode/cmd/keys_add.go @@ -0,0 +1,71 @@ +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/LumeraProtocol/supernode/pkg/keyring" +) + +// keysAddCmd represents the add command for creating a new key +var keysAddCmd = &cobra.Command{ + Use: "add [name]", + Short: "Add a new key", + Long: `Add a new key with the given name. +This command will generate a new mnemonic and derive a key pair from it. +The generated key pair will be stored in the keyring. 
+ +Example: + supernode keys add mykey`, + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var keyName string + if len(args) > 0 { + keyName = args[0] + } else { + // Use the key_name from config file as default + keyName = appConfig.SupernodeConfig.KeyName + } + + if keyName == "" { + return fmt.Errorf("key name is required") + } + + // Initialize keyring using config values + kr, err := keyring.InitKeyring( + appConfig.KeyringConfig.Backend, + appConfig.KeyringConfig.Dir, + ) + if err != nil { + return fmt.Errorf("failed to initialize keyring: %w", err) + } + + // Generate mnemonic and create new account + // Default to 256 bits of entropy (24 words) + mnemonic, info, err := keyring.CreateNewAccount(kr, keyName, 256) + if err != nil { + return fmt.Errorf("failed to create new account: %w", err) + } + + // Get address + address, err := info.GetAddress() + if err != nil { + return fmt.Errorf("failed to get address: %w", err) + } + + // Print results + fmt.Println("Key generated successfully!") + fmt.Printf("- Name: %s\n", info.Name) + fmt.Printf("- Address: %s\n", address.String()) + fmt.Printf("- Mnemonic: %s\n", mnemonic) + fmt.Println("\nIMPORTANT: Write down the mnemonic and keep it in a safe place.") + fmt.Println("The mnemonic is the only way to recover your account if you forget your password.") + + return nil + }, +} + +func init() { + keysCmd.AddCommand(keysAddCmd) +} diff --git a/supernode/cmd/keys_recover.go b/supernode/cmd/keys_recover.go new file mode 100644 index 00000000..8f8e3ad4 --- /dev/null +++ b/supernode/cmd/keys_recover.go @@ -0,0 +1,81 @@ +package cmd + +import ( + "bufio" + "fmt" + "os" + "strings" + + "github.com/spf13/cobra" + + "github.com/LumeraProtocol/supernode/pkg/keyring" +) + +// keysRecoverCmd represents the recover command for recovering a key from mnemonic +var keysRecoverCmd = &cobra.Command{ + Use: "recover [name]", + Short: "Recover a key using a mnemonic", + Long: `Recover a key using a 
BIP39 mnemonic. +This command will derive a key pair from the provided mnemonic and store it in the keyring. + +Example: + supernode keys recover mykey`, + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var keyName string + if len(args) > 0 { + keyName = args[0] + } else { + // Use the key_name from config file as default + keyName = appConfig.SupernodeConfig.KeyName + } + + if keyName == "" { + return fmt.Errorf("key name is required") + } + + // Initialize keyring using config values + kr, err := keyring.InitKeyring( + appConfig.KeyringConfig.Backend, + appConfig.KeyringConfig.Dir, + ) + if err != nil { + return fmt.Errorf("failed to initialize keyring: %w", err) + } + + // Prompt for mnemonic or use from config + var mnemonic string + + fmt.Print("Enter your mnemonic: ") + reader := bufio.NewReader(os.Stdin) + mnemonic, err = reader.ReadString('\n') + if err != nil { + return fmt.Errorf("failed to read mnemonic: %w", err) + } + mnemonic = strings.TrimSpace(mnemonic) + + // Recover account from mnemonic + info, err := keyring.RecoverAccountFromMnemonic(kr, keyName, mnemonic) + if err != nil { + return fmt.Errorf("failed to recover account: %w", err) + } + + // Get address + address, err := info.GetAddress() + if err != nil { + return fmt.Errorf("failed to get address: %w", err) + } + + // Print results + fmt.Println("Key recovered successfully!") + fmt.Printf("- Name: %s\n", info.Name) + fmt.Printf("- Address: %s\n", address.String()) + + return nil + }, +} + +func init() { + keysCmd.AddCommand(keysRecoverCmd) + // Remove all flags - we'll use config file only +} diff --git a/supernode/cmd/root.go b/supernode/cmd/root.go new file mode 100644 index 00000000..c574c313 --- /dev/null +++ b/supernode/cmd/root.go @@ -0,0 +1,54 @@ +package cmd + +import ( + "fmt" + "os" + + "github.com/LumeraProtocol/supernode/supernode/config" + "github.com/spf13/cobra" +) + +var ( + cfgFile string + appConfig *config.Config +) + +var rootCmd = 
&cobra.Command{ + Use: "supernode", + Short: "Lumera CLI tool for key management", + Long: `A command line tool for managing Lumera blockchain keys. +This application allows you to create and recover keys using mnemonics.`, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + // Skip config loading for help command + if cmd.Name() == "help" { + return nil + } + + // If config file path is not specified, use the default in current directory + if cfgFile == "" { + cfgFile = "config.yml" + } + + // Load configuration + var err error + appConfig, err = config.LoadConfig(cfgFile) + if err != nil { + return fmt.Errorf("failed to load config file %s: %w", cfgFile, err) + } + + return nil + }, +} + +// Execute adds all child commands to the root command and sets flags appropriately. +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func init() { + // Allow user to override config file location with --config flag + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "Config file path (default is ./config.yaml)") +} diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go new file mode 100644 index 00000000..1f372c25 --- /dev/null +++ b/supernode/cmd/start.go @@ -0,0 +1,81 @@ +package cmd + +import ( + "context" + "log/slog" + "os" + "os/signal" + "syscall" + + "github.com/LumeraProtocol/supernode/pkg/keyring" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + "github.com/spf13/cobra" +) + +// startCmd represents the start command +var startCmd = &cobra.Command{ + Use: "start", + Short: "Start the supernode", + Long: `Start the supernode service using the configuration defined in config.yaml. 
+The supernode will connect to the Lumera network and begin participating in the network.`, + RunE: func(cmd *cobra.Command, args []string) error { + // Initialize logging + logLevel := slog.LevelInfo + logtrace.Setup("supernode", "dev", logLevel) + + // Create context with correlation ID for tracing + ctx := logtrace.CtxWithCorrelationID(context.Background(), "supernode-start") + + // Log configuration info + logtrace.Info(ctx, "Starting supernode with configuration", logtrace.Fields{ + "config_file": cfgFile, + "keyring_dir": appConfig.KeyringConfig.Dir, + "key_name": appConfig.SupernodeConfig.KeyName, + }) + + // Initialize keyring + kr, err := keyring.InitKeyring( + appConfig.KeyringConfig.Backend, + appConfig.KeyringConfig.Dir, + ) + if err != nil { + logtrace.Error(ctx, "Failed to initialize keyring", logtrace.Fields{ + "error": err.Error(), + }) + return err + } + + // Initialize the supernode (next step) + supernode, err := NewSupernode(ctx, appConfig, kr) + if err != nil { + logtrace.Error(ctx, "Failed to initialize supernode", logtrace.Fields{ + "error": err.Error(), + }) + return err + } + + // Start the supernode + if err := supernode.Start(ctx); err != nil { + logtrace.Error(ctx, "Failed to start supernode", logtrace.Fields{ + "error": err.Error(), + }) + return err + } + + // Set up signal handling for graceful shutdown + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + + // Wait for termination signal + sig := <-sigCh + logtrace.Info(ctx, "Received signal, shutting down", logtrace.Fields{ + "signal": sig.String(), + }) + + return nil + }, +} + +func init() { + rootCmd.AddCommand(startCmd) +} diff --git a/supernode/cmd/supernode.go b/supernode/cmd/supernode.go new file mode 100644 index 00000000..28b0f91f --- /dev/null +++ b/supernode/cmd/supernode.go @@ -0,0 +1,179 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + "github.com/LumeraProtocol/supernode/p2p" + 
"github.com/LumeraProtocol/supernode/pkg/logtrace" + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/supernode/config" + "github.com/cosmos/cosmos-sdk/crypto/keyring" +) + +// Supernode represents a supernode in the Lumera network +type Supernode struct { + config *config.Config + lumeraClient lumera.Client + p2pService p2p.P2P + keyring keyring.Keyring + rqStore rqstore.Store + keyName string // String that represents the supernode account in keyring + accountAddress string // String that represents the supernode account address lemera12Xxxxx +} + +// NewSupernode creates a new supernode instance +func NewSupernode(ctx context.Context, config *config.Config, kr keyring.Keyring) (*Supernode, error) { + if config == nil { + return nil, fmt.Errorf("config is nil") + } + + // Initialize Lumera client + lumeraClient, err := initLumeraClient(ctx, config) + if err != nil { + return nil, fmt.Errorf("failed to initialize Lumera client: %w", err) + } + + // Initialize RaptorQ store for Cascade processing + rqStore, err := initRQStore(ctx, config) + if err != nil { + return nil, fmt.Errorf("failed to initialize RaptorQ store: %w", err) + } + + // Create the supernode instance + supernode := &Supernode{ + config: config, + lumeraClient: lumeraClient, + keyring: kr, + rqStore: rqStore, + keyName: config.SupernodeConfig.KeyName, + } + + return supernode, nil +} + +// Start starts all supernode services +func (s *Supernode) Start(ctx context.Context) error { + // Initialize p2p service + + // Verify that the key specified in config exists + keyInfo, err := s.keyring.Key(appConfig.SupernodeConfig.KeyName) + if err != nil { + logtrace.Error(ctx, "Key not found in keyring", logtrace.Fields{ + "key_name": appConfig.SupernodeConfig.KeyName, + "error": err.Error(), + }) + + // Provide helpful guidance + fmt.Printf("\nError: Key '%s' not found in keyring at %s\n", + 
appConfig.SupernodeConfig.KeyName, appConfig.KeyringConfig.Dir) + fmt.Println("\nPlease create the key first with one of these commands:") + fmt.Printf(" supernode keys add %s\n", appConfig.SupernodeConfig.KeyName) + fmt.Printf(" supernode keys recover %s\n", appConfig.SupernodeConfig.KeyName) + return fmt.Errorf("key not found") + } + + // Get the account address for logging + address, err := keyInfo.GetAddress() + if err != nil { + logtrace.Error(ctx, "Failed to get address from key", logtrace.Fields{ + "error": err.Error(), + }) + return err + } + + logtrace.Info(ctx, "Found valid key in keyring", logtrace.Fields{ + "key_name": appConfig.SupernodeConfig.KeyName, + "address": address.String(), + }) + + p2pConfig := &p2p.Config{ + ListenAddress: s.config.P2PConfig.ListenAddress, + Port: s.config.P2PConfig.Port, + DataDir: s.config.P2PConfig.DataDir, + BootstrapNodes: s.config.P2PConfig.BootstrapNodes, + ExternalIP: s.config.P2PConfig.ExternalIP, + ID: address.String(), + } + + logtrace.Info(ctx, "Initializing P2P service", logtrace.Fields{ + "listen_address": p2pConfig.ListenAddress, + "port": p2pConfig.Port, + "data_dir": p2pConfig.DataDir, + "supernode_id": address.String(), + }) + + p2pService, err := p2p.New(ctx, p2pConfig, s.lumeraClient, s.keyring, s.rqStore, nil, nil) + if err != nil { + return fmt.Errorf("failed to initialize p2p service: %w", err) + } + s.p2pService = p2pService + + // Run the p2p service + logtrace.Info(ctx, "Starting P2P service", logtrace.Fields{}) + if err := s.p2pService.Run(ctx); err != nil { + return fmt.Errorf("p2p service error: %w", err) + } + + return nil +} + +// Stop stops all supernode services +func (s *Supernode) Stop(ctx context.Context) error { + // Close the Lumera client connection + if s.lumeraClient != nil { + logtrace.Info(ctx, "Closing Lumera client", logtrace.Fields{}) + if err := s.lumeraClient.Close(); err != nil { + logtrace.Error(ctx, "Error closing Lumera client", logtrace.Fields{ + "error": err.Error(), + }) 
+ } + } + + return nil +} + +// initLumeraClient initializes the Lumera client based on configuration +func initLumeraClient(ctx context.Context, config *config.Config) (lumera.Client, error) { + if config == nil { + return nil, fmt.Errorf("config is nil") + } + + logtrace.Info(ctx, "Initializing Lumera client", logtrace.Fields{ + "grpc_addr": config.LumeraClientConfig.GRPCAddr, + "chain_id": config.LumeraClientConfig.ChainID, + "timeout": config.LumeraClientConfig.Timeout, + }) + + return lumera.NewClient( + ctx, + lumera.WithGRPCAddr(config.LumeraClientConfig.GRPCAddr), + lumera.WithChainID(config.LumeraClientConfig.ChainID), + lumera.WithTimeout(config.LumeraClientConfig.Timeout), + ) +} + +// initRQStore initializes the RaptorQ store for Cascade processing +func initRQStore(ctx context.Context, config *config.Config) (rqstore.Store, error) { + if config == nil { + return nil, fmt.Errorf("config is nil") + } + + // Create RaptorQ store directory if it doesn't exist + rqDir := config.P2PConfig.DataDir + "/rq" + if err := os.MkdirAll(rqDir, 0700); err != nil { + return nil, fmt.Errorf("failed to create RQ store directory: %w", err) + } + + // Create the SQLite file path + rqStoreFile := rqDir + "/rqstore.db" + + logtrace.Info(ctx, "Initializing RaptorQ store", logtrace.Fields{ + "file_path": rqStoreFile, + }) + + // Initialize RaptorQ store with SQLite + return rqstore.NewSQLiteRQStore(rqStoreFile) +} diff --git a/supernode/conf/.gitkeep b/supernode/conf/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/supernode/config.yml b/supernode/config.yml new file mode 100644 index 00000000..d9dc433a --- /dev/null +++ b/supernode/config.yml @@ -0,0 +1,26 @@ +# Supernode Configuration +supernode: + key_name: "mukey" # Account name for the supernode in keyring + ip_address: "0.0.0.0" + port: 4444 + data_dir: "~/.supernode" # Base directory in home folder + +# Keyring Configuration +keyring: + backend: "test" # Options: test, file, os + dir: 
"~/.supernode/keys" # Keyring directory in home folder + password: "keyring-password" # Only used for 'file' backend + +# P2P Network Configuration +p2p: + listen_address: "0.0.0.0" + port: 4445 + data_dir: "~/.supernode/data/p2p" # P2P data directory in home folder + bootstrap_nodes: "" # Comma-separated list of bootstrap peer addresses + external_ip: "" # External IP address for this node (if behind NAT) + +# Lumera Chain Configuration +lumera: + grpc_addr: "localhost:9090" + chain_id: "lumera" + timeout: 10 # Connection timeout in seconds \ No newline at end of file diff --git a/supernode/config/config.go b/supernode/config/config.go new file mode 100644 index 00000000..81c01848 --- /dev/null +++ b/supernode/config/config.go @@ -0,0 +1,122 @@ +package config + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/LumeraProtocol/supernode/pkg/logtrace" + "gopkg.in/yaml.v3" +) + +type SupernodeConfig struct { + KeyName string `yaml:"key_name"` + IpAddress string `yaml:"ip_address"` + Port uint16 `yaml:"port"` + DataDir string `yaml:"data_dir"` +} + +type KeyringConfig struct { + Backend string `yaml:"backend"` + Dir string `yaml:"dir"` + Password string `yaml:"password"` +} + +type P2PConfig struct { + ListenAddress string `yaml:"listen_address"` + Port uint16 `yaml:"port"` + DataDir string `yaml:"data_dir"` + BootstrapNodes string `yaml:"bootstrap_nodes"` + ExternalIP string `yaml:"external_ip"` +} + +type LumeraClientConfig struct { + GRPCAddr string `yaml:"grpc_addr"` + ChainID string `yaml:"chain_id"` + Timeout int `yaml:"timeout"` +} + +type Config struct { + SupernodeConfig `yaml:"supernode"` + KeyringConfig `yaml:"keyring"` + P2PConfig `yaml:"p2p"` + LumeraClientConfig `yaml:"lumera"` +} + +// LoadConfig loads the configuration from a file +func LoadConfig(filename string) (*Config, error) { + ctx := context.Background() + + // Check if config file exists + absPath, err := filepath.Abs(filename) + if err != nil { + return nil, 
fmt.Errorf("error getting absolute path for config file: %w", err) + } + + logtrace.Info(ctx, "Loading configuration", logtrace.Fields{ + "path": absPath, + }) + + if _, err := os.Stat(absPath); os.IsNotExist(err) { + return nil, fmt.Errorf("config file %s does not exist", absPath) + } + + data, err := os.ReadFile(absPath) + if err != nil { + return nil, fmt.Errorf("error reading config file: %w", err) + } + + var config Config + if err := yaml.Unmarshal(data, &config); err != nil { + return nil, fmt.Errorf("error parsing config file: %w", err) + } + + // Expand home directory in all paths + homeDir, err := os.UserHomeDir() + if err != nil { + return nil, fmt.Errorf("failed to get home directory: %w", err) + } + + // Process SupernodeConfig + if config.SupernodeConfig.DataDir != "" { + config.SupernodeConfig.DataDir = expandPath(config.SupernodeConfig.DataDir, homeDir) + if err := os.MkdirAll(config.SupernodeConfig.DataDir, 0700); err != nil { + return nil, fmt.Errorf("failed to create Supernode data directory: %w", err) + } + } + + // Process KeyringConfig + if config.KeyringConfig.Dir != "" { + config.KeyringConfig.Dir = expandPath(config.KeyringConfig.Dir, homeDir) + if err := os.MkdirAll(config.KeyringConfig.Dir, 0700); err != nil { + return nil, fmt.Errorf("failed to create keyring directory: %w", err) + } + } + + // Process P2PConfig + if config.P2PConfig.DataDir != "" { + config.P2PConfig.DataDir = expandPath(config.P2PConfig.DataDir, homeDir) + if err := os.MkdirAll(config.P2PConfig.DataDir, 0700); err != nil { + return nil, fmt.Errorf("failed to create P2P data directory: %w", err) + } + } + + logtrace.Info(ctx, "Configuration loaded successfully", logtrace.Fields{}) + return &config, nil +} + +// expandPath handles path expansion including home directory (~) +func expandPath(path string, homeDir string) string { + // Handle home directory expansion + if len(path) > 0 && path[0] == '~' { + path = filepath.Join(homeDir, path[1:]) + } + + // If path is not 
absolute, make it absolute based on home directory + if !filepath.IsAbs(path) { + path = filepath.Join(homeDir, path) + } + + return path +} diff --git a/supernode/main.go b/supernode/main.go new file mode 100644 index 00000000..4d4febe1 --- /dev/null +++ b/supernode/main.go @@ -0,0 +1,22 @@ +package main + +import ( + "context" + + "github.com/LumeraProtocol/supernode/pkg/keyring" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + "github.com/LumeraProtocol/supernode/supernode/cmd" +) + +func main() { + // Create initial context with correlation ID + ctx := logtrace.CtxWithCorrelationID(context.Background(), "supernode-main") + + // Initialize Cosmos SDK configuration + logtrace.Info(ctx, "Initializing Cosmos SDK configuration", logtrace.Fields{}) + keyring.InitSDKConfig() + + // Execute root command + logtrace.Info(ctx, "Executing CLI command", logtrace.Fields{}) + cmd.Execute() +} diff --git a/supernode/node/action/server/cascade/cascade_action_server.go b/supernode/node/action/server/cascade/cascade_action_server.go new file mode 100644 index 00000000..751cec60 --- /dev/null +++ b/supernode/node/action/server/cascade/cascade_action_server.go @@ -0,0 +1,20 @@ +package cascade + +import ( + cascadeGen "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade" + "github.com/LumeraProtocol/supernode/supernode/node/common" + "github.com/LumeraProtocol/supernode/supernode/services/cascade" +) + +type CascadeActionServer struct { + cascadeGen.UnimplementedCascadeServiceServer + + *common.RegisterCascade +} + +// NewCascadeActionServer returns a new CascadeActionServer instance. 
+func NewCascadeActionServer(service *cascade.CascadeService) *CascadeActionServer { + return &CascadeActionServer{ + RegisterCascade: common.NewRegisterCascade(service), + } +} diff --git a/supernode/node/action/server/cascade/upload_cascade_action_input.go b/supernode/node/action/server/cascade/upload_cascade_action_input.go new file mode 100644 index 00000000..df6a1eef --- /dev/null +++ b/supernode/node/action/server/cascade/upload_cascade_action_input.go @@ -0,0 +1,39 @@ +package cascade + +import ( + "context" + "fmt" + + pb "github.com/LumeraProtocol/supernode/gen/supernode/action/cascade" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + cascadeService "github.com/LumeraProtocol/supernode/supernode/services/cascade" +) + +func (s *CascadeActionServer) UploadInputData(ctx context.Context, req *pb.UploadInputDataRequest) (*pb.UploadInputDataResponse, error) { + fields := logtrace.Fields{ + logtrace.FieldMethod: "UploadInputData", + logtrace.FieldModule: "CascadeActionServer", + logtrace.FieldRequest: req, + } + logtrace.Info(ctx, "request to upload cascade input data received", fields) + + task, err := s.TaskFromMD(ctx) + if err != nil { + return nil, err + } + + res, err := task.UploadInputData(ctx, &cascadeService.UploadInputDataRequest{ + Filename: req.Filename, + ActionID: req.ActionId, + DataHash: req.DataHash, + RqIc: req.RqIc, + RqMax: req.RqMax, + }) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to upload input data", fields) + return &pb.UploadInputDataResponse{}, fmt.Errorf("cascade services upload input data error: %w", err) + } + + return &pb.UploadInputDataResponse{Success: res.Success, Message: res.Message}, nil +} diff --git a/supernode/node/common/register_cascade.go b/supernode/node/common/register_cascade.go new file mode 100644 index 00000000..9f4ef70f --- /dev/null +++ b/supernode/node/common/register_cascade.go @@ -0,0 +1,51 @@ +package common + +import ( + "context" + + 
"google.golang.org/grpc/metadata"

	"github.com/LumeraProtocol/supernode/pkg/errors"
	"github.com/LumeraProtocol/supernode/proto"
	"github.com/LumeraProtocol/supernode/supernode/services/cascade"
)

// RegisterCascade represents common grpc services for cascade registration.
type RegisterCascade struct {
	*cascade.CascadeService
}

// SessID retrieves SessID from the incoming request metadata; the boolean
// reports whether a session id was present.
func (service *RegisterCascade) SessID(ctx context.Context) (string, bool) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return "", false
	}

	mdVals := md.Get(proto.MetadataKeySessID)
	if len(mdVals) == 0 {
		return "", false
	}
	return mdVals[0], true
}

// TaskFromMD returns the registration task identified by the SessID found in
// the request metadata, or an error when no session id / task exists.
func (service *RegisterCascade) TaskFromMD(ctx context.Context) (*cascade.CascadeRegistrationTask, error) {
	sessID, ok := service.SessID(ctx)
	if !ok {
		return nil, errors.New("not found sessID in metadata")
	}

	task := service.Task(sessID)
	if task == nil {
		return nil, errors.Errorf("not found %q task", sessID)
	}
	return task, nil
}

// NewRegisterCascade returns a new RegisterCascade instance.
+func NewRegisterCascade(service *cascade.CascadeService) *RegisterCascade { + return &RegisterCascade{ + CascadeService: service, + } +} diff --git a/supernode/node/supernode/client/cascade_supernode_client.go b/supernode/node/supernode/client/cascade_supernode_client.go new file mode 100644 index 00000000..247143ca --- /dev/null +++ b/supernode/node/supernode/client/cascade_supernode_client.go @@ -0,0 +1,73 @@ +package client + +import ( + "context" + "io" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + pb "github.com/LumeraProtocol/supernode/gen/supernode/supernode" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + node "github.com/LumeraProtocol/supernode/supernode/node/supernode" +) + +type SupernodeCascadeActionClient struct { + sessID string + + conn *clientConn + client pb.CascadeServiceClient +} + +func (service *SupernodeCascadeActionClient) SessID() string { + return service.sessID +} + +func (service *SupernodeCascadeActionClient) Session(ctx context.Context, nodeID, sessID string) error { + service.sessID = sessID + + stream, err := service.client.Session(ctx) + if err != nil { + return errors.Errorf("open Health stream: %w", err) + } + + req := &pb.SessionRequest{ + NodeID: nodeID, + } + + if err := stream.Send(req); err != nil { + return errors.Errorf("send Session request: %w", err) + } + + resp, err := stream.Recv() + if err != nil { + if err == io.EOF { + return nil + } + switch status.Code(err) { + case codes.Canceled, codes.Unavailable: + return nil + } + return errors.Errorf("receive Session response: %w", err) + } + log.WithContext(ctx).WithField("resp", resp).Debug("Session response") + + go func() { + defer service.conn.Close() + for { + if _, err := stream.Recv(); err != nil { + return + } + } + }() + + return nil +} + +func newSupernodeCascadeActionClient(conn *clientConn) node.RegisterCascadeInterface { + return &SupernodeCascadeActionClient{ + conn: conn, + 
client: pb.NewCascadeServiceClient(conn), + } +} diff --git a/supernode/node/supernode/client/client.go b/supernode/node/supernode/client/client.go new file mode 100644 index 00000000..5d24e8b8 --- /dev/null +++ b/supernode/node/supernode/client/client.go @@ -0,0 +1,50 @@ +package client + +import ( + "context" + "time" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + netgrpcclient "github.com/LumeraProtocol/supernode/pkg/net/grpc/client" + "github.com/LumeraProtocol/supernode/pkg/random" + node "github.com/LumeraProtocol/supernode/supernode/node/supernode" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + _ "google.golang.org/grpc/keepalive" +) + +// this implements SN's GRPC methods that call another SN during Cascade Registration +// meaning - these methods implements client side of SN to SN GRPC communication + +type client struct { + *netgrpcclient.Client + keyRing keyring.Keyring + superNodeAccAddress string +} + +// Connect implements node.Client.Connect() +func (c *client) Connect(ctx context.Context, address string) (node.ConnectionInterface, error) { + clientOptions := netgrpcclient.DefaultClientOptions() + clientOptions.ConnWaitTime = 30 * time.Minute + clientOptions.MinConnectTimeout = 30 * time.Minute + clientOptions.EnableRetries = false + + id, _ := random.String(8, random.Base62Chars) + + grpcConn, err := c.Client.Connect(ctx, address, clientOptions) + if err != nil { + log.WithContext(ctx).WithError(err).Error("DialContext err") + return nil, errors.Errorf("dial address %s: %w", address, err) + } + + log.WithContext(ctx).Debugf("Connected to %s", address) + + conn := newClientConn(id, grpcConn) + + go func() { + //<-conn.Done() + log.WithContext(ctx).Debugf("Disconnected %s", grpcConn.Target()) + }() + + return conn, nil +} diff --git a/supernode/node/supernode/client/connection.go b/supernode/node/supernode/client/connection.go new file mode 100644 index 00000000..51471f99 --- /dev/null +++ 
b/supernode/node/supernode/client/connection.go @@ -0,0 +1,26 @@ +package client + +import ( + "google.golang.org/grpc" + + "github.com/LumeraProtocol/supernode/supernode/node/supernode" +) + +// clientConn represents grpc client connection. +type clientConn struct { + *grpc.ClientConn + + id string +} + +// RegisterCascade implements node.ConnectionInterface.RegisterSense() +func (conn *clientConn) RegisterCascade() supernode.RegisterCascadeInterface { + return newSupernodeCascadeActionClient(conn) +} + +func newClientConn(id string, conn *grpc.ClientConn) supernode.ConnectionInterface { + return &clientConn{ + ClientConn: conn, + id: id, + } +} diff --git a/supernode/node/supernode/client/send_cascade_ticket_signature.go b/supernode/node/supernode/client/send_cascade_ticket_signature.go new file mode 100644 index 00000000..ef247d30 --- /dev/null +++ b/supernode/node/supernode/client/send_cascade_ticket_signature.go @@ -0,0 +1,25 @@ +package client + +import ( + "context" + + pb "github.com/LumeraProtocol/supernode/gen/supernode/supernode" + "github.com/LumeraProtocol/supernode/pkg/raptorq" +) + +// SendCascadeTicketSignature implements SendCascadeTicketSignature +func (service *SupernodeCascadeActionClient) SendCascadeTicketSignature(ctx context.Context, nodeID string, signature []byte, data []byte, rqFile []byte, rqEncodeParams raptorq.EncoderParameters) error { + ctx = contextWithMDSessID(ctx, service.sessID) + + _, err := service.client.SendCascadeTicketSignature(ctx, &pb.SendTicketSignatureRequest{ + NodeID: nodeID, + Signature: signature, + Data: data, + RqFile: rqFile, + RqEncodeParams: &pb.EncoderParameters{ + Oti: rqEncodeParams.Oti, + }, + }) + + return err +} diff --git a/supernode/node/supernode/client/session.go b/supernode/node/supernode/client/session.go new file mode 100644 index 00000000..3dde0bbf --- /dev/null +++ b/supernode/node/supernode/client/session.go @@ -0,0 +1,13 @@ +package client + +import ( + "context" + + 
"github.com/LumeraProtocol/supernode/proto" + "google.golang.org/grpc/metadata" +) + +func contextWithMDSessID(ctx context.Context, sessID string) context.Context { + md := metadata.Pairs(proto.MetadataKeySessID, sessID) + return metadata.NewOutgoingContext(ctx, md) +} diff --git a/supernode/node/supernode/node_client_interface.go b/supernode/node/supernode/node_client_interface.go new file mode 100644 index 00000000..c7a64fe8 --- /dev/null +++ b/supernode/node/supernode/node_client_interface.go @@ -0,0 +1,44 @@ +package supernode + +import ( + "context" + + "github.com/LumeraProtocol/supernode/pkg/raptorq" +) + +// ClientInterface represents a base connection interface. +type ClientInterface interface { + // Connect connects to the server at the given address. + Connect(ctx context.Context, address string) (ConnectionInterface, error) +} + +// ConnectionInterface represents a client connection +type ConnectionInterface interface { + // Close closes connection. + Close() error + // RegisterCascade returns a new RegisterCascade stream + RegisterCascade() RegisterCascadeInterface +} + +// SuperNodePeerAPIInterface base interface for other Node API interfaces +type SuperNodePeerAPIInterface interface { + // SessID returns the taskID received from the server during the handshake. + SessID() (taskID string) + // Session sets up an initial connection with primary supernode, by telling sessID and its own nodeID. + Session(ctx context.Context, nodeID, sessID string) (err error) +} + +// revive:disable:exported + +// NodeMaker interface to make concrete node types +type NodeMaker interface { + MakeNode(conn ConnectionInterface) SuperNodePeerAPIInterface +} + +// RegisterCascadeInterface represents an interaction stream with supernodes for registering sense. 
+type RegisterCascadeInterface interface { + SuperNodePeerAPIInterface + + // Send signature of ticket to primary supernode + SendCascadeTicketSignature(ctx context.Context, nodeID string, signature []byte, data []byte, rqFile []byte, rqEncodeParams raptorq.EncoderParameters) error +} diff --git a/supernode/node/supernode/server/cascade/cascade_supernode_server.go b/supernode/node/supernode/server/cascade/cascade_supernode_server.go new file mode 100644 index 00000000..0a496b1f --- /dev/null +++ b/supernode/node/supernode/server/cascade/cascade_supernode_server.go @@ -0,0 +1,24 @@ +package cascade + +import ( + pb "github.com/LumeraProtocol/supernode/gen/supernode/supernode" + "github.com/LumeraProtocol/supernode/supernode/node/common" + "github.com/LumeraProtocol/supernode/supernode/services/cascade" +) + +// this implements SN's GRPC methods that are called by another SNs during Cascade Registration +// meaning - these methods implements server side of SN to SN GRPC communication + +// RegisterCascade represents grpc services for registration Sense tickets. +type RegisterCascade struct { + pb.UnimplementedCascadeServiceServer + + *common.RegisterCascade +} + +// NewRegisterCascade returns a new RegisterCascade instance. 
+func NewRegisterCascade(service *cascade.CascadeService) *RegisterCascade { + return &RegisterCascade{ + RegisterCascade: common.NewRegisterCascade(service), + } +} diff --git a/supernode/node/supernode/server/cascade/send_cascade_ticket_signature.go b/supernode/node/supernode/server/cascade/send_cascade_ticket_signature.go new file mode 100644 index 00000000..04aac103 --- /dev/null +++ b/supernode/node/supernode/server/cascade/send_cascade_ticket_signature.go @@ -0,0 +1,31 @@ +package cascade + +import ( + "context" + + pb "github.com/LumeraProtocol/supernode/gen/supernode/supernode" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + sc "github.com/LumeraProtocol/supernode/supernode/services/common" +) + +// SendCascadeTicketSignature implements supernode.RegisterCascadeServer.SendCascadeTicketSignature() +func (service *RegisterCascade) SendCascadeTicketSignature(ctx context.Context, req *pb.SendTicketSignatureRequest) (*pb.SendTicketSignatureReply, error) { + log.WithContext(ctx).WithField("req", req).Debugf("SendCascadeTicketSignature request") + task, err := service.TaskFromMD(ctx) + if err != nil { + return nil, err + } + + // Reject the peer's signature if its signed ticket fails validation. Previously the + // validation error was assigned but never checked, silently accepting invalid tickets. + if err := task.ValidateSignedTicketFromSecondaryNode(ctx, req.Data, req.NodeID, req.Signature, req.RqFile); err != nil { + return nil, errors.Errorf("validate signed ticket %w", err) + } + + if err := task.AddPeerTicketSignature(req.NodeID, req.Signature, sc.StatusAssetUploaded); err != nil { + return nil, errors.Errorf("add peer signature %w", err) + } + + return &pb.SendTicketSignatureReply{}, nil +} diff --git a/supernode/node/supernode/server/cascade/session.go b/supernode/node/supernode/server/cascade/session.go new file mode 100644 index 00000000..ed8670a4 --- /dev/null +++ b/supernode/node/supernode/server/cascade/session.go @@ -0,0 +1,81 @@ +package cascade + +import ( + "context" + "io" + + pb "github.com/LumeraProtocol/supernode/gen/supernode/supernode" +
"github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/supernode/services/cascade" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" +) + +// Session implements supernode.RegisterSenseServer.Session() +func (service *RegisterCascade) Session(stream pb.CascadeService_SessionServer) error { + ctx, cancel := context.WithCancel(stream.Context()) + defer cancel() + + var task *cascade.CascadeRegistrationTask + isTaskNew := false + + if sessID, ok := service.SessID(ctx); ok { + if task = service.Task(sessID); task == nil { + return errors.Errorf("not found %q task", sessID) + } + } else { + task = service.NewCascadeRegistrationTask() + isTaskNew = true + } + + go func() { + <-task.Done() + cancel() + }() + + if isTaskNew { + defer task.Cancel() + } + + peer, _ := peer.FromContext(ctx) + log.WithContext(ctx).WithField("addr", peer.Addr).Debugf("Session stream") + defer log.WithContext(ctx).WithField("addr", peer.Addr).Debugf("Session stream closed") + + req, err := stream.Recv() + if err != nil { + return errors.Errorf("receive handshake request: %w", err) + } + log.WithContext(ctx).WithField("req", req).Debugf("Session request") + + if err := task.NetworkHandler.SessionNode(ctx, req.NodeID); err != nil { + return err + } + + if !isTaskNew { + defer task.Cancel() + } + + resp := &pb.SessionReply{ + SessID: task.ID(), + } + if err := stream.Send(resp); err != nil { + return errors.Errorf("send handshake response: %w", err) + } + log.WithContext(ctx).WithField("resp", resp).Debugf("Session response") + + for { + if _, err := stream.Recv(); err != nil { + if err == io.EOF { + return nil + } + switch status.Code(err) { + case codes.Canceled, codes.Unavailable: + return nil + } + return errors.Errorf("handshake stream closed: %w", err) + } + } +} diff --git a/supernode/node/supernode/server/common/register_cascade.go 
b/supernode/node/supernode/server/common/register_cascade.go new file mode 100644 index 00000000..08cdc0ad --- /dev/null +++ b/supernode/node/supernode/server/common/register_cascade.go @@ -0,0 +1,56 @@ +package common + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/proto" + "github.com/LumeraProtocol/supernode/supernode/services/cascade" +) + +// RegisterCascade represents common grpc services for registration sense. +type RegisterCascade struct { + *cascade.CascadeService +} + +// SessID retrieves SessID from the metadata. +func (service *RegisterCascade) SessID(ctx context.Context) (string, bool) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", false + } + + mdVals := md.Get(proto.MetadataKeySessID) + if len(mdVals) == 0 { + return "", false + } + return mdVals[0], true +} + +// TaskFromMD returns task by SessID from the metadata. +func (service *RegisterCascade) TaskFromMD(ctx context.Context) (*cascade.CascadeRegistrationTask, error) { + sessID, ok := service.SessID(ctx) + if !ok { + return nil, errors.New("not found sessID in metadata") + } + + task := service.Task(sessID) + if task == nil { + return nil, errors.Errorf("not found %q task", sessID) + } + return task, nil +} + +func (service *RegisterCascade) Desc() *grpc.ServiceDesc { + return &grpc.ServiceDesc{ServiceName: "supernode.RegisterCascade", HandlerType: (*RegisterCascade)(nil)} +} + +// NewRegisterCascade returns a new RegisterSense instance. 
+func NewRegisterCascade(service *cascade.CascadeService) *RegisterCascade { + return &RegisterCascade{ + CascadeService: service, + } +} diff --git a/supernode/node/supernode/server/config.go b/supernode/node/supernode/server/config.go new file mode 100644 index 00000000..722857e0 --- /dev/null +++ b/supernode/node/supernode/server/config.go @@ -0,0 +1,20 @@ +package server + +const ( + defaultListenAddresses = "0.0.0.0" + defaultPort = 4444 +) + +// Config contains settings of the supernode server. +type Config struct { + ListenAddresses string `mapstructure:"listen_addresses" json:"listen_addresses,omitempty"` + Port int `mapstructure:"port" json:"port,omitempty"` +} + +// NewConfig returns a new Config instance. +func NewConfig() *Config { + return &Config{ + ListenAddresses: defaultListenAddresses, + Port: defaultPort, + } +} diff --git a/supernode/node/supernode/server/server.go b/supernode/node/supernode/server/server.go new file mode 100644 index 00000000..4ea26161 --- /dev/null +++ b/supernode/node/supernode/server/server.go @@ -0,0 +1,141 @@ +package server + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + + "github.com/LumeraProtocol/supernode/pkg/errgroup" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" +) + +type service interface { + Desc() *grpc.ServiceDesc +} + +// Server represents supernode server +type Server struct { + config *Config + services []service + name string + //secClient alts.SecClient + //secInfo *alts.SecInfo +} + +// Run starts the server +func (server *Server) Run(ctx context.Context) error { + grpclog.SetLoggerV2(log.NewLoggerWithErrorLevel()) + ctx = log.ContextWithPrefix(ctx, server.name) + + group, ctx := errgroup.WithContext(ctx) + + addresses := strings.Split(server.config.ListenAddresses, ",") + grpcServer := server.grpcServer(ctx) + if grpcServer == nil { + return fmt.Errorf("initialize grpc 
server failed") + } + + for _, address := range addresses { + addr := net.JoinHostPort(strings.TrimSpace(address), strconv.Itoa(server.config.Port)) + + group.Go(func() error { + return server.listen(ctx, addr, grpcServer) + }) + } + + return group.Wait() +} + +func (server *Server) listen(ctx context.Context, address string, grpcServer *grpc.Server) (err error) { + listen, err := net.Listen("tcp", address) + if err != nil { + return errors.Errorf("listen: %w", err).WithField("address", address) + } + + // The listener that will track connections. + /*listen = &connTrackListener{ + Listener: listen, + connTrack: server.connTrack, // connection tracker + }*/ + + errCh := make(chan error, 1) + go func() { + defer errors.Recover(func(recErr error) { err = recErr }) + log.WithContext(ctx).Infof("gRPC server listening on %q", address) + if err := grpcServer.Serve(listen); err != nil { + errCh <- errors.Errorf("serve: %w", err).WithField("address", address) + } + }() + + select { + case <-ctx.Done(): + log.WithContext(ctx).Infof("Shutting down gRPC server at %q", address) + grpcServer.GracefulStop() + case err := <-errCh: + return err + } + + return nil +} + +func (server *Server) grpcServer(ctx context.Context) *grpc.Server { + //if server.secClient == nil || server.secInfo == nil { + // log.WithContext(ctx).Errorln("secClient or secInfo don't initialize") + // return nil + //} + + //// Define the keep-alive parameters + //kaParams := keepalive.ServerParameters{ + // MaxConnectionIdle: 2 * time.Hour, + // MaxConnectionAge: 2 * time.Hour, + // MaxConnectionAgeGrace: 1 * time.Hour, + // Time: 1 * time.Hour, + // Timeout: 30 * time.Minute, + //} + // + //// Define the keep-alive enforcement policy + //kaPolicy := keepalive.EnforcementPolicy{ + // MinTime: 3 * time.Minute, // Minimum time a client should wait before sending keep-alive probes + // PermitWithoutStream: true, // Only allow pings when there are active streams + //} + + var grpcServer *grpc.Server + //if 
os.Getenv("INTEGRATION_TEST_ENV") == "true" { + // grpcServer = grpc.NewServer(middleware.UnaryInterceptor(), middleware.StreamInterceptor(), grpc.MaxSendMsgSize(100000000), + // grpc.MaxRecvMsgSize(100000000), grpc.KeepaliveParams(kaParams), // Use the keep-alive parameters + // grpc.KeepaliveEnforcementPolicy(kaPolicy)) + //} else { + // + // grpcServer = grpc.NewServer(middleware.UnaryInterceptor(), middleware.StreamInterceptor(), + // middleware.AltsCredential(server.secClient, server.secInfo), grpc.MaxSendMsgSize(100000000), + // grpc.MaxRecvMsgSize(100000000), grpc.KeepaliveParams(kaParams), // Use the keep-alive parameters + // grpc.KeepaliveEnforcementPolicy(kaPolicy)) + //} + grpcServer = grpc.NewServer() // NOTE(review): grpcServer was declared but never assigned while the option-laden constructors above are commented out, so RegisterService below panicked on a nil receiver; restore keepalive/ALTS options here when re-enabled + for _, service := range server.services { + log.WithContext(ctx).Debugf("Register services %q", service.Desc().ServiceName) + grpcServer.RegisterService(service.Desc(), service) + } + + return grpcServer +} + +// New returns a new Server instance. +func New(config *Config, name string, + //secClient alts.SecClient, + //secInfo *alts.SecInfo, + services ...service) *Server { + return &Server{ + config: config, + //secClient: secClient, + //secInfo: secInfo, + services: services, + name: name, + } +} diff --git a/supernode/services/cascade/cascade_reg_node.go b/supernode/services/cascade/cascade_reg_node.go new file mode 100644 index 00000000..cacb4594 --- /dev/null +++ b/supernode/services/cascade/cascade_reg_node.go @@ -0,0 +1,20 @@ +package cascade + +import ( + node "github.com/LumeraProtocol/supernode/supernode/node/supernode" +) + +// RegisterCascadeNodeMaker makes concrete instance of CascadeRegistrationNode +type RegisterCascadeNodeMaker struct { + node.NodeMaker +} + +// MakeNode makes concrete instance of CascadeRegistrationNode +func (maker RegisterCascadeNodeMaker) MakeNode(conn node.ConnectionInterface) node.SuperNodePeerAPIInterface { + return &CascadeRegistrationNode{RegisterCascadeInterface: conn.RegisterCascade()} +} + +// CascadeRegistrationNode represent supernode 
connection. +type CascadeRegistrationNode struct { + node.RegisterCascadeInterface +} diff --git a/supernode/services/cascade/config.go b/supernode/services/cascade/config.go new file mode 100644 index 00000000..caa145fb --- /dev/null +++ b/supernode/services/cascade/config.go @@ -0,0 +1,27 @@ +package cascade + +import ( + "github.com/LumeraProtocol/supernode/supernode/services/common" +) + +const ( + defaultNumberConnectedNodes = 2 + defaultPreburntTxMinConfirmations = 3 +) + +// Config contains settings of the registering Nft. +type Config struct { + common.Config `mapstructure:",squash" json:"-"` + + RaptorQServiceAddress string `mapstructure:"-" json:"-"` + RqFilesDir string + + NumberConnectedNodes int `mapstructure:"-" json:"number_connected_nodes,omitempty"` +} + +// NewConfig returns a new Config instance. +func NewConfig() *Config { + return &Config{ + NumberConnectedNodes: defaultNumberConnectedNodes, + } +} diff --git a/supernode/services/cascade/service.go b/supernode/services/cascade/service.go new file mode 100644 index 00000000..5a564458 --- /dev/null +++ b/supernode/services/cascade/service.go @@ -0,0 +1,67 @@ +package cascade + +import ( + "github.com/LumeraProtocol/supernode/p2p" + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/raptorq" + "github.com/LumeraProtocol/supernode/pkg/storage" + "github.com/LumeraProtocol/supernode/pkg/storage/queries" + "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" + node "github.com/LumeraProtocol/supernode/supernode/node/supernode" + "github.com/LumeraProtocol/supernode/supernode/services/common" +) + +type CascadeService struct { + *common.SuperNodeService + config *Config + + lumeraClient lumera.Client + raptorQ raptorq.RaptorQ + raptorQClient raptorq.ClientInterface + + nodeClient node.ClientInterface + rqstore rqstore.Store + historyDB queries.LocalStoreInterface +} + +// NewCascadeRegistrationTask runs a new task of the registration Sense and returns its 
taskID. +func (s *CascadeService) NewCascadeRegistrationTask() *CascadeRegistrationTask { + task := NewCascadeRegistrationTask(s) + s.Worker.AddTask(task) + + return task +} + +// Task returns the task of the Sense registration by the given id. +func (s *CascadeService) Task(id string) *CascadeRegistrationTask { + if s.Worker.Task(id) == nil { + return nil + } + + return s.Worker.Task(id).(*CascadeRegistrationTask) +} + +// NewCascadeService returns a new CascadeService instance. +func NewCascadeService(config *Config, + lumera lumera.Client, + fileStorage storage.FileStorageInterface, + nodeClient node.ClientInterface, + p2pClient p2p.Client, + rqC raptorq.RaptorQ, + rqClient raptorq.ClientInterface, + rqstore rqstore.Store, +) *CascadeService { + return &CascadeService{ + config: config, + SuperNodeService: common.NewSuperNodeService(fileStorage, p2pClient), + lumeraClient: lumera, + nodeClient: nodeClient, + raptorQ: rqC, + raptorQClient: rqClient, + rqstore: rqstore, + } +} + +func (s *CascadeService) GetSNAddress() string { + return s.config.SupernodeAccountAddress // FIXME : verify +} diff --git a/supernode/services/cascade/task.go b/supernode/services/cascade/task.go new file mode 100644 index 00000000..ea3c7026 --- /dev/null +++ b/supernode/services/cascade/task.go @@ -0,0 +1,63 @@ +package cascade + +import ( + "context" + + "github.com/LumeraProtocol/supernode/pkg/raptorq" + "github.com/LumeraProtocol/supernode/pkg/storage/files" + "github.com/LumeraProtocol/supernode/supernode/services/common" +) + +type RQInfo struct { + rqIDsIC uint32 + rqIDs []string + rqIDEncodeParams raptorq.EncoderParameters + + rqIDsFile []byte + rawRqFile []byte + rqIDFiles [][]byte +} + +// CascadeRegistrationTask is the task of registering new Sense. 
+type CascadeRegistrationTask struct { + RQInfo + *CascadeService + + *common.SuperNodeTask + *common.RegTaskHelper + storage *common.StorageHandler + + Asset *files.File // TODO : remove + assetSizeBytes int + dataHash string + + creatorSignature []byte +} + +const ( + logPrefix = "cascade" +) + +// Run starts the task +func (task *CascadeRegistrationTask) Run(ctx context.Context) error { + return task.RunHelper(ctx, task.removeArtifacts) +} + +func (task *CascadeRegistrationTask) removeArtifacts() { + task.RemoveFile(task.Asset) +} + +// NewCascadeRegistrationTask returns a new Task instance. +func NewCascadeRegistrationTask(service *CascadeService) *CascadeRegistrationTask { + + task := &CascadeRegistrationTask{ + SuperNodeTask: common.NewSuperNodeTask(logPrefix, service.historyDB), + CascadeService: service, + storage: common.NewStorageHandler(service.P2PClient, service.raptorQClient, + service.config.RaptorQServiceAddress, service.config.RqFilesDir, service.rqstore), + } + + task.RegTaskHelper = common.NewRegTaskHelper(task.SuperNodeTask, service.lumeraClient) + + return task +} diff --git a/supernode/services/cascade/upload.go b/supernode/services/cascade/upload.go new file mode 100644 index 00000000..00970198 --- /dev/null +++ b/supernode/services/cascade/upload.go @@ -0,0 +1,268 @@ +package cascade + +import ( + "context" + "encoding/hex" + "encoding/json" + + actiontypes "github.com/LumeraProtocol/lumera/x/action/types" + "github.com/LumeraProtocol/supernode/pkg/lumera/modules/supernode" + "github.com/cosmos/cosmos-sdk/client/grpc/cmtservice" + + ct "github.com/LumeraProtocol/supernode/pkg/common/task" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + "github.com/LumeraProtocol/supernode/pkg/raptorq" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type UploadInputDataRequest struct { + ActionID string + Filename string + 
DataHash string + RqIc int32 + RqMax int32 +} + +type UploadInputDataResponse struct { + Success bool + Message string +} + +func (task *CascadeRegistrationTask) UploadInputData(ctx context.Context, req *UploadInputDataRequest) (*UploadInputDataResponse, error) { + fields := logtrace.Fields{ + logtrace.FieldMethod: "UploadInputData", + logtrace.FieldRequest: req, + } + + actionRes, err := task.lumeraClient.Action().GetAction(ctx, req.ActionID) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to get action", fields) + return nil, status.Errorf(codes.Internal, "failed to get action") + } + if actionRes.GetAction().ActionID == "" { + logtrace.Error(ctx, "action not found", fields) + return nil, status.Errorf(codes.Internal, "action not found") + } + actionDetails := actionRes.GetAction() + logtrace.Info(ctx, "action has been retrieved", fields) + + latestBlock, err := task.lumeraClient.Node().GetLatestBlock(ctx) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to get latest block", fields) + return nil, status.Errorf(codes.Internal, "failed to get latest block") + } + latestBlockHeight := uint64(latestBlock.GetSdkBlock().GetHeader().Height) + latestBlockHash := latestBlock.GetBlockId().GetHash() + fields[logtrace.FieldBlockHeight] = latestBlockHeight + logtrace.Info(ctx, "latest block has been retrieved", fields) + + topSNsRes, err := task.lumeraClient.SuperNode().GetTopSuperNodesForBlock(ctx, latestBlockHeight) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to get top SNs", fields) + return nil, status.Errorf(codes.Internal, "failed to get top SNs") + } + logtrace.Info(ctx, "top sns have been fetched", fields) + + if !supernode.Exists(topSNsRes.Supernodes, task.config.SupernodeAccountAddress) { + logtrace.Error(ctx, "current supernode do not exist in the top sns list", fields) + return nil, status.Errorf(codes.Internal, "current supernode 
does not exist in the top sns list") + } + logtrace.Info(ctx, "current supernode exists in the top sns list", fields) + + if req.DataHash != actionDetails.Metadata.GetCascadeMetadata().DataHash { + logtrace.Error(ctx, "data hash doesn't match", fields) + return nil, status.Errorf(codes.Internal, "data hash doesn't match") + } + logtrace.Info(ctx, "request data-hash has been matched with the action data-hash", fields) + + // FIXME : use proper file + task.rqIDsIC, task.rqIDs, + task.rqIDsFile, task.rqIDEncodeParams, task.creatorSignature, err = task.raptorQ.GenRQIdentifiersFiles(ctx, + fields, + nil, + string(latestBlockHash), actionDetails.GetCreator(), + uint32(actionDetails.Metadata.GetCascadeMetadata().RqMax), + ) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to generate RQID Files", fields) + return nil, status.Errorf(codes.Internal, "failed to generate RQID Files") + } + logtrace.Info(ctx, "rq symbols have been generated", fields) + + ticket, err := task.createCascadeActionTicket(ctx, actionDetails, *latestBlock) + if err != nil { + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to create cascade ticket", fields) + return nil, status.Errorf(codes.Internal, "failed to create cascade ticket") + } + logtrace.Info(ctx, "cascade ticket created", fields) + + switch task.NetworkHandler.IsPrimary() { + case true: + <-task.NewAction(func(ctx context.Context) error { + logtrace.Info(ctx, "primary node flow, waiting for signature from peers", fields) + for { + select { + case <-ctx.Done(): + err = ctx.Err() + if err != nil { + logtrace.Info(ctx, "waiting for signature from peers cancelled or timeout", fields) + } + + logtrace.Info(ctx, "ctx done return from Validate & Register", fields) + return nil + case <-task.AllSignaturesReceivedChn: + logtrace.Info(ctx, "all signature received so start validation", fields) + + // TODO : MsgFinalizeAction + + return nil + } + } + }) + case false: + 
<-task.NewAction(func(ctx context.Context) error { + logtrace.Info(ctx, "secondary node flow, sending data with signature to primary node for validation", fields) + + if err = task.signAndSendCascadeTicket(ctx, task.NetworkHandler.ConnectedTo == nil, ticket, task.rqIDsFile, task.rqIDEncodeParams); err != nil { // FIXME : use the right data + fields[logtrace.FieldError] = err.Error() + logtrace.Error(ctx, "failed to sign & send cascade ticket to the primary node", fields) + return status.Errorf(codes.Internal, "failed to sign and send cascade ticket") + } + + return nil + }) + } + + return &UploadInputDataResponse{ + Success: true, + Message: "successfully uploaded input data", + }, nil +} + +// sign and send NFT ticket if not primary +func (task *CascadeRegistrationTask) signAndSendCascadeTicket(ctx context.Context, isPrimary bool, ticket []byte, data []byte, rqEncodeParams raptorq.EncoderParameters) (err error) { + secondaryNodeSignature, err := task.lumeraClient.Node().Sign(task.config.SupernodeAccountAddress, ticket) + if err != nil { + return errors.Errorf("sign ticket: %w", err) + } + + if !isPrimary { + log.WithContext(ctx).Info("send signed cascade ticket to primary node") + + cascadeNode, ok := task.NetworkHandler.ConnectedTo.SuperNodePeerAPIInterface.(*CascadeRegistrationNode) + if !ok { + return errors.Errorf("node is not SenseRegistrationNode") + } + + if err := cascadeNode.SendCascadeTicketSignature(ctx, task.config.SupernodeAccountAddress, secondaryNodeSignature, data, task.rqIDsFile, rqEncodeParams); err != nil { // FIXME : nodeID + return errors.Errorf("send signature to primary node %s at address %s: %w", task.NetworkHandler.ConnectedTo.ID, task.NetworkHandler.ConnectedTo.Address, err) + } + } + + return nil +} + +func (task *CascadeRegistrationTask) ValidateSignedTicketFromSecondaryNode(ctx context.Context, + ticket []byte, supernodeAccAddress string, supernodeSignature []byte, rqidFile []byte) error { + var err error + + fields := logtrace.Fields{ 
+ logtrace.FieldMethod: "ValidateSignedTicketFromSecondaryNode", + logtrace.FieldSupernodeAccountAddress: supernodeAccAddress, + } + logtrace.Info(ctx, "request has been received to validate signature", fields) + + err = task.lumeraClient.Node().Verify(supernodeAccAddress, ticket, supernodeSignature) + if err != nil { + log.WithContext(ctx).WithError(err).Errorf("error verifying the secondary-supernode signature") + return errors.Errorf("verify cascade ticket signature %w", err) + } + logtrace.Info(ctx, "seconday-supernode signature has been verified", fields) + + var cascadeData ct.CascadeTicket + err = json.Unmarshal(ticket, &cascadeData) + if err != nil { + log.WithContext(ctx).WithError(err).Errorf("unmarshal cascade ticket signature") + return errors.Errorf("unmarshal cascade ticket signature %w", err) + } + logtrace.Info(ctx, "data has been unmarshalled", fields) + + if err := task.validateRqIDs(ctx, rqidFile, &cascadeData); err != nil { + log.WithContext(ctx).WithError(err).Errorf("validate rqids files") + + return errors.Errorf("validate rq & dd id files %w", err) + } + + if err = task.validateRQSymbolID(ctx, &cascadeData); err != nil { + log.WithContext(ctx).WithError(err).Errorf("valdate rq ids inside rqids file") + err = errors.Errorf("generate rqids: %w", err) + return err // was `return nil`, which swallowed the RQ-symbol validation failure + } + + task.dataHash = cascadeData.DataHash + + return nil +} + +// validates RQIDs file +func (task *CascadeRegistrationTask) validateRqIDs(ctx context.Context, dd []byte, ticket *ct.CascadeTicket) error { + snAccAddresses := []string{ticket.Creator} + + var err error + task.rawRqFile, task.rqIDFiles, err = task.ValidateIDFiles(ctx, dd, + ticket.RQIDsIC, uint32(ticket.RQIDsMax), + ticket.RQIDs, 1, + snAccAddresses, + task.lumeraClient, + ticket.CreatorSignature, + ) + if err != nil { + return errors.Errorf("validate rq_ids file: %w", err) + } + + return nil +} + +// validates actual RQ Symbol IDs inside RQIDs file +func (task *CascadeRegistrationTask) validateRQSymbolID(ctx 
context.Context, ticket *ct.CascadeTicket) error { + + content, err := task.Asset.Bytes() + if err != nil { + return errors.Errorf("read image contents: %w", err) + } + + return task.storage.ValidateRaptorQSymbolIDs(ctx, + content /*uint32(len(task.Ticket.AppTicketData.RQIDs))*/, 1, + hex.EncodeToString([]byte(ticket.BlockHash)), ticket.Creator, + task.rawRqFile) +} + +func (task *CascadeRegistrationTask) createCascadeActionTicket(ctx context.Context, + actionDetails *actiontypes.Action, latestBlock cmtservice.GetLatestBlockResponse) ([]byte, error) { + t := ct.CascadeTicket{ + ActionID: actionDetails.ActionID, + BlockHeight: latestBlock.GetSdkBlock().GetHeader().Height, + BlockHash: latestBlock.GetBlockId().GetHash(), + Creator: actionDetails.GetCreator(), + CreatorSignature: task.creatorSignature, + DataHash: actionDetails.Metadata.GetCascadeMetadata().DataHash, + RQIDsIC: task.rqIDsIC, + RQIDs: task.rqIDs, + RQIDsMax: actionDetails.GetMetadata().GetCascadeMetadata().RqMax, + } + ticket, err := json.Marshal(t) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed marshall the cascade ticket") + } + + return ticket, nil +} diff --git a/supernode/services/common/config.go b/supernode/services/common/config.go new file mode 100644 index 00000000..684d1fd1 --- /dev/null +++ b/supernode/services/common/config.go @@ -0,0 +1,19 @@ +package common + +const ( + defaultNumberSuperNodes = 10 +) + +// Config contains common configuration of the services. 
+type Config struct { + SupernodeAccountAddress string + SupernodeIPAddress string + NumberSuperNodes int +} + +// NewConfig returns a new Config instance +func NewConfig() *Config { + return &Config{ + NumberSuperNodes: defaultNumberSuperNodes, + } +} diff --git a/supernode/services/common/network_handler.go b/supernode/services/common/network_handler.go new file mode 100644 index 00000000..d9897fbf --- /dev/null +++ b/supernode/services/common/network_handler.go @@ -0,0 +1,256 @@ +package common + +import ( + "context" + "fmt" + "sync" + + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/lumera" + supernode "github.com/LumeraProtocol/supernode/pkg/lumera/modules/supernode" + "github.com/LumeraProtocol/supernode/pkg/types" + node "github.com/LumeraProtocol/supernode/supernode/node/supernode" +) + +// NetworkHandler common functionality related for SNs Mesh and other interconnections +type NetworkHandler struct { + task *SuperNodeTask + lumeraHandler lumera.Client + + nodeMaker node.NodeMaker + NodeClient node.ClientInterface + + acceptedMu sync.Mutex + Accepted SuperNodePeerList + + meshedNodes []types.MeshedSuperNode + // valid only for secondary node + ConnectedTo *SuperNodePeer + + superNodeAccAddress string + minNumberConnectedNodes int +} + +// NewNetworkHandler creates instance of NetworkHandler +func NewNetworkHandler(task *SuperNodeTask, + nodeClient node.ClientInterface, + nodeMaker node.NodeMaker, + lc lumera.Client, + minNumberConnectedNodes int, +) *NetworkHandler { + return &NetworkHandler{ + task: task, + nodeMaker: nodeMaker, + lumeraHandler: lc, + NodeClient: nodeClient, + minNumberConnectedNodes: minNumberConnectedNodes, + } +} + +// MeshedNodesPastelID return PastelIDs of meshed nodes +func (h *NetworkHandler) MeshedNodesPastelID() []string { + var ids []string + for _, peer := range h.meshedNodes { + ids = append(ids, peer.NodeID) + } + return ids +} + +// 
Session is the handshake that assigns this task its role: primary or secondary.
+func (h *NetworkHandler) Session(_ context.Context, isPrimary bool) error {
+	if err := h.task.RequiredStatus(StatusTaskStarted); err != nil {
+		return err
+	}
+
+	<-h.task.NewAction(func(ctx context.Context) error {
+		if isPrimary {
+			log.WithContext(ctx).Debug("Acts as primary node")
+			h.task.UpdateStatus(StatusPrimaryMode)
+			return nil
+		}
+
+		log.WithContext(ctx).Debug("Acts as secondary node")
+		h.task.UpdateStatus(StatusSecondaryMode)
+
+		return nil
+	})
+	return nil
+}
+
+// AcceptedNodes waits for connection supernodes, as soon as there is the required amount returns them.
+// NOTE(review): h.Accepted is returned without taking acceptedMu while
+// SessionNode mutates the list under that lock — confirm callers only read
+// the list after StatusConnected has been reached.
+func (h *NetworkHandler) AcceptedNodes(serverCtx context.Context) (SuperNodePeerList, error) {
+	if err := h.task.RequiredStatus(StatusPrimaryMode); err != nil {
+		return nil, fmt.Errorf("AcceptedNodes: %w", err)
+	}
+
+	<-h.task.NewAction(func(ctx context.Context) error {
+		log.WithContext(ctx).Debug("Waiting for supernodes to connect")
+
+		// Block until StatusConnected is observed, or either the server
+		// context or the task context is cancelled.
+		sub := h.task.SubscribeStatus()
+		for {
+			select {
+			case <-serverCtx.Done():
+				return nil
+			case <-ctx.Done():
+				return nil
+			case status := <-sub():
+				if status.Is(StatusConnected) {
+					return nil
+				}
+			}
+		}
+	})
+	return h.Accepted, nil
+}
+
+// SessionNode accepts secondary node: resolves it on-chain, adds it to the
+// Accepted list, and flips the task to StatusConnected once enough
+// secondaries have registered.
+func (h *NetworkHandler) SessionNode(_ context.Context, nodeID string) error {
+	h.acceptedMu.Lock()
+	defer h.acceptedMu.Unlock()
+
+	if err := h.task.RequiredStatus(StatusPrimaryMode); err != nil {
+		return fmt.Errorf("SessionNode: %w", err)
+	}
+
+	var err error
+
+	<-h.task.NewAction(func(ctx context.Context) error {
+		if node := h.Accepted.ByID(nodeID); node != nil {
+			log.WithContext(ctx).WithField("nodeID", nodeID).Errorf("node is already registered")
+			err = errors.Errorf("node %q is already registered", nodeID)
+			return nil
+		}
+
+		var someNode *SuperNodePeer
+		someNode, err = h.toSupernodePeer(ctx, nodeID)
+		if err != nil {
+			log.WithContext(ctx).WithField("nodeID", nodeID).WithError(err).Errorf("get node by extID")
+			err = errors.Errorf("get node by extID %s: %w", nodeID, err)
+			return nil
+		}
+		h.Accepted.Add(someNode)
+
+		log.WithContext(ctx).WithField("nodeID", nodeID).Debug("Accept secondary node")
+
+		if len(h.Accepted) >= h.minNumberConnectedNodes {
+			h.task.UpdateStatus(StatusConnected)
+		}
+		return nil
+	})
+	return err
+}
+
+// ConnectTo connects to primary node and performs the session handshake.
+func (h *NetworkHandler) ConnectTo(_ context.Context, nodeID, sessID string) error {
+	if err := h.task.RequiredStatus(StatusSecondaryMode); err != nil {
+		return err
+	}
+
+	var err error
+
+	<-h.task.NewAction(func(ctx context.Context) error {
+		var someNode *SuperNodePeer
+		someNode, err = h.toSupernodePeer(ctx, nodeID)
+		if err != nil {
+			log.WithContext(ctx).WithField("nodeID", nodeID).WithError(err).Errorf("get node by extID")
+			return nil
+		}
+
+		// NOTE(review): `err :=` below declares a new (shadowed) variable, so a
+		// Connect failure is only logged and ConnectTo returns nil — confirm intended.
+		if err := someNode.Connect(ctx); err != nil {
+			log.WithContext(ctx).WithField("nodeID", nodeID).WithError(err).Errorf("connect to node")
+			return nil
+		}
+
+		if err = someNode.Session(ctx, h.superNodeAccAddress, sessID); err != nil {
+			log.WithContext(ctx).WithField("sessID", sessID).WithField("pastelID", h.superNodeAccAddress).WithError(err).Errorf("handshake with peer")
+			return nil
+		}
+
+		h.ConnectedTo = someNode
+		h.task.UpdateStatus(StatusConnected)
+		return nil
+	})
+	return err
+}
+
+// MeshNodes stores the info of all meshed supernodes for this task.
+func (h *NetworkHandler) MeshNodes(_ context.Context, meshedNodes []types.MeshedSuperNode) error {
+	if err := h.task.RequiredStatus(StatusConnected); err != nil {
+		return err
+	}
+	h.meshedNodes = meshedNodes
+
+	return nil
+}
+
+// CheckNodeInMeshedNodes checks if the node is in the active mesh (by nodeID)
+func (h *NetworkHandler) CheckNodeInMeshedNodes(nodeID string) error {
+	if h.meshedNodes == nil {
+		return errors.New("nil meshedNodes")
+	}
+
+	for _, node := range h.meshedNodes {
+		if node.NodeID == nodeID {
+			return nil
+		}
+	}
+
+	return errors.New("nodeID not found")
+}
+
+// 
toSupernodePeer resolves a SuperNodePeer for the given supernode account
+// address by querying the chain registration and taking the latest IP.
+// (Previously documented under the stale name "PastelNodeByExtKey".)
+func (h *NetworkHandler) toSupernodePeer(ctx context.Context, supernodeAccountAddress string) (*SuperNodePeer, error) {
+	sn, err := h.lumeraHandler.SuperNode().GetSupernodeBySupernodeAddress(ctx, supernodeAccountAddress)
+	if err != nil {
+		return nil, err
+	}
+
+	supernodeIP, err := supernode.GetLatestIP(sn)
+	if err != nil {
+		return nil, err
+	}
+
+	someNode := NewSuperNode(h.NodeClient, supernodeIP, supernodeAccountAddress, h.nodeMaker)
+	return someNode, nil
+}
+
+// Connect connects to grpc Server and setup pointer to concrete client wrapper
+func (node *SuperNodePeer) Connect(ctx context.Context) error {
+	connCtx, connCancel := context.WithTimeout(ctx, defaultConnectToNodeTimeout)
+	defer connCancel()
+
+	conn, err := node.ClientInterface.Connect(connCtx, node.Address)
+	if err != nil {
+		return err
+	}
+
+	node.ConnectionInterface = conn
+	node.SuperNodePeerAPIInterface = node.MakeNode(conn)
+	return nil
+}
+
+// CloseSNsConnections closes the connections to every accepted secondary and,
+// if this node connected out to a primary, that connection too. Close errors
+// are logged, never propagated; the function always returns nil.
+func (h *NetworkHandler) CloseSNsConnections(ctx context.Context) error {
+	for _, node := range h.Accepted {
+		if node.ConnectionInterface != nil {
+			if err := node.Close(); err != nil {
+				log.WithContext(ctx).WithError(err).Errorf("close connection to node %s", node.ID)
+			}
+		} else {
+			log.WithContext(ctx).Errorf("node %s has no connection", node.ID)
+		}
+
+	}
+
+	if h.ConnectedTo != nil {
+		if err := h.ConnectedTo.Close(); err != nil {
+			log.WithContext(ctx).WithError(err).Errorf("close connection to node %s", h.ConnectedTo.ID)
+		}
+	}
+
+	return nil
+}
+
+// IsPrimary reports whether this handler never connected out to a primary.
+// NOTE(review): a secondary that has not yet called ConnectTo also reports
+// true — confirm callers only use this after the mesh is established.
+func (h *NetworkHandler) IsPrimary() bool {
+	return h.ConnectedTo == nil
+}
diff --git a/supernode/services/common/node_peer.go b/supernode/services/common/node_peer.go
new file mode 100644
index 00000000..6dc18424
--- /dev/null
+++ b/supernode/services/common/node_peer.go
+package common
+
+import (
+	"time"
+
+	node "github.com/LumeraProtocol/supernode/supernode/node/supernode"
+)
+
+const (
+	
defaultConnectToNodeTimeout = time.Second * 35 +) + +// SuperNodePeer represents a single supernode +type SuperNodePeer struct { + node.ClientInterface + node.NodeMaker + node.ConnectionInterface + node.SuperNodePeerAPIInterface + + ID string + Address string +} + +//// Connect connects to grpc Server and setup pointer to concrete client wrapper +//func (node *SuperNodePeer) Connect(ctx context.Context) error { +// connCtx, connCancel := context.WithTimeout(ctx, defaultConnectToNodeTimeout) +// defer connCancel() +// +// conn, err := node.ClientInterface.Connect(connCtx, node.Address) +// if err != nil { +// return err +// } +// +// node.ConnectionInterface = conn +// node.SuperNodePeerAPIInterface = node.MakeNode(conn) +// return nil +//} + +// NewSuperNode returns a new Node instance. +func NewSuperNode( + client node.ClientInterface, + address string, pastelID string, + nodeMaker node.NodeMaker) *SuperNodePeer { + return &SuperNodePeer{ + ClientInterface: client, + NodeMaker: nodeMaker, + Address: address, + ID: pastelID, + } +} + +// SuperNodePeerList represents muptiple SenseRegistrationNodes +type SuperNodePeerList []*SuperNodePeer + +// Add adds a new node to the list +func (list *SuperNodePeerList) Add(node *SuperNodePeer) { + *list = append(*list, node) +} + +// ByID returns a node from the list by the given id. +func (list *SuperNodePeerList) ByID(id string) *SuperNodePeer { + for _, someNode := range *list { + if someNode.ID == id { + return someNode + } + } + return nil +} + +// Remove removes a node from the list by the given id. +func (list *SuperNodePeerList) Remove(id string) { + for i, someNode := range *list { + if someNode.ID == id { + if i+1 < len(*list) { + *list = append((*list)[:i], (*list)[i+1:]...) 
+			} else {
+				*list = (*list)[:i]
+			}
+			break
+		}
+	}
+}
diff --git a/supernode/services/common/p2p.go b/supernode/services/common/p2p.go
new file mode 100644
index 00000000..a477a591
--- /dev/null
+++ b/supernode/services/common/p2p.go
+package common
+
+const (
+	// UnknownDataType marks an unclassified payload (zero value)
+	UnknownDataType = iota // 0
+
+	// P2PDataRaptorQSymbol rq symbol
+	P2PDataRaptorQSymbol // 1
+	// P2PDataCascadeMetadata cascade ID file
+	P2PDataCascadeMetadata // 2
+	// P2PDataDDMetadata dd fp metadata file
+	P2PDataDDMetadata // 3
+	// P2PPreviewThumbnail preview NFT thumbnail
+	P2PPreviewThumbnail // 4
+	// P2PMediumThumbnail NFT medium thumbnail
+	P2PMediumThumbnail // 5
+	// P2PSmallThumbnail small NFT thumbnail
+	P2PSmallThumbnail // 6
+	// P2PDebug debug
+	P2PDebug // 7
+)
diff --git a/supernode/services/common/reg_task_helper.go b/supernode/services/common/reg_task_helper.go
new file mode 100644
index 00000000..077a1b2d
--- /dev/null
+++ b/supernode/services/common/reg_task_helper.go
+package common
+
+import (
+	"bytes"
+	"context"
+	"sync"
+
+	"github.com/LumeraProtocol/supernode/pkg/errors"
+	"github.com/LumeraProtocol/supernode/pkg/log"
+	"github.com/LumeraProtocol/supernode/pkg/lumera"
+	"github.com/LumeraProtocol/supernode/pkg/raptorq"
+	"github.com/LumeraProtocol/supernode/pkg/utils"
+)
+
+const (
+	// SeparatorByte is the ASCII code of '.', used by ValidateIDFiles to split
+	// the concatenated (file + signatures) payload.
+	SeparatorByte = 46
+)
+
+// RegTaskHelper common operations related to (any) Ticket registration
+type RegTaskHelper struct {
+	*SuperNodeTask
+
+	NetworkHandler *NetworkHandler
+	// NOTE(review): pointer to an interface type — must be dereferenced before
+	// use; consider storing lumera.Client directly. Confirm intent.
+	LumeraHandler *lumera.Client
+
+	// peersTicketSignatureMtx guards PeersTicketSignature and the close of
+	// AllSignaturesReceivedChn.
+	peersTicketSignatureMtx *sync.Mutex
+	PeersTicketSignature map[string][]byte
+	AllSignaturesReceivedChn chan struct{}
+}
+
+// NewRegTaskHelper creates instance of RegTaskHelper
+func NewRegTaskHelper(task *SuperNodeTask,
+	lumeraClient lumera.Client,
+) *RegTaskHelper {
+	return &RegTaskHelper{
+		SuperNodeTask: task,
+		LumeraHandler: &lumeraClient,
+		peersTicketSignatureMtx: &sync.Mutex{},
+		PeersTicketSignature: 
make(map[string][]byte), + AllSignaturesReceivedChn: make(chan struct{}), + } +} + +// AddPeerTicketSignature waits for ticket signatures from other SNs and adds them into internal array +func (h *RegTaskHelper) AddPeerTicketSignature(nodeID string, signature []byte, reqStatus Status) error { + h.peersTicketSignatureMtx.Lock() + defer h.peersTicketSignatureMtx.Unlock() + + if err := h.RequiredStatus(reqStatus); err != nil { + return err + } + + var err error + + <-h.NewAction(func(ctx context.Context) error { + log.WithContext(ctx).Debugf("receive NFT ticket signature from node %s", nodeID) + if node := h.NetworkHandler.Accepted.ByID(nodeID); node == nil { + log.WithContext(ctx).WithField("node", nodeID).Errorf("node is not in Accepted list") + err = errors.Errorf("node %s not in Accepted list", nodeID) + return nil + } + + h.PeersTicketSignature[nodeID] = signature + if len(h.PeersTicketSignature) == len(h.NetworkHandler.Accepted) { + log.WithContext(ctx).Debug("all signature received") + go func() { + close(h.AllSignaturesReceivedChn) + }() + } + return nil + }) + return err +} + +// ValidateIDFiles validates received (IDs) file and its (50) IDs: +// 1. checks signatures +// 2. 
generates list of 50 IDs and compares them to received +func (h *RegTaskHelper) ValidateIDFiles(ctx context.Context, + data []byte, ic uint32, max uint32, ids []string, numSignRequired int, + snAccAddresses []string, + lumeraClient lumera.Client, + creatorSignaure []byte, +) ([]byte, [][]byte, error) { + + dec, err := utils.B64Decode(data) + if err != nil { + return nil, nil, errors.Errorf("decode data: %w", err) + } + + decData, err := utils.Decompress(dec) + if err != nil { + return nil, nil, errors.Errorf("decompress: %w", err) + } + + splits := bytes.Split(decData, []byte{SeparatorByte}) + if len(splits) != numSignRequired+1 { + return nil, nil, errors.New("invalid data") + } + + file, err := utils.B64Decode(splits[0]) + if err != nil { + return nil, nil, errors.Errorf("decode file: %w", err) + } + + verifications := 0 + verifiedNodes := make(map[int]bool) + for i := 1; i < numSignRequired+1; i++ { + for j := 0; j < len(snAccAddresses); j++ { + if _, ok := verifiedNodes[j]; ok { + continue + } + + err := lumeraClient.Node().Verify(snAccAddresses[j], file, creatorSignaure) // TODO : verify the signature + if err != nil { + return nil, nil, errors.Errorf("verify file signature %w", err) + } + + verifiedNodes[j] = true + verifications++ + break + } + } + + if verifications != numSignRequired { + return nil, nil, errors.Errorf("file verification failed: need %d verifications, got %d", numSignRequired, verifications) + } + + gotIDs, idFiles, err := raptorq.GetIDFiles(ctx, decData, ic, max) + if err != nil { + return nil, nil, errors.Errorf("get ids: %w", err) + } + + if err := utils.EqualStrList(gotIDs, ids); err != nil { + return nil, nil, errors.Errorf("IDs don't match: %w", err) + } + + return file, idFiles, nil +} diff --git a/supernode/services/common/service.go b/supernode/services/common/service.go new file mode 100644 index 00000000..f486bd41 --- /dev/null +++ b/supernode/services/common/service.go @@ -0,0 +1,80 @@ +package common + +import ( + "context" + 
"time" + + "github.com/LumeraProtocol/supernode/p2p" + "github.com/LumeraProtocol/supernode/pkg/common/task" + "github.com/LumeraProtocol/supernode/pkg/errgroup" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/storage" + "github.com/LumeraProtocol/supernode/pkg/storage/files" +) + +// SuperNodeServiceInterface common interface for Services +type SuperNodeServiceInterface interface { + RunHelper(ctx context.Context) error + NewTask() task.Task + Task(id string) task.Task +} + +// SuperNodeService common "class" for Services +type SuperNodeService struct { + *task.Worker + *files.Storage + + P2PClient p2p.Client +} + +// run starts task +func (service *SuperNodeService) run(ctx context.Context, pastelID string, prefix string) error { + ctx = log.ContextWithPrefix(ctx, prefix) + + if pastelID == "" { + return errors.New("PastelID is not specified in the config file") + } + + group, ctx := errgroup.WithContext(ctx) + group.Go(func() error { + return service.Worker.Run(ctx) + }) + if service.Storage != nil { + group.Go(func() error { + return service.Storage.Run(ctx) + }) + } + return group.Wait() +} + +// RunHelper common code for Service runner +func (service *SuperNodeService) RunHelper(ctx context.Context, pastelID string, prefix string) error { + for { + select { + case <-ctx.Done(): + log.WithContext(ctx).Error("context done - closing sn services") + return nil + case <-time.After(5 * time.Second): + if err := service.run(ctx, pastelID, prefix); err != nil { + service.Worker = task.NewWorker() + log.WithContext(ctx).WithError(err).Error("Service run failed, retrying") + } else { + log.WithContext(ctx).Info("Service run completed successfully - closing sn services") + return nil + } + } + } +} + +// NewSuperNodeService creates SuperNodeService +func NewSuperNodeService( + fileStorage storage.FileStorageInterface, + p2pClient p2p.Client, +) *SuperNodeService { + return 
&SuperNodeService{ + Worker: task.NewWorker(), + Storage: files.NewStorage(fileStorage), + P2PClient: p2pClient, + } +} diff --git a/supernode/services/common/status.go b/supernode/services/common/status.go new file mode 100644 index 00000000..53af3527 --- /dev/null +++ b/supernode/services/common/status.go @@ -0,0 +1,124 @@ +package common + +// List of task statuses. +const ( + StatusTaskStarted Status = iota + + // Mode + StatusPrimaryMode + StatusSecondaryMode + + // Process + StatusConnected + + StatusImageProbed + StatusAssetUploaded + StatusImageAndThumbnailCoordinateUploaded + StatusRegistrationFeeCalculated + StatusFileDecoded + + // Error + StatusErrorInvalidBurnTxID + StatusRequestTooLate + StatusNftRegGettingFailed + StatusNftRegDecodingFailed + StatusNftRegTicketInvalid + StatusListTradeTicketsFailed + StatusTradeTicketsNotFound + StatusTradeTicketMismatched + StatusTimestampVerificationFailed + StatusTimestampInvalid + StatusRQServiceConnectionFailed + StatusSymbolFileNotFound + StatusSymbolFileInvalid + StatusSymbolNotFound + StatusSymbolMismatched + StatusSymbolsNotEnough + StatusFileDecodingFailed + StatusFileReadingFailed + StatusFileMismatched + StatusFileEmpty + StatusKeyNotFound + StatusFileRestoreFailed + StatusFileExists + + // Final + StatusTaskCanceled + StatusTaskCompleted +) + +var statusNames = map[Status]string{ + StatusTaskStarted: "Task started", + StatusPrimaryMode: "Primary Mode", + StatusSecondaryMode: "Secondary Mode", + StatusConnected: "Connected", + StatusImageProbed: "Image Probed", + StatusAssetUploaded: "Asset Uploaded", + StatusImageAndThumbnailCoordinateUploaded: "Imaged And Thumbnail Coordinate Uploaded", + StatusRegistrationFeeCalculated: "Registration Fee Caculated", + StatusFileDecoded: "File Decoded", + StatusErrorInvalidBurnTxID: "Error Invalid Burn TxID", + StatusRequestTooLate: "Request too late", + StatusNftRegGettingFailed: "NFT registered getting failed", + StatusNftRegDecodingFailed: "NFT registered decoding 
failed", + StatusNftRegTicketInvalid: "NFT registered ticket invalid", + StatusListTradeTicketsFailed: "Could not get available trade tickets", + StatusTradeTicketsNotFound: "Trade tickets not found", + StatusTradeTicketMismatched: "Trade ticket mismatched", + StatusTimestampVerificationFailed: "Could not verify timestamp", + StatusTimestampInvalid: "Timestamp invalid", + StatusRQServiceConnectionFailed: "RQ Service connection failed", + StatusSymbolFileNotFound: "Symbol file not found", + StatusSymbolFileInvalid: "Symbol file invalid", + StatusSymbolNotFound: "Symbol not found", + StatusSymbolMismatched: "Symbol mismatched", + StatusSymbolsNotEnough: "Symbols not enough", + StatusFileDecodingFailed: "File decoding failed", + StatusFileReadingFailed: "File reading failed", + StatusFileEmpty: "File empty", + StatusFileMismatched: "File mismatched", + StatusKeyNotFound: "Key not found", + StatusFileExists: "File hash exists", + StatusFileRestoreFailed: "File restore failed", + StatusTaskCanceled: "Task Canceled", + StatusTaskCompleted: "Task Completed", +} + +// Status represents status of the task +type Status byte + +func (status Status) String() string { + if name, ok := statusNames[status]; ok { + return name + } + return "" +} + +// IsFinal returns true if the status is the final. 
+func (status Status) IsFinal() bool {
+	return status == StatusTaskCanceled || status == StatusTaskCompleted
+}
+
+// IsFailure returns true if the task failed due to an error
+func (status Status) IsFailure() bool {
+	switch status {
+	case StatusErrorInvalidBurnTxID, // fix: grouped under "Error" statuses but omitted from the original chain
+		StatusTaskCanceled,
+		StatusRequestTooLate,
+		StatusNftRegGettingFailed,
+		StatusNftRegDecodingFailed,
+		StatusNftRegTicketInvalid,
+		StatusListTradeTicketsFailed,
+		StatusTradeTicketsNotFound,
+		StatusTradeTicketMismatched,
+		StatusTimestampVerificationFailed,
+		StatusTimestampInvalid,
+		StatusRQServiceConnectionFailed,
+		StatusSymbolFileNotFound,
+		StatusSymbolFileInvalid,
+		StatusSymbolNotFound,
+		StatusSymbolMismatched,
+		StatusSymbolsNotEnough,
+		StatusFileDecodingFailed,
+		StatusFileReadingFailed,
+		StatusFileEmpty,
+		StatusFileMismatched,
+		StatusKeyNotFound,
+		StatusFileRestoreFailed,
+		StatusFileExists:
+		return true
+	}
+	return false
+}
+
+// StatusNames returns the list of status names ordered by status value. 
+func StatusNames() []string { + list := make([]string, len(statusNames)) + for i, name := range statusNames { + list[i] = name + } + return list +} diff --git a/supernode/services/common/status_test.go b/supernode/services/common/status_test.go new file mode 100644 index 00000000..3f6de1be --- /dev/null +++ b/supernode/services/common/status_test.go @@ -0,0 +1,350 @@ +package common + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatusNames(t *testing.T) { + t.Parallel() + + testCases := []struct { + expectedStatues []Status + }{ + { + expectedStatues: []Status{ + StatusTaskStarted, + StatusPrimaryMode, + StatusSecondaryMode, + + // Process + StatusConnected, + StatusImageProbed, + StatusAssetUploaded, + StatusImageAndThumbnailCoordinateUploaded, + StatusRegistrationFeeCalculated, + StatusFileDecoded, + + // Error + StatusErrorInvalidBurnTxID, + StatusRequestTooLate, + StatusNftRegGettingFailed, + StatusNftRegDecodingFailed, + StatusNftRegTicketInvalid, + StatusListTradeTicketsFailed, + StatusTradeTicketsNotFound, + StatusTradeTicketMismatched, + StatusTimestampVerificationFailed, + StatusTimestampInvalid, + StatusRQServiceConnectionFailed, + StatusSymbolFileNotFound, + StatusSymbolFileInvalid, + StatusSymbolNotFound, + StatusSymbolMismatched, + StatusSymbolsNotEnough, + StatusFileDecodingFailed, + StatusFileReadingFailed, + StatusFileMismatched, + StatusFileEmpty, + StatusKeyNotFound, + StatusFileRestoreFailed, + StatusFileExists, + + // Final + StatusTaskCanceled, + StatusTaskCompleted, + }, + }, + } + + for i, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("testCase:%d", i), func(t *testing.T) { + t.Parallel() + + var expectedNames []string + for _, status := range testCase.expectedStatues { + expectedNames = append(expectedNames, StatusNames()[status]) + } + + assert.Equal(t, expectedNames, StatusNames()) + }) + + } +} + +func TestStatusString(t *testing.T) { + t.Parallel() + + testCases := 
[]struct { + status Status + expectedValue string + }{ + { + status: StatusTaskStarted, + expectedValue: StatusNames()[StatusTaskStarted], + }, { + status: StatusFileDecoded, + expectedValue: StatusNames()[StatusFileDecoded], + }, { + status: StatusRequestTooLate, + expectedValue: StatusNames()[StatusRequestTooLate], + }, { + status: StatusNftRegGettingFailed, + expectedValue: StatusNames()[StatusNftRegGettingFailed], + }, { + status: StatusNftRegDecodingFailed, + expectedValue: StatusNames()[StatusNftRegDecodingFailed], + }, { + status: StatusNftRegTicketInvalid, + expectedValue: StatusNames()[StatusNftRegTicketInvalid], + }, { + status: StatusListTradeTicketsFailed, + expectedValue: StatusNames()[StatusListTradeTicketsFailed], + }, { + status: StatusTradeTicketsNotFound, + expectedValue: StatusNames()[StatusTradeTicketsNotFound], + }, { + status: StatusTradeTicketMismatched, + expectedValue: StatusNames()[StatusTradeTicketMismatched], + }, { + status: StatusTimestampVerificationFailed, + expectedValue: StatusNames()[StatusTimestampVerificationFailed], + }, { + status: StatusTimestampInvalid, + expectedValue: StatusNames()[StatusTimestampInvalid], + }, { + status: StatusRQServiceConnectionFailed, + expectedValue: StatusNames()[StatusRQServiceConnectionFailed], + }, { + status: StatusSymbolFileNotFound, + expectedValue: StatusNames()[StatusSymbolFileNotFound], + }, { + status: StatusSymbolFileInvalid, + expectedValue: StatusNames()[StatusSymbolFileInvalid], + }, { + status: StatusSymbolNotFound, + expectedValue: StatusNames()[StatusSymbolNotFound], + }, { + status: StatusSymbolMismatched, + expectedValue: StatusNames()[StatusSymbolMismatched], + }, { + status: StatusSymbolsNotEnough, + expectedValue: StatusNames()[StatusSymbolsNotEnough], + }, { + status: StatusFileDecodingFailed, + expectedValue: StatusNames()[StatusFileDecodingFailed], + }, { + status: StatusFileReadingFailed, + expectedValue: StatusNames()[StatusFileReadingFailed], + }, { + status: 
StatusFileMismatched, + expectedValue: StatusNames()[StatusFileMismatched], + }, { + status: StatusFileEmpty, + expectedValue: StatusNames()[StatusFileEmpty], + }, { + status: StatusTaskCanceled, + expectedValue: StatusNames()[StatusTaskCanceled], + }, { + status: StatusTaskCompleted, + expectedValue: StatusNames()[StatusTaskCompleted], + }, + } + + for _, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("status:%v/value:%s", testCase.status, testCase.expectedValue), func(t *testing.T) { + t.Parallel() + + value := testCase.status.String() + assert.Equal(t, testCase.expectedValue, value) + }) + } +} + +func TestStatusIsFinal(t *testing.T) { + t.Parallel() + + testCases := []struct { + status Status + expectedValue bool + }{ + { + status: StatusTaskStarted, + expectedValue: false, + }, { + status: StatusFileDecoded, + expectedValue: false, + }, { + status: StatusRequestTooLate, + expectedValue: false, + }, { + status: StatusNftRegGettingFailed, + expectedValue: false, + }, { + status: StatusNftRegDecodingFailed, + expectedValue: false, + }, { + status: StatusNftRegTicketInvalid, + expectedValue: false, + }, { + status: StatusListTradeTicketsFailed, + expectedValue: false, + }, { + status: StatusTradeTicketsNotFound, + expectedValue: false, + }, { + status: StatusTradeTicketMismatched, + expectedValue: false, + }, { + status: StatusTimestampVerificationFailed, + expectedValue: false, + }, { + status: StatusTimestampInvalid, + expectedValue: false, + }, { + status: StatusRQServiceConnectionFailed, + expectedValue: false, + }, { + status: StatusSymbolFileNotFound, + expectedValue: false, + }, { + status: StatusSymbolFileInvalid, + expectedValue: false, + }, { + status: StatusSymbolNotFound, + expectedValue: false, + }, { + status: StatusSymbolMismatched, + expectedValue: false, + }, { + status: StatusSymbolsNotEnough, + expectedValue: false, + }, { + status: StatusFileDecodingFailed, + expectedValue: false, + }, { + status: 
StatusFileReadingFailed, + expectedValue: false, + }, { + status: StatusFileMismatched, + expectedValue: false, + }, { + status: StatusFileEmpty, + expectedValue: false, + }, { + status: StatusTaskCanceled, + expectedValue: true, + }, { + status: StatusTaskCompleted, + expectedValue: true, + }, + } + + for _, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("status:%v/value:%v", testCase.status, testCase.expectedValue), func(t *testing.T) { + t.Parallel() + + value := testCase.status.IsFinal() + assert.Equal(t, testCase.expectedValue, value) + }) + } +} + +func TestStatusIsFailure(t *testing.T) { + t.Parallel() + + testCases := []struct { + status Status + expectedValue bool + }{ + { + status: StatusTaskStarted, + expectedValue: false, + }, { + status: StatusFileDecoded, + expectedValue: false, + }, { + status: StatusRequestTooLate, + expectedValue: true, + }, { + status: StatusNftRegGettingFailed, + expectedValue: true, + }, { + status: StatusNftRegDecodingFailed, + expectedValue: true, + }, { + status: StatusNftRegTicketInvalid, + expectedValue: true, + }, { + status: StatusListTradeTicketsFailed, + expectedValue: true, + }, { + status: StatusTradeTicketsNotFound, + expectedValue: true, + }, { + status: StatusTradeTicketMismatched, + expectedValue: true, + }, { + status: StatusTimestampVerificationFailed, + expectedValue: true, + }, { + status: StatusTimestampInvalid, + expectedValue: true, + }, { + status: StatusRQServiceConnectionFailed, + expectedValue: true, + }, { + status: StatusSymbolFileNotFound, + expectedValue: true, + }, { + status: StatusSymbolFileInvalid, + expectedValue: true, + }, { + status: StatusSymbolNotFound, + expectedValue: true, + }, { + status: StatusSymbolMismatched, + expectedValue: true, + }, { + status: StatusSymbolsNotEnough, + expectedValue: true, + }, { + status: StatusFileDecodingFailed, + expectedValue: true, + }, { + status: StatusFileReadingFailed, + expectedValue: true, + }, { + status: 
StatusFileMismatched, + expectedValue: true, + }, { + status: StatusFileEmpty, + expectedValue: true, + }, { + status: StatusTaskCanceled, + expectedValue: true, + }, { + status: StatusTaskCompleted, + expectedValue: false, + }, + } + + for _, testCase := range testCases { + testCase := testCase + + t.Run(fmt.Sprintf("status:%v/value:%v", testCase.status, testCase.expectedValue), func(t *testing.T) { + t.Parallel() + + value := testCase.status.IsFailure() + assert.Equal(t, testCase.expectedValue, value) + }) + } +} diff --git a/supernode/services/common/storage_handler.go b/supernode/services/common/storage_handler.go new file mode 100644 index 00000000..99e778da --- /dev/null +++ b/supernode/services/common/storage_handler.go @@ -0,0 +1,383 @@ +package common + +import ( + "context" + "encoding/json" + "fmt" + "io/fs" + "math" + "os" + "path/filepath" + "sort" + "time" + + "github.com/LumeraProtocol/supernode/p2p" + "github.com/LumeraProtocol/supernode/pkg/errors" + "github.com/LumeraProtocol/supernode/pkg/log" + rqnode "github.com/LumeraProtocol/supernode/pkg/raptorq" + "github.com/LumeraProtocol/supernode/pkg/storage/files" + "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" + "github.com/LumeraProtocol/supernode/pkg/utils" + "github.com/cenkalti/backoff" +) + +const ( + loadSymbolsBatchSize = 2500 + storeSymbolsPercent = 10 + concurrency = 1 +) + +// StorageHandler provides common logic for RQ and P2P operations +type StorageHandler struct { + P2PClient p2p.Client + RqClient rqnode.ClientInterface + + rqAddress string + rqDir string + + TaskID string + TxID string + + store rqstore.Store + semaphore chan struct{} +} + +// NewStorageHandler creates instance of StorageHandler +func NewStorageHandler(p2p p2p.Client, rq rqnode.ClientInterface, + rqAddress string, rqDir string, store rqstore.Store) *StorageHandler { + + return &StorageHandler{ + P2PClient: p2p, + RqClient: rq, + rqAddress: rqAddress, + rqDir: rqDir, + store: store, + semaphore: make(chan 
struct{}, concurrency), + } +} + +// StoreFileIntoP2P stores file into P2P +func (h *StorageHandler) StoreFileIntoP2P(ctx context.Context, file *files.File, typ int) (string, error) { + data, err := file.Bytes() + if err != nil { + return "", errors.Errorf("store file %s into p2p", file.Name()) + } + return h.StoreBytesIntoP2P(ctx, data, typ) +} + +// StoreBytesIntoP2P into P2P actual data +func (h *StorageHandler) StoreBytesIntoP2P(ctx context.Context, data []byte, typ int) (string, error) { + return h.P2PClient.Store(ctx, data, typ) +} + +// StoreBatch stores into P2P array of bytes arrays +func (h *StorageHandler) StoreBatch(ctx context.Context, list [][]byte, typ int) error { + val := ctx.Value(log.TaskIDKey) + taskID := "" + if val != nil { + taskID = fmt.Sprintf("%v", val) + } + log.WithContext(ctx).WithField("task_id", taskID).Info("task_id in storeList") + + return h.P2PClient.StoreBatch(ctx, list, typ, taskID) +} + +// GenerateRaptorQSymbols calls RQ service to produce RQ Symbols +func (h *StorageHandler) GenerateRaptorQSymbols(ctx context.Context, data []byte, name string) (map[string][]byte, error) { + if h.RqClient == nil { + log.WithContext(ctx).Warnf("RQ Server is not initialized") + return nil, errors.Errorf("RQ Server is not initialized") + } + + b := backoff.NewExponentialBackOff() + b.MaxElapsedTime = 3 * time.Minute + b.InitialInterval = 200 * time.Millisecond + + var conn rqnode.Connection + if err := backoff.Retry(backoff.Operation(func() error { + var err error + conn, err = h.RqClient.Connect(ctx, h.rqAddress) + if err != nil { + return errors.Errorf("connect to raptorq service: %w", err) + } + + return nil + }), b); err != nil { + return nil, fmt.Errorf("retry connect to raptorq service: %w", err) + } + defer func() { + if err := conn.Close(); err != nil { + log.WithContext(ctx).WithError(err).Error("error closing rq-connection") + } + }() + + rqService := conn.RaptorQ(&rqnode.Config{ + RqFilesDir: h.rqDir, + }) + + b.Reset() + + // 
encodeResp := &rqnode.EncodeResponse{} + if err := backoff.Retry(backoff.Operation(func() error { + var err error + // encodeResp, err = rqService.RQEncode(ctx, data, h.TxID, h.store) + _, err = rqService.Encode(ctx, rqnode.EncodeRequest{}) // FIXME : use the resp + // encodeResp = &encodeRes + if err != nil { + return errors.Errorf("create raptorq symbol from data %s: %w", name, err) + } + + return nil + }), b); err != nil { + return nil, fmt.Errorf("retry do rqencode service: %w", err) + } + + return map[string][]byte{}, nil // FIXME : return proper symbols +} + +// GetRaptorQEncodeInfo calls RQ service to get Encoding info and list of RQIDs +func (h *StorageHandler) GetRaptorQEncodeInfo(ctx context.Context, + data []byte, num uint32, hash string, pastelID string, +) (encodeInfo *rqnode.EncodeResponse, err error) { + if h.RqClient == nil { + log.WithContext(ctx).Warnf("RQ Server is not initialized") + return nil, errors.Errorf("RQ Server is not initialized") + } + + b := backoff.NewExponentialBackOff() + b.MaxElapsedTime = 3 * time.Minute + b.InitialInterval = 500 * time.Millisecond + + var conn rqnode.Connection + if err := backoff.Retry(backoff.Operation(func() error { + var err error + conn, err = h.RqClient.Connect(ctx, h.rqAddress) + if err != nil { + return errors.Errorf("connect to raptorq service: %w", err) + } + + return nil + }), b); err != nil { + return nil, fmt.Errorf("retry connect to raptorq service: %w", err) + } + defer func() { + if err := conn.Close(); err != nil { + log.WithContext(ctx).WithError(err).Error("error closing rq-connection") + } + }() + + rqService := conn.RaptorQ(&rqnode.Config{ + RqFilesDir: h.rqDir, + }) + + b.Reset() + if err := backoff.Retry(backoff.Operation(func() error { + var err error + // encodeInfo, err = rqService.EncodeMetaData(ctx, data, num, hash, pastelID) // TODO : remove + encodeI, err := rqService.EncodeMetaData(ctx, rqnode.EncodeMetadataRequest{ + Path: "", // FIXME + FilesNumber: num, + BlockHash: hash, + 
PastelId: pastelID, + }) + if err != nil { + return errors.Errorf("get raptorq encode info: %w", err) + } + encodeInfo = &encodeI + return nil + }), b); err != nil { + return nil, fmt.Errorf("retry do encode info on raptorq service: %w", err) + } + + return encodeInfo, nil +} + +// ValidateRaptorQSymbolIDs calls RQ service to get Encoding info and list of RQIDs and compares them to the similar data received from WN +func (h *StorageHandler) ValidateRaptorQSymbolIDs(ctx context.Context, + data []byte, num uint32, hash string, pastelID string, + haveData []byte) error { + + if len(haveData) == 0 { + return errors.Errorf("no symbols identifiers") + } + + encodeInfo, err := h.GetRaptorQEncodeInfo(ctx, data, num, hash, pastelID) + if err != nil { + return err + } + + // scan return symbol Id files + filesMap, err := scanSymbolIDFiles(encodeInfo.Path) + if err != nil { + return errors.Errorf("scan symbol id files folder %s: %w", encodeInfo.Path, err) + } + + if len(filesMap) != int(num) { // FIXME : copies == num ? 
+ return errors.Errorf("symbol id files count not match: expect %d, output %d", num, len(filesMap)) + } + + // pick just one file generated to compare + var gotFile, haveFile rqnode.RawSymbolIDFile + for _, v := range filesMap { + gotFile = v + break + } + + if err := json.Unmarshal(haveData, &haveFile); err != nil { + return errors.Errorf("decode raw rq file: %w", err) + } + + if err := utils.EqualStrList(gotFile.SymbolIdentifiers, haveFile.SymbolIdentifiers); err != nil { + return errors.Errorf("raptor symbol mismatched: %w", err) + } + return nil +} + +// scan symbol id files in "meta" folder, return map of file Ids & contents of file (as list of line) +func scanSymbolIDFiles(dirPath string) (map[string]rqnode.RawSymbolIDFile, error) { + filesMap := make(map[string]rqnode.RawSymbolIDFile) + + err := filepath.Walk(dirPath, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return errors.Errorf("scan a path %s: %w", path, err) + } + + if info.IsDir() { + // TODO - compare it to root + return nil + } + + fileID := filepath.Base(path) + + configFile, err := os.Open(path) + if err != nil { + return errors.Errorf("opening file: %s - err: %w", path, err) + } + defer configFile.Close() + + file := rqnode.RawSymbolIDFile{} + jsonParser := json.NewDecoder(configFile) + if err = jsonParser.Decode(&file); err != nil { + return errors.Errorf("parsing file: %s - err: %w", path, err) + } + + filesMap[fileID] = file + + return nil + }) + + if err != nil { + return nil, err + } + + return filesMap, nil +} + +func (h *StorageHandler) StoreRaptorQSymbolsIntoP2P(ctx context.Context, data []byte, name string) error { + h.semaphore <- struct{}{} // Acquire slot + defer func() { + <-h.semaphore // Release the semaphore slot + }() + + // Generate the keys for RaptorQ symbols, with empty values + log.WithContext(ctx).Info("generating RaptorQ symbols") + keysMap, err := h.GenerateRaptorQSymbols(ctx, data, name) + if err != nil { + return err + } + 
log.WithContext(ctx).WithField("count", len(keysMap)).Info("generated RaptorQ symbols") + + if h.TxID == "" { + return errors.New("txid is not set, cannot store rq symbols") + } + + dir, err := h.store.GetDirectoryByTxID(h.TxID) + if err != nil { + return fmt.Errorf("error fetching symbols dir from rq DB: %w", err) + } + + // Create a slice of keys from keysMap and sort it + keys := make([]string, 0, len(keysMap)) + for key := range keysMap { + keys = append(keys, key) + } + sort.Strings(keys) // Sort the keys alphabetically + + if len(keys) > loadSymbolsBatchSize { + // Calculate 15% of the total keys, rounded up + requiredKeysCount := int(math.Ceil(float64(len(keys)) * storeSymbolsPercent / 100)) + + // Get the subset of keys (15%) + if requiredKeysCount > len(keys) { + requiredKeysCount = len(keys) // Ensure we don't exceed the available keys count + } + keys = keys[:requiredKeysCount] + } + + // Iterate over sorted keys in batches + batchKeys := make(map[string][]byte) + count := 0 + + log.WithContext(ctx).WithField("count", len(keys)).Info("storing raptorQ symbols") + for _, key := range keys { + batchKeys[key] = nil + count++ + if count%loadSymbolsBatchSize == 0 { + if err := h.storeSymbolsInP2P(ctx, dir, batchKeys); err != nil { + return err + } + batchKeys = make(map[string][]byte) // Reset batchKeys after storing + } + } + + // Store any remaining symbols in the last batch + if len(batchKeys) > 0 { + if err := h.storeSymbolsInP2P(ctx, dir, batchKeys); err != nil { + return err + } + } + + if err := h.store.UpdateIsFirstBatchStored(h.TxID); err != nil { + return fmt.Errorf("error updating first batch stored flag in rq DB: %w", err) + } + log.WithContext(ctx).WithField("curr-time", time.Now().UTC()).WithField("count", len(keys)).Info("stored RaptorQ symbols") + + return nil +} + +func (h *StorageHandler) storeSymbolsInP2P(ctx context.Context, dir string, batchKeys map[string][]byte) error { + val := ctx.Value(log.TaskIDKey) + taskID := "" + if val != nil { + 
taskID = fmt.Sprintf("%v", val) + } + // Load symbols from the database for the current batch + log.WithContext(ctx).WithField("count", len(batchKeys)).Info("loading batch symbols") + loadedSymbols, err := utils.LoadSymbols(dir, batchKeys) + if err != nil { + return fmt.Errorf("load batch symbols from db: %w", err) + } + + log.WithContext(ctx).WithField("count", len(loadedSymbols)).Info("loaded batch symbols, storing now") + // Prepare batch for P2P storage return nil + result := make([][]byte, len(loadedSymbols)) + i := 0 + for key, value := range loadedSymbols { + result[i] = value + loadedSymbols[key] = nil // Release the reference for faster memory cleanup + i++ + } + + // Store the loaded symbols in P2P + if err := h.P2PClient.StoreBatch(ctx, result, P2PDataRaptorQSymbol, taskID); err != nil { + return fmt.Errorf("store batch raptorq symbols in p2p: %w", err) + } + log.WithContext(ctx).WithField("count", len(loadedSymbols)).Info("stored batch symbols") + + if err := utils.DeleteSymbols(ctx, dir, batchKeys); err != nil { + return fmt.Errorf("delete batch symbols from db: %w", err) + } + log.WithContext(ctx).WithField("count", len(loadedSymbols)).Info("deleted batch symbols") + + return nil +} diff --git a/supernode/services/common/supernode_task.go b/supernode/services/common/supernode_task.go new file mode 100644 index 00000000..1cb663f9 --- /dev/null +++ b/supernode/services/common/supernode_task.go @@ -0,0 +1,64 @@ +package common + +import ( + "context" + "fmt" + + "github.com/LumeraProtocol/supernode/pkg/common/task" + "github.com/LumeraProtocol/supernode/pkg/common/task/state" + "github.com/LumeraProtocol/supernode/pkg/log" + "github.com/LumeraProtocol/supernode/pkg/storage/files" + "github.com/LumeraProtocol/supernode/pkg/storage/queries" +) + +// TaskCleanerFunc pointer to func that removes artefacts +type TaskCleanerFunc func() + +// SuperNodeTask base "class" for Task +type SuperNodeTask struct { + task.Task + + LogPrefix string +} + +// RunHelper 
common code for Task runner +func (task *SuperNodeTask) RunHelper(ctx context.Context, clean TaskCleanerFunc) error { + ctx = task.context(ctx) + log.WithContext(ctx).Debug("Start task") + defer log.WithContext(ctx).Info("Task canceled") + defer task.Cancel() + + task.SetStatusNotifyFunc(func(status *state.Status) { + log.WithContext(ctx).WithField("status", status.String()).Debug("States updated") + }) + + defer clean() + + return task.RunAction(ctx) +} + +func (task *SuperNodeTask) context(ctx context.Context) context.Context { + return log.ContextWithPrefix(ctx, fmt.Sprintf("%s-%s", task.LogPrefix, task.ID())) +} + +// RemoveFile removes file from FS (TODO: move to gonode.common) +func (task *SuperNodeTask) RemoveFile(file *files.File) { + if file != nil { + log.Debugf("remove file: %s", file.Name()) + if err := file.Remove(); err != nil { + log.Debugf("remove file failed: %s", err.Error()) + } + } +} + +// NewSuperNodeTask returns a new Task instance. +func NewSuperNodeTask(logPrefix string, historyDB queries.LocalStoreInterface) *SuperNodeTask { + snt := &SuperNodeTask{ + Task: task.New(StatusTaskStarted), + LogPrefix: logPrefix, + } + + snt.InitialiseHistoryDB(historyDB) + + return snt +} diff --git a/tests/integration/p2p/p2p_integration_test.go b/tests/integration/p2p/p2p_integration_test.go index 46d6f392..9e8dde2e 100644 --- a/tests/integration/p2p/p2p_integration_test.go +++ b/tests/integration/p2p/p2p_integration_test.go @@ -1,5 +1,3 @@ -//go:build integration - package integration import ( @@ -18,12 +16,11 @@ import ( "github.com/LumeraProtocol/supernode/p2p" "github.com/LumeraProtocol/supernode/p2p/kademlia" - "github.com/LumeraProtocol/supernode/pkg/lumera" + ltc "github.com/LumeraProtocol/supernode/pkg/net/credentials" + "github.com/LumeraProtocol/supernode/pkg/net/credentials/alts/conn" "github.com/LumeraProtocol/supernode/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/pkg/testutil" "github.com/LumeraProtocol/supernode/pkg/utils" - ltc 
"github.com/LumeraProtocol/supernode/pkg/net/credentials" - "github.com/LumeraProtocol/supernode/pkg/net/credentials/alts/conn" ) func TestP2PBasicIntegration(t *testing.T) { @@ -147,7 +144,7 @@ func SetupTestP2PNodes(t *testing.T, ctx context.Context) ([]p2p.Client, []*rqst var rqStores []*rqstore.SQLiteRQStore kr := testutil.CreateTestKeyring() - + // Create test accounts accountNames := make([]string, 0) numP2PNodes := kademlia.Alpha + 1 @@ -161,22 +158,16 @@ func SetupTestP2PNodes(t *testing.T, ctx context.Context) ([]p2p.Client, []*rqst for i := 0; i < numP2PNodes; i++ { nodeConfigs = append(nodeConfigs, ltc.LumeraAddress{ Identity: accountAddresses[i], - Host: "127.0.0.1", - Port: uint16(9000+i), + Host: "127.0.0.1", + Port: uint16(9000 + i), }) } // Create and start nodes for i, config := range nodeConfigs { - tClient, err := lumera.NewTendermintClient( - lumera.WithKeyring(kr), - ) + mockClient, err := testutil.NewMockLumeraClient(kr, accountAddresses) require.NoError(t, err, "failed to create tendermint client") - // cast to lumera.Client - lumeraClient, ok := tClient.(*lumera.Client) - require.True(t, ok, "failed to cast to lumera.Client") - // Create data directory for the node dataDir := fmt.Sprintf("./data/node%d", i) err = os.MkdirAll(dataDir, 0755) @@ -189,11 +180,11 @@ func SetupTestP2PNodes(t *testing.T, ctx context.Context) ([]p2p.Client, []*rqst } p2pConfig := &p2p.Config{ - ListenAddress: config.Host, - Port: config.Port, - DataDir: dataDir, - ID: config.Identity, - BootstrapNodes: strings.Join(bootstrapAddresses, ","), + ListenAddress: config.Host, + Port: config.Port, + DataDir: dataDir, + ID: config.Identity, + BootstrapNodes: strings.Join(bootstrapAddresses, ","), } // Initialize SQLite RQ store for each node @@ -205,7 +196,7 @@ func SetupTestP2PNodes(t *testing.T, ctx context.Context) ([]p2p.Client, []*rqst require.NoError(t, err, "failed to create rqstore for node %d: %v", i, err) rqStores = append(rqStores, rqStore) - service, err := 
p2p.New(ctx, p2pConfig, lumeraClient, rqStore, nil, nil) + service, err := p2p.New(ctx, p2pConfig, mockClient, kr, rqStore, nil, nil) require.NoError(t, err, "failed to create p2p service for node %d: %v", i, err) // Start P2P service