diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go
index 9773a6f3..a92bfa3a 100644
--- a/supernode/cmd/start.go
+++ b/supernode/cmd/start.go
@@ -22,6 +22,7 @@ import (
 	cascadeService "github.com/LumeraProtocol/supernode/supernode/services/cascade"
 	"github.com/LumeraProtocol/supernode/supernode/services/common"
 	supernodeService "github.com/LumeraProtocol/supernode/supernode/services/common/supernode"
+	"github.com/LumeraProtocol/supernode/supernode/services/verifier"
 	cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring"
 	"github.com/spf13/cobra"
@@ -53,9 +54,27 @@ The supernode will connect to the Lumera network and begin participating in the
 		// Initialize Lumera client
 		lumeraClient, err := initLumeraClient(ctx, appConfig, kr)
 		if err != nil {
-			logtrace.Fatal(ctx, "Failed to initialize Lumera client", logtrace.Fields{"error": err.Error()})
+			logtrace.Fatal(ctx, "Failed to connect to Lumera, please check your configuration", logtrace.Fields{"error": err.Error()})
 		}
 
+		// Verify config matches chain registration before starting services
+		logtrace.Info(ctx, "Verifying configuration against chain registration", logtrace.Fields{})
+		configVerifier := verifier.NewConfigVerifier(appConfig, lumeraClient, kr)
+		verificationResult, err := configVerifier.VerifyConfig(ctx)
+		if err != nil {
+			logtrace.Fatal(ctx, "Config verification error", logtrace.Fields{"error": err.Error()})
+		}
+
+		if !verificationResult.IsValid() {
+			logtrace.Fatal(ctx, "Config verification failed", logtrace.Fields{"summary": verificationResult.Summary()})
+		}
+
+		if verificationResult.HasWarnings() {
+			logtrace.Warn(ctx, "Config verification warnings", logtrace.Fields{"summary": verificationResult.Summary()})
+		}
+
+		logtrace.Info(ctx, "Configuration verification successful", logtrace.Fields{})
+
 		// Initialize RaptorQ store for Cascade processing
 		rqStore, err := initRQStore(ctx, appConfig)
 		if err != nil {
diff --git a/supernode/services/verifier/interface.go
b/supernode/services/verifier/interface.go
new file mode 100644
index 00000000..9ec3d9ab
--- /dev/null
+++ b/supernode/services/verifier/interface.go
@@ -0,0 +1,55 @@
+package verifier
+
+import (
+	"context"
+	"strings"
+)
+
+// ConfigVerifierService defines the interface for config verification service
+type ConfigVerifierService interface {
+	// VerifyConfig performs comprehensive config validation against chain
+	VerifyConfig(ctx context.Context) (*VerificationResult, error)
+}
+
+// VerificationResult contains the results of config verification
+type VerificationResult struct {
+	Valid    bool          `json:"valid"`
+	Errors   []ConfigError `json:"errors,omitempty"`
+	Warnings []ConfigError `json:"warnings,omitempty"`
+}
+
+// ConfigError represents a configuration validation error or warning
+type ConfigError struct {
+	Field    string `json:"field"`
+	Expected string `json:"expected,omitempty"`
+	Actual   string `json:"actual,omitempty"`
+	Message  string `json:"message"`
+}
+
+// IsValid returns true if all verifications passed
+func (vr *VerificationResult) IsValid() bool {
+	return vr.Valid && len(vr.Errors) == 0
+}
+
+// HasWarnings returns true if there are any warnings
+func (vr *VerificationResult) HasWarnings() bool {
+	return len(vr.Warnings) > 0
+}
+
+// Summary returns a human-readable summary of verification results
+func (vr *VerificationResult) Summary() string {
+	if vr.IsValid() && !vr.HasWarnings() {
+		return "✓ Config verification successful"
+	}
+
+	var b strings.Builder // Builder avoids quadratic += concatenation in the loops below
+	for _, err := range vr.Errors {
+		b.WriteString("✗ " + err.Message + "\n")
+	}
+
+	for _, warn := range vr.Warnings {
+		b.WriteString("⚠ " + warn.Message + "\n")
+	}
+
+	return strings.TrimSuffix(b.String(), "\n")
+}
\ No newline at end of file
diff --git a/supernode/services/verifier/verifier.go b/supernode/services/verifier/verifier.go
new file mode 100644
index 00000000..d8a39c87
--- /dev/null
+++ b/supernode/services/verifier/verifier.go
@@ -0,0 +1,224 @@
+package verifier
+
+import (
+	"context"
+	
"fmt" + "strings" + + "github.com/LumeraProtocol/supernode/pkg/lumera" + "github.com/LumeraProtocol/supernode/pkg/logtrace" + "github.com/LumeraProtocol/supernode/supernode/config" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" + sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" +) + +// ConfigVerifier implements ConfigVerifierService +type ConfigVerifier struct { + config *config.Config + lumeraClient lumera.Client + keyring keyring.Keyring +} + +// NewConfigVerifier creates a new config verifier service +func NewConfigVerifier(cfg *config.Config, client lumera.Client, kr keyring.Keyring) ConfigVerifierService { + return &ConfigVerifier{ + config: cfg, + lumeraClient: client, + keyring: kr, + } +} + +// VerifyConfig performs comprehensive config validation against chain +func (cv *ConfigVerifier) VerifyConfig(ctx context.Context) (*VerificationResult, error) { + result := &VerificationResult{ + Valid: true, + Errors: []ConfigError{}, + Warnings: []ConfigError{}, + } + + logtrace.Debug(ctx, "Starting config verification", logtrace.Fields{ + "identity": cv.config.SupernodeConfig.Identity, + "key_name": cv.config.SupernodeConfig.KeyName, + "p2p_port": cv.config.P2PConfig.Port, + }) + + // Check 1: Verify keyring contains the key + if err := cv.checkKeyExists(result); err != nil { + return result, err + } + + // Check 2: Verify key resolves to correct identity + if err := cv.checkIdentityMatches(result); err != nil { + return result, err + } + + // If keyring checks failed, don't proceed with chain queries + if !result.IsValid() { + return result, nil + } + + // Check 3: Query chain for supernode registration + supernode, err := cv.checkSupernodeExists(ctx, result) + if err != nil { + return result, err + } + + // If supernode doesn't exist, don't proceed with field comparisons + if supernode == nil { + return result, nil + } + + // Check 4: Verify P2P port matches + cv.checkP2PPortMatches(result, supernode) + 
+	// Check 5: Verify supernode state is active
+	cv.checkSupernodeState(result, supernode)
+
+	// Check 6: Check supernode port alignment with on-chain registration
+	cv.checkSupernodePortAlignment(result, supernode)
+
+	// Check 7: Check host alignment with on-chain registration (warning only - may differ due to load balancer)
+	cv.checkHostAlignment(result, supernode)
+
+	logtrace.Info(ctx, "Config verification completed", logtrace.Fields{
+		"valid":    result.IsValid(),
+		"errors":   len(result.Errors),
+		"warnings": len(result.Warnings),
+	})
+
+	return result, nil
+}
+
+// checkKeyExists verifies the configured key exists in keyring
+func (cv *ConfigVerifier) checkKeyExists(result *VerificationResult) error {
+	_, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName)
+	if err != nil {
+		result.Valid = false
+		result.Errors = append(result.Errors, ConfigError{
+			Field:   "key_name",
+			Actual:  cv.config.SupernodeConfig.KeyName,
+			Message: fmt.Sprintf("Key '%s' not found in keyring", cv.config.SupernodeConfig.KeyName),
+		})
+	}
+	return nil // a missing key is recorded in result; the error return is reserved for infrastructure failures
+}
+
+// checkIdentityMatches verifies key resolves to configured identity
+func (cv *ConfigVerifier) checkIdentityMatches(result *VerificationResult) error {
+	keyInfo, err := cv.keyring.Key(cv.config.SupernodeConfig.KeyName) // second lookup of the same key; keeps this check independent of checkKeyExists
+	if err != nil {
+		// Already handled in checkKeyExists
+		return nil
+	}
+
+	pubKey, err := keyInfo.GetPubKey()
+	if err != nil {
+		return fmt.Errorf("failed to get public key for key '%s': %w", cv.config.SupernodeConfig.KeyName, err)
+	}
+
+	addr := sdk.AccAddress(pubKey.Address()) // derive the bech32 account address from the key's public key
+	if addr.String() != cv.config.SupernodeConfig.Identity {
+		result.Valid = false
+		result.Errors = append(result.Errors, ConfigError{
+			Field:    "identity",
+			Expected: addr.String(),
+			Actual:   cv.config.SupernodeConfig.Identity,
+			Message:  fmt.Sprintf("Key '%s' resolves to %s but config identity is %s", cv.config.SupernodeConfig.KeyName, addr.String(), cv.config.SupernodeConfig.Identity),
+		})
+	}
+	return nil
+}
+
+// 
checkSupernodeExists queries chain for supernode registration
+func (cv *ConfigVerifier) checkSupernodeExists(ctx context.Context, result *VerificationResult) (*sntypes.SuperNode, error) {
+	sn, err := cv.lumeraClient.SuperNode().GetSupernodeBySupernodeAddress(ctx, cv.config.SupernodeConfig.Identity)
+	if err != nil { // NOTE(review): every query error (incl. transport failures) is treated as "not registered" — confirm the client exposes a distinguishable not-found error
+		result.Valid = false
+		result.Errors = append(result.Errors, ConfigError{
+			Field:   "registration",
+			Actual:  "not_registered",
+			Message: fmt.Sprintf("Supernode not registered on chain for address %s: %v", cv.config.SupernodeConfig.Identity, err),
+		})
+		return nil, nil
+	}
+	return sn, nil
+}
+
+// checkP2PPortMatches compares config P2P port with chain
+func (cv *ConfigVerifier) checkP2PPortMatches(result *VerificationResult, supernode *sntypes.SuperNode) {
+	configPort := fmt.Sprintf("%d", cv.config.P2PConfig.Port)
+	chainPort := supernode.P2PPort
+
+	if chainPort != "" && chainPort != configPort {
+		result.Valid = false
+		result.Errors = append(result.Errors, ConfigError{
+			Field:    "p2p_port",
+			Expected: chainPort,
+			Actual:   configPort,
+			Message:  fmt.Sprintf("P2P port mismatch: config=%s, chain=%s", configPort, chainPort),
+		})
+	}
+}
+
+// checkSupernodeState verifies supernode is in active state
+func (cv *ConfigVerifier) checkSupernodeState(result *VerificationResult, supernode *sntypes.SuperNode) {
+	if len(supernode.States) > 0 {
+		lastState := supernode.States[len(supernode.States)-1]
+		if lastState.State.String() != "SUPERNODE_STATE_ACTIVE" { // NOTE(review): prefer comparing against the exported sntypes enum constant — string comparison breaks silently if the proto name changes
+			result.Valid = false
+			result.Errors = append(result.Errors, ConfigError{
+				Field:    "state",
+				Expected: "SUPERNODE_STATE_ACTIVE",
+				Actual:   lastState.State.String(),
+				Message:  fmt.Sprintf("Supernode state is %s (expected ACTIVE)", lastState.State.String()),
+			})
+		}
+	}
+}
+
+// checkSupernodePortAlignment compares supernode port with on-chain registered port (error if mismatch)
+func (cv *ConfigVerifier) checkSupernodePortAlignment(result *VerificationResult, supernode *sntypes.SuperNode) {
+	if 
len(supernode.PrevIpAddresses) > 0 {
+		chainAddress := supernode.PrevIpAddresses[len(supernode.PrevIpAddresses)-1].Address
+
+		// Extract port from chain address
+		var chainPort string
+		if idx := strings.LastIndex(chainAddress, ":"); idx != -1 { // NOTE(review): misparses bare IPv6 literals — confirm chain addresses are always host:port
+			chainPort = chainAddress[idx+1:]
+		}
+
+		configPort := fmt.Sprintf("%d", cv.config.SupernodeConfig.Port)
+		if chainPort != "" && chainPort != configPort {
+			result.Valid = false
+			result.Errors = append(result.Errors, ConfigError{
+				Field:    "supernode_port",
+				Expected: chainPort,
+				Actual:   configPort,
+				Message:  fmt.Sprintf("Supernode port mismatch: config=%s, chain=%s", configPort, chainPort),
+			})
+		}
+	}
+}
+
+// checkHostAlignment compares host with on-chain registered host (warning only - may differ due to load balancer)
+func (cv *ConfigVerifier) checkHostAlignment(result *VerificationResult, supernode *sntypes.SuperNode) {
+	if len(supernode.PrevIpAddresses) > 0 {
+		chainAddress := supernode.PrevIpAddresses[len(supernode.PrevIpAddresses)-1].Address
+
+		// Extract host from chain address
+		chainHost := chainAddress
+		if idx := strings.LastIndex(chainAddress, ":"); idx != -1 {
+			chainHost = chainAddress[:idx]
+		}
+
+		if chainHost != cv.config.SupernodeConfig.Host {
+			// Expected/Actual follow the same convention as the port checks:
+			// Expected = on-chain value, Actual = local config value.
+			result.Warnings = append(result.Warnings, ConfigError{
+				Field:    "host",
+				Expected: chainHost,
+				Actual:   cv.config.SupernodeConfig.Host,
+				Message:  fmt.Sprintf("Host mismatch: config=%s, chain=%s", cv.config.SupernodeConfig.Host, chainHost),
+			})
+		}
+	}
+}
\ No newline at end of file
diff --git a/supernode/services/verifier/verifier_test.go b/supernode/services/verifier/verifier_test.go
new file mode 100644
index 00000000..a0532efc
--- /dev/null
+++ b/supernode/services/verifier/verifier_test.go
@@ -0,0 +1,152 @@
+package verifier
+
+import (
+	"testing"
+
+	"github.com/LumeraProtocol/supernode/supernode/config"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNewConfigVerifier(t *testing.T) {
+	cfg := &config.Config{
+		SupernodeConfig: 
config.SupernodeConfig{
+			Identity: "lumera1testaddress",
+			KeyName:  "test-key",
+			Host:     "192.168.1.100",
+		},
+		P2PConfig: config.P2PConfig{
+			Port: 4445,
+		},
+	}
+
+	// Test that NewConfigVerifier returns a non-nil service
+	verifier := NewConfigVerifier(cfg, nil, nil) // nil client/keyring: the constructor only stores its arguments
+	assert.NotNil(t, verifier)
+	assert.Implements(t, (*ConfigVerifierService)(nil), verifier)
+}
+
+func TestVerificationResult_IsValid(t *testing.T) {
+	tests := []struct {
+		name     string
+		result   *VerificationResult
+		expected bool
+	}{
+		{
+			name: "valid with no errors",
+			result: &VerificationResult{
+				Valid:  true,
+				Errors: []ConfigError{},
+			},
+			expected: true,
+		},
+		{
+			name: "invalid with errors",
+			result: &VerificationResult{
+				Valid: false,
+				Errors: []ConfigError{
+					{Message: "test error"},
+				},
+			},
+			expected: false,
+		},
+		{
+			name: "valid flag true but has errors", // IsValid must AND the flag with an empty error list
+			result: &VerificationResult{
+				Valid: true,
+				Errors: []ConfigError{
+					{Message: "test error"},
+				},
+			},
+			expected: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equal(t, tt.expected, tt.result.IsValid())
+		})
+	}
+}
+
+func TestVerificationResult_HasWarnings(t *testing.T) {
+	tests := []struct {
+		name     string
+		result   *VerificationResult
+		expected bool
+	}{
+		{
+			name: "no warnings",
+			result: &VerificationResult{
+				Warnings: []ConfigError{},
+			},
+			expected: false,
+		},
+		{
+			name: "has warnings",
+			result: &VerificationResult{
+				Warnings: []ConfigError{
+					{Message: "test warning"},
+				},
+			},
+			expected: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equal(t, tt.expected, tt.result.HasWarnings())
+		})
+	}
+}
+
+func TestVerificationResult_Summary(t *testing.T) {
+	tests := []struct {
+		name     string
+		result   *VerificationResult
+		contains []string
+	}{
+		{
+			name: "success with no warnings",
+			result: &VerificationResult{
+				Valid:    true,
+				Errors:   []ConfigError{},
+				Warnings: []ConfigError{},
+			},
+			contains: []string{"✓ 
Config verification successful"}, + }, + { + name: "error message", + result: &VerificationResult{ + Valid: false, + Errors: []ConfigError{ + { + Message: "Key not found", + }, + }, + }, + contains: []string{"✗ Key not found"}, + }, + { + name: "warning message", + result: &VerificationResult{ + Valid: true, + Errors: []ConfigError{}, + Warnings: []ConfigError{ + { + Message: "Host mismatch: config=localhost, chain=192.168.1.1", + }, + }, + }, + contains: []string{"⚠ Host mismatch: config=localhost, chain=192.168.1.1"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + summary := tt.result.Summary() + for _, expected := range tt.contains { + assert.Contains(t, summary, expected) + } + }) + } +} \ No newline at end of file diff --git a/tests/system/config.test-1.yml b/tests/system/config.test-1.yml index eb214cd4..ac3cced4 100644 --- a/tests/system/config.test-1.yml +++ b/tests/system/config.test-1.yml @@ -2,7 +2,7 @@ supernode: key_name: "testkey1" identity: "lumera1em87kgrvgttrkvuamtetyaagjrhnu3vjy44at4" - host: "0.0.0.0" + host: "localhost" port: 4444 gateway_port: 8002 diff --git a/tests/system/config.test-2.yml b/tests/system/config.test-2.yml index 1b044a89..7cb80cf9 100644 --- a/tests/system/config.test-2.yml +++ b/tests/system/config.test-2.yml @@ -3,7 +3,7 @@ supernode: key_name: "testkey2" identity: "lumera1cf0ms9ttgdvz6zwlqfty4tjcawhuaq69p40w0c" - host: "0.0.0.0" + host: "localhost" port: 4446 gateway_port: 8003 diff --git a/tests/system/config.test-3.yml b/tests/system/config.test-3.yml index 2a259066..55e4e12e 100644 --- a/tests/system/config.test-3.yml +++ b/tests/system/config.test-3.yml @@ -3,7 +3,7 @@ supernode: key_name: "testkey3" identity: "lumera1cjyc4ruq739e2lakuhargejjkr0q5vg6x3d7kp" - host: "0.0.0.0" + host: "localhost" port: 4448 gateway_port: 8004