diff --git a/.changelog/6387.internal.0.md b/.changelog/6387.internal.0.md
new file mode 100644
index 00000000000..3c30eeb9750
--- /dev/null
+++ b/.changelog/6387.internal.0.md
@@ -0,0 +1,4 @@
+go/registry/api: Allow at most one runtime SGX role
+
+The stricter node registration rule is gated behind the 24.2 feature
+version and is therefore not breaking.
diff --git a/.changelog/6387.internal.1.md b/.changelog/6387.internal.1.md
new file mode 100644
index 00000000000..4101aba7016
--- /dev/null
+++ b/.changelog/6387.internal.1.md
@@ -0,0 +1,4 @@
+Add support for per-role quote policies
+
+This enables a more relaxed general policy but stricter requirements for
+nodes that can access the key manager (e.g. compute/observer nodes).
diff --git a/go/common/node/node.go b/go/common/node/node.go
index 27bd22bbd8b..b2941358de5 100644
--- a/go/common/node/node.go
+++ b/go/common/node/node.go
@@ -230,6 +230,18 @@ func (m RolesMask) IsSingleRole() bool {
 	return m != 0 && m&(m-1) == 0 && m&RoleReserved == 0
 }
+// AtMostOneRuntimeSGXRole returns true iff the roles mask contains at most one runtime SGX role.
+func (m RolesMask) AtMostOneRuntimeSGXRole() bool {
+	sgxRoles := m & (RoleComputeWorker | RoleObserver | RoleKeyManager)
+	if sgxRoles.IsEmptyRole() {
+		return true
+	}
+	if sgxRoles.IsSingleRole() {
+		return true
+	}
+	return false
+}
+
 func (m RolesMask) String() string {
 	if m&RoleReserved != 0 {
 		return "[invalid roles]"
 	}
@@ -332,7 +344,7 @@ func (n *Node) UnmarshalCBOR(data []byte) error {
 }
 
 // ValidateBasic performs basic descriptor validity checks.
-func (n *Node) ValidateBasic(strictVersion bool) error {
+func (n *Node) ValidateBasic(strictVersion bool, isFeatureVersion242 bool) error {
 	v := n.Versioned.V
 	switch strictVersion {
 	case true:
@@ -366,6 +378,13 @@ func (n *Node) ValidateBasic(strictVersion bool) error {
 		return fmt.Errorf("invalid role specified")
 	}
 
+	// Make sure a node can have at most one runtime SGX role.
+	if isFeatureVersion242 {
+		if !n.Roles.AtMostOneRuntimeSGXRole() {
+			return fmt.Errorf("multiple runtime SGX roles (roles: %s)", n.Roles)
+		}
+	}
+
 	return nil
 }
 
@@ -574,7 +593,7 @@ func HashRAK(rak signature.PublicKey) hash.Hash {
 }
 
 // Verify verifies the node's TEE capabilities, at the provided timestamp and height.
-func (c *CapabilityTEE) Verify(teeCfg *TEEFeatures, ts time.Time, height uint64, constraints []byte, nodeID signature.PublicKey, isFeatureVersion242 bool) error {
+func (c *CapabilityTEE) Verify(teeCfg *TEEFeatures, ts time.Time, height uint64, constraints []byte, nodeID signature.PublicKey, nodeRoles RolesMask, isFeatureVersion242 bool) error {
 	switch c.Hardware {
 	case TEEHardwareIntelSGX:
 		// Parse SGX remote attestation.
@@ -596,7 +615,7 @@ func (c *CapabilityTEE) Verify(teeCfg *TEEFeatures, ts time.Time, height uint64,
 		}
 
 		// Verify SGX remote attestation.
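To illustrate the bitmask check behind AtMostOneRuntimeSGXRole, here is a minimal standalone sketch. The RolesMask type and role constants below are simplified stand-ins; the real bit values live in go/common/node and differ.

package main

import "fmt"

// Simplified stand-ins for node.RolesMask and its bit flags.
type RolesMask uint32

const (
	RoleComputeWorker RolesMask = 1 << iota
	RoleStorageRPC
	RoleKeyManager
	RoleValidator
	RoleObserver
)

// atMostOneRuntimeSGXRole mirrors the new helper: mask out everything but the
// runtime SGX roles, then accept an empty result or a result with exactly one
// bit set (x != 0 && x&(x-1) == 0 is the usual power-of-two test).
func atMostOneRuntimeSGXRole(m RolesMask) bool {
	sgxRoles := m & (RoleComputeWorker | RoleObserver | RoleKeyManager)
	return sgxRoles == 0 || sgxRoles&(sgxRoles-1) == 0
}

func main() {
	fmt.Println(atMostOneRuntimeSGXRole(RoleValidator | RoleStorageRPC))     // true: no runtime SGX role
	fmt.Println(atMostOneRuntimeSGXRole(RoleComputeWorker | RoleValidator))  // true: exactly one
	fmt.Println(atMostOneRuntimeSGXRole(RoleComputeWorker | RoleKeyManager)) // false: two runtime SGX roles
}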
- return sa.Verify(teeCfg, ts, height, &sc, c.RAK, c.REK, nodeID) + return sa.Verify(teeCfg, ts, height, &sc, c.RAK, c.REK, nodeID, nodeRoles) default: return ErrInvalidTEEHardware } diff --git a/go/common/node/node_test.go b/go/common/node/node_test.go index 65d45a45f59..4533fa6ccc7 100644 --- a/go/common/node/node_test.go +++ b/go/common/node/node_test.go @@ -124,11 +124,11 @@ func TestReservedRoles(t *testing.T) { Versioned: cbor.NewVersioned(LatestNodeDescriptorVersion), Roles: 0xFFFFFFFF, } - err := n.ValidateBasic(false) + err := n.ValidateBasic(false, false) require.Error(err, "ValidateBasic should fail for reserved roles") n.Roles = 0 - err = n.ValidateBasic(false) + err = n.ValidateBasic(false, false) require.Error(err, "ValidateBasic should fail for empty roles") } @@ -139,7 +139,7 @@ func TestNodeDescriptorV2(t *testing.T) { Versioned: cbor.NewVersioned(2), Roles: RoleComputeWorker | roleReserved3, } - require.Error(v1.ValidateBasic(false), "V1 descriptors should not be allowed anymore") + require.Error(v1.ValidateBasic(false, false), "V1 descriptors should not be allowed anymore") v2 := nodeV2{ Versioned: cbor.NewVersioned(2), @@ -156,7 +156,7 @@ func TestNodeDescriptorV2(t *testing.T) { err := cbor.Unmarshal(raw, &v3) require.NoError(err, "cbor.Unmarshal") - err = v3.ValidateBasic(false) + err = v3.ValidateBasic(false, false) require.NoError(err, "ValidateBasic") require.True(v3.HasRoles(RoleComputeWorker)) require.False(v3.HasRoles(roleReserved3)) @@ -170,7 +170,7 @@ func TestNodeDescriptorV2(t *testing.T) { err = cbor.Unmarshal(raw, &v3) require.NoError(err, "cbor.Unmarshal") - err = v3.ValidateBasic(false) + err = v3.ValidateBasic(false, false) require.NoError(err, "ValidateBasic") require.True(v3.HasRoles(RoleComputeWorker)) require.False(v3.HasRoles(roleReserved3)) @@ -468,6 +468,38 @@ func TestNodeDeserialization(t *testing.T) { } } +func TestAtMostOneRuntimeSGXRole(t *testing.T) { + require := require.New(t) + + for _, tc := range []struct { + name string + roles RolesMask + want bool + }{ + // Valid: no SGX roles. + {"validator only", RoleValidator, true}, + {"validator + storage-rpc", RoleValidator | RoleStorageRPC, true}, + // Valid: single SGX role. + {"compute only", RoleComputeWorker, true}, + {"observer only", RoleObserver, true}, + {"key manager only", RoleKeyManager, true}, + {"empty", RoleEmpty, true}, + // Valid: non-SGX roles are ignored. + {"compute + validator", RoleComputeWorker | RoleValidator, true}, + {"key manager + validator", RoleKeyManager | RoleValidator, true}, + {"compute + storage-rpc", RoleComputeWorker | RoleStorageRPC, true}, + {"compute + validator + storage-rpc", RoleComputeWorker | RoleValidator | RoleStorageRPC, true}, + // Invalid: multiple SGX roles. + {"compute + observer", RoleComputeWorker | RoleObserver, false}, + {"compute + key manager", RoleComputeWorker | RoleKeyManager, false}, + {"compute + key manager + validator", RoleComputeWorker | RoleKeyManager | RoleValidator, false}, + } { + t.Run(tc.name, func(t *testing.T) { + require.Equal(tc.want, tc.roles.AtMostOneRuntimeSGXRole()) + }) + } +} + func TestNodeSoftwareVersion(t *testing.T) { require := require.New(t) diff --git a/go/common/node/sgx.go b/go/common/node/sgx.go index d2803ffb73b..0567ad7c9ab 100644 --- a/go/common/node/sgx.go +++ b/go/common/node/sgx.go @@ -32,9 +32,17 @@ type SGXConstraints struct { // Enclaves is the allowed MRENCLAVE/MRSIGNER pairs. Enclaves []sgx.EnclaveIdentity `json:"enclaves,omitempty"` - // Policy is the quote policy. 
+ // Policy is the default quote policy. The default policy must be satisfied + // unless there exists a corresponding per-role policy. Policy *quote.Policy `json:"policy,omitempty"` + // PerRolePolicy defines additional role specific quote policies, that overwrite + // Policy when node with these roles does an attestation. + // + // A valid entry is for either [RoleComputeWorker] or [RoleObserver]. Single entry + // should not encode multiple roles. + PerRolePolicy map[RolesMask]quote.Policy `json:"per_role_policy,omitempty"` + // MaxAttestationAge is the maximum attestation age (in blocks). MaxAttestationAge uint64 `json:"max_attestation_age,omitempty"` } @@ -111,23 +119,63 @@ func (sc *SGXConstraints) ValidateBasic(cfg *TEEFeatures, isFeatureVersion242 bo return fmt.Errorf("unsupported SGX constraints version: %d", sc.V) } - if sc.Policy == nil { - return nil + validatePolicy := func(policy *quote.Policy) error { + if policy == nil { + return nil + } + + // Check for TDX enablement. + if !cfg.SGX.TDX && policy.PCS != nil && policy.PCS.TDX != nil { + return fmt.Errorf("TDX policy not supported") + } + + // Check that policy is compliant with the current feature version. + return policy.Validate(isFeatureVersion242) + } + + validatePerRolePolicyEntry := func(role RolesMask, policy quote.Policy) error { + if !role.IsSingleRole() { + return fmt.Errorf("quote policies should have a single role") + } + if role != RoleComputeWorker && role != RoleObserver { + return fmt.Errorf("invalid role: only compute or observer role allowed") + } + if policy.IAS != nil { + return fmt.Errorf("invalid policy: IAS not allowed") + } + return validatePolicy(&policy) } - // Check for TDX enablement. - if !cfg.SGX.TDX && sc.Policy.PCS != nil && sc.Policy.PCS.TDX != nil { - return fmt.Errorf("TDX policy not supported") + if err := validatePolicy(sc.Policy); err != nil { + return fmt.Errorf("invalid default policy: %w", err) } - // Check that policy is compliant with the current feature version. - if err := sc.Policy.Validate(isFeatureVersion242); err != nil { - return fmt.Errorf("invalid policy: %w", err) + if !isFeatureVersion242 && sc.PerRolePolicy != nil { + return fmt.Errorf("per role policy should be nil until feature version 24.2") + } + + for role, policy := range sc.PerRolePolicy { + if err := validatePerRolePolicyEntry(role, policy); err != nil { + return fmt.Errorf("invalid per role policy entry (role: %s): %w", role, err) + } } return nil } +// PolicyFor returns a matching per-role policy when present, or otherwise falls back to the default policy. +// +// This function expects role mask that has at most one runtime SGX role. +func (sc *SGXConstraints) PolicyFor(roles RolesMask) *quote.Policy { + for role, policy := range sc.PerRolePolicy { + if role&roles == 0 { + continue + } + return &policy + } + return sc.Policy +} + // ContainsEnclave returns true iff the allowed enclave list in SGX constraints contain the given // enclave identity. func (sc *SGXConstraints) ContainsEnclave(eid sgx.EnclaveIdentity) bool { @@ -223,16 +271,23 @@ func (sa *SGXAttestation) Verify( rak signature.PublicKey, rek *x25519.PublicKey, nodeID signature.PublicKey, + nodeRoles RolesMask, ) error { if cfg == nil { cfg = &emptyFeatures } // Use defaults from consensus parameters. + // TODO: Handle default constraints overwrite consistently. + // See https://github.com/oasisprotocol/oasis-core/issues/6459. 
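As a rough illustration of the selection order implemented by PolicyFor, the following standalone sketch uses simplified stand-in types (the real node.RolesMask and quote.Policy are richer). Map iteration order does not matter here, because validation guarantees single-role keys and callers pass a mask with at most one runtime SGX role.

package main

import "fmt"

// Simplified stand-ins for node.RolesMask and quote.Policy.
type RolesMask uint32

const (
	RoleComputeWorker RolesMask = 1 << iota
	RoleObserver
)

type Policy struct{ Name string }

type SGXConstraints struct {
	Policy        *Policy
	PerRolePolicy map[RolesMask]Policy
}

// PolicyFor mirrors the new method: the first per-role entry whose role bit is
// set in the given mask wins; otherwise the default policy applies.
func (sc *SGXConstraints) PolicyFor(roles RolesMask) *Policy {
	for role, policy := range sc.PerRolePolicy {
		if role&roles != 0 {
			return &policy
		}
	}
	return sc.Policy
}

func main() {
	sc := SGXConstraints{
		Policy:        &Policy{Name: "default"},
		PerRolePolicy: map[RolesMask]Policy{RoleComputeWorker: {Name: "strict-compute"}},
	}
	fmt.Println(sc.PolicyFor(RoleComputeWorker).Name) // strict-compute
	fmt.Println(sc.PolicyFor(RoleObserver).Name)      // default: no observer entry
	fmt.Println(sc.PolicyFor(0).Name)                 // default: e.g. a ROFL-only node with an empty role mask
}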
cfg.SGX.ApplyDefaultConstraints(sc) + // Prior to 24.2 nodeRoles might have multiple roles, but as per-role policies are guaranteed + // to be empty this works fine. + policy := sc.PolicyFor(nodeRoles) + // Verify the quote. - verifiedQuote, err := sa.Quote.Verify(sc.Policy, ts) + verifiedQuote, err := sa.Quote.Verify(policy, ts) if err != nil { return err } diff --git a/go/common/node/sgx_test.go b/go/common/node/sgx_test.go index e252d7611ba..c69ee77f78a 100644 --- a/go/common/node/sgx_test.go +++ b/go/common/node/sgx_test.go @@ -78,6 +78,159 @@ func TestSGXConstraintsV1NilPolicy(t *testing.T) { require.NoError(err, "ValidateBasic V1 SGX constraints with nil policy") } +func TestSGXConstraintsPerRolePolicyValidation(t *testing.T) { + tests := []struct { + name string + cfg *TEEFeatures + isFeatureVersion242 bool + perRolePolicy map[RolesMask]quote.Policy + errContains string + }{ + { + name: "non-nil per-role policy before 24.2 is invalid", + cfg: &TEEFeatures{SGX: TEEFeaturesSGX{PCS: true}}, + isFeatureVersion242: false, + perRolePolicy: map[RolesMask]quote.Policy{}, + errContains: "per role policy should be nil until feature version 24.2", + }, + { + name: "compute role policy is valid", + cfg: &TEEFeatures{SGX: TEEFeaturesSGX{PCS: true}}, + isFeatureVersion242: true, + perRolePolicy: map[RolesMask]quote.Policy{ + RoleComputeWorker: {}, + }, + }, + { + name: "multi-role policy key is invalid", + cfg: &TEEFeatures{SGX: TEEFeaturesSGX{PCS: true}}, + isFeatureVersion242: true, + perRolePolicy: map[RolesMask]quote.Policy{ + RoleComputeWorker | RoleObserver: {}, + }, + errContains: "quote policies should have a single role", + }, + { + name: "key manager role policy is invalid", + cfg: &TEEFeatures{SGX: TEEFeaturesSGX{PCS: true}}, + isFeatureVersion242: true, + perRolePolicy: map[RolesMask]quote.Policy{ + RoleKeyManager: {}, + }, + errContains: "invalid role: only compute or observer role allowed", + }, + { + name: "empty role policy key is invalid", + cfg: &TEEFeatures{SGX: TEEFeaturesSGX{PCS: true}}, + isFeatureVersion242: true, + perRolePolicy: map[RolesMask]quote.Policy{ + RoleEmpty: {}, + }, + errContains: "quote policies should have a single role", + }, + { + name: "reserved role policy key is invalid", + cfg: &TEEFeatures{SGX: TEEFeaturesSGX{PCS: true}}, + isFeatureVersion242: true, + perRolePolicy: map[RolesMask]quote.Policy{ + roleReserved3: {}, + }, + errContains: "quote policies should have a single role", + }, + { + name: "tdx policy in per-role entry requires tdx feature", + cfg: &TEEFeatures{SGX: TEEFeaturesSGX{PCS: true}}, + isFeatureVersion242: true, + perRolePolicy: map[RolesMask]quote.Policy{ + RoleComputeWorker: { + PCS: &pcs.QuotePolicy{ + TDX: &pcs.TdxQuotePolicy{}, + }, + }, + }, + errContains: "TDX policy not supported", + }, + { + name: "non-empty IAS per role entry not allowed", + cfg: &TEEFeatures{SGX: TEEFeaturesSGX{PCS: true}}, + isFeatureVersion242: true, + perRolePolicy: map[RolesMask]quote.Policy{ + RoleComputeWorker: { + IAS: &ias.QuotePolicy{}, + }, + }, + errContains: "IAS not allowed", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + sc := SGXConstraints{ + Versioned: cbor.NewVersioned(1), + PerRolePolicy: tc.perRolePolicy, + } + + err := sc.ValidateBasic(tc.cfg, tc.isFeatureVersion242) + if tc.errContains == "" { + require.NoError(t, err) + return + } + require.Error(t, err) + require.ErrorContains(t, err, tc.errContains) + }) + } +} + +func TestSGXConstraintsPolicyFor(t *testing.T) { + defaultPolicy := quote.Policy{ + PCS: 
&pcs.QuotePolicy{TCBValidityPeriod: 20}, + } + rolePolicy := quote.Policy{ + PCS: &pcs.QuotePolicy{TCBValidityPeriod: 10}, + } + + for _, tc := range []struct { + name string + roles RolesMask + perRolePolicy map[RolesMask]quote.Policy + want quote.Policy + }{ + { + name: "observer with no per-role override should use default", + roles: RoleObserver, + perRolePolicy: nil, + want: defaultPolicy, + }, + { + name: "compute with per-role override should use compute policy", + roles: RoleComputeWorker, + perRolePolicy: map[RolesMask]quote.Policy{ + RoleComputeWorker: rolePolicy, + }, + want: rolePolicy, + }, + { + name: "client with empty per-role should use default", + roles: RoleEmpty, + perRolePolicy: map[RolesMask]quote.Policy{ + RoleComputeWorker: rolePolicy, + }, + want: defaultPolicy, + }, + } { + t.Run(tc.name, func(t *testing.T) { + sc := SGXConstraints{ + Versioned: cbor.NewVersioned(1), + Policy: &defaultPolicy, + PerRolePolicy: tc.perRolePolicy, + } + + got := sc.PolicyFor(tc.roles) + require.EqualValues(t, &tc.want, got) + }) + } +} + func TestSGXAttestationV0(t *testing.T) { require := require.New(t) diff --git a/go/consensus/cometbft/apps/keymanager/secrets/status.go b/go/consensus/cometbft/apps/keymanager/secrets/status.go index fc978655dcd..8cd31160e5f 100644 --- a/go/consensus/cometbft/apps/keymanager/secrets/status.go +++ b/go/consensus/cometbft/apps/keymanager/secrets/status.go @@ -112,7 +112,7 @@ nextNode: continue nextNode } - initResponse, err := VerifyExtraInfo(ctx.Logger(), n.ID, kmrt, nodeRt, ts, height, params, isFeatureVersion242) + initResponse, err := VerifyExtraInfo(ctx.Logger(), n.ID, n.Roles, kmrt, nodeRt, ts, height, params, isFeatureVersion242) if err != nil { ctx.Logger().Error("failed to validate ExtraInfo", append(vars, "err", err)...) 
continue nextNode @@ -227,6 +227,7 @@ nextNode: func VerifyExtraInfo( logger *logging.Logger, nodeID signature.PublicKey, + nodeRoles node.RolesMask, rt *registry.Runtime, nodeRt *node.Runtime, ts time.Time, @@ -234,7 +235,7 @@ func VerifyExtraInfo( params *registry.ConsensusParameters, isFeatureVersion242 bool, ) (*secrets.InitResponse, error) { - if err := registry.VerifyNodeRuntimeEnclaveIDs(logger, nodeID, nodeRt, rt, params.TEEFeatures, ts, height, isFeatureVersion242); err != nil { + if err := registry.VerifyNodeRuntimeEnclaveIDs(logger, nodeID, nodeRoles, nodeRt, rt, params.TEEFeatures, ts, height, isFeatureVersion242); err != nil { return nil, err } if nodeRt.ExtraInfo == nil { diff --git a/go/consensus/cometbft/apps/scheduler/scheduler.go b/go/consensus/cometbft/apps/scheduler/scheduler.go index 8becc443755..950c5d444f4 100644 --- a/go/consensus/cometbft/apps/scheduler/scheduler.go +++ b/go/consensus/cometbft/apps/scheduler/scheduler.go @@ -439,6 +439,7 @@ func isSuitableExecutorWorker( uint64(ctx.LastHeight()), activeDeployment.TEE, n.node.ID, + n.node.Roles, isFeatureVersion242, ); err != nil { ctx.Logger().Warn("failed to verify node TEE attestation", diff --git a/go/oasis-node/cmd/debug/byzantine/steps_test.go b/go/oasis-node/cmd/debug/byzantine/steps_test.go index 38e1afb7645..27aafac3a86 100644 --- a/go/oasis-node/cmd/debug/byzantine/steps_test.go +++ b/go/oasis-node/cmd/debug/byzantine/steps_test.go @@ -31,5 +31,5 @@ func TestFakeCapabilitySGX(t *testing.T) { ias.SetSkipVerify() ias.SetAllowDebugEnclaves() - require.NoError(t, fakeCapabilitiesSGX.TEE.Verify(&teeCfg, time.Now(), 1, cs, nodeID, true), "fakeCapabilitiesSGX not valid") + require.NoError(t, fakeCapabilitiesSGX.TEE.Verify(&teeCfg, time.Now(), 1, cs, nodeID, node.RoleEmpty, true), "fakeCapabilitiesSGX not valid") } diff --git a/go/oasis-node/cmd/genesis/migrate.go b/go/oasis-node/cmd/genesis/migrate.go index 9456d27bb9e..fb41acbe616 100644 --- a/go/oasis-node/cmd/genesis/migrate.go +++ b/go/oasis-node/cmd/genesis/migrate.go @@ -285,7 +285,7 @@ NodeLoop: ) continue } - if err = node.ValidateBasic(false); err != nil { + if err = node.ValidateBasic(false, false); err != nil { logger.Warn("removing node not passing basic validation check", "err", err, "node_id", node.ID, diff --git a/go/oasis-node/cmd/node/node.go b/go/oasis-node/cmd/node/node.go index 4cbaa595d1b..3351ef821e1 100644 --- a/go/oasis-node/cmd/node/node.go +++ b/go/oasis-node/cmd/node/node.go @@ -11,6 +11,7 @@ import ( "github.com/oasisprotocol/oasis-core/go/common/grpc" "github.com/oasisprotocol/oasis-core/go/common/identity" "github.com/oasisprotocol/oasis-core/go/common/logging" + "github.com/oasisprotocol/oasis-core/go/common/node" "github.com/oasisprotocol/oasis-core/go/common/persistent" "github.com/oasisprotocol/oasis-core/go/common/version" "github.com/oasisprotocol/oasis-core/go/config" @@ -205,12 +206,29 @@ func (n *Node) initRuntimeWorkers(genesisDoc *genesisAPI.Document) error { } n.svcMgr.Register(n.RuntimeRegistry) + // Determine runtime role. 
+ runtimeRoles := node.RoleEmpty + switch config.GlobalConfig.Mode { + case config.ModeCompute: + runtimeRoles = node.RoleComputeWorker + case config.ModeKeyManager: + runtimeRoles = node.RoleKeyManager + case config.ModeClient, config.ModeStatelessClient: + if config.GlobalConfig.Registration.Entity != "" || config.GlobalConfig.Registration.EntityID != "" { + runtimeRoles |= node.RoleObserver + } + if config.GlobalConfig.Storage.PublicRPCEnabled { + runtimeRoles |= node.RoleStorageRPC + } + } + // Initialize the common worker. n.CommonWorker, err = workerCommon.New( n, n.dataDir, n.chainContext, n.Identity, + runtimeRoles, n.Consensus, n.LightService, n.P2P, diff --git a/go/registry/api/api.go b/go/registry/api/api.go index 88810da2feb..4a5623418aa 100644 --- a/go/registry/api/api.go +++ b/go/registry/api/api.go @@ -524,7 +524,7 @@ func VerifyRegisterNodeArgs( // nolint: gocyclo ) return nil, nil, ErrInvalidSignature } - if err := n.ValidateBasic(!isGenesis && !isSanityCheck); err != nil { + if err := n.ValidateBasic(!isGenesis && !isSanityCheck, isFeatureVersion242); err != nil { logger.Error("RegisterNode: invalid node descriptor", "node", n, "err", err, @@ -574,9 +574,6 @@ func VerifyRegisterNodeArgs( // nolint: gocyclo return nil, nil, fmt.Errorf("%w: expiration period greater than allowed", ErrInvalidArgument) } - // TODO: Key manager nodes maybe should be restricted to only being a - // key manager at the expense of breaking some of our test configs. - var runtimes []*Runtime switch len(n.Runtimes) { case 0: @@ -629,7 +626,7 @@ func VerifyRegisterNodeArgs( // nolint: gocyclo // both validators and compute nodes and have out of date attestation evidence. Removing // such nodes could lead to consensus not having the proper majority. This is safe as // attestation evidence is independently verified before scheduling committees. 
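The role mask derived in node.go above is what eventually reaches CapabilityTEE.Verify and SGXConstraints.PolicyFor, and each node mode yields at most one runtime SGX role by construction. A condensed sketch of that mapping as a standalone helper (the helper name is hypothetical; the configuration fields are the ones referenced in the switch above):

// runtimeRolesFromConfig mirrors the switch in initRuntimeWorkers: compute and
// key manager nodes carry exactly one runtime SGX role, while client nodes may
// pick up the observer role (when registration is configured) and the non-SGX
// storage RPC role.
func runtimeRolesFromConfig() node.RolesMask {
	roles := node.RoleEmpty
	switch config.GlobalConfig.Mode {
	case config.ModeCompute:
		roles = node.RoleComputeWorker
	case config.ModeKeyManager:
		roles = node.RoleKeyManager
	case config.ModeClient, config.ModeStatelessClient:
		if config.GlobalConfig.Registration.Entity != "" || config.GlobalConfig.Registration.EntityID != "" {
			roles |= node.RoleObserver
		}
		if config.GlobalConfig.Storage.PublicRPCEnabled {
			roles |= node.RoleStorageRPC
		}
	}
	return roles
}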
- if err := VerifyNodeRuntimeEnclaveIDs(logger, n.ID, rt, regRt, params.TEEFeatures, now, height, isFeatureVersion242); err != nil && !isSanityCheck && !isGenesis { + if err := VerifyNodeRuntimeEnclaveIDs(logger, n.ID, n.Roles, rt, regRt, params.TEEFeatures, now, height, isFeatureVersion242); err != nil && !isSanityCheck && !isGenesis { return nil, nil, err } @@ -806,6 +803,7 @@ func VerifyRegisterNodeArgs( // nolint: gocyclo func VerifyNodeRuntimeEnclaveIDs( logger *logging.Logger, nodeID signature.PublicKey, + nodeRoles node.RolesMask, rt *node.Runtime, regRt *Runtime, teeCfg *node.TEEFeatures, @@ -840,7 +838,7 @@ func VerifyNodeRuntimeEnclaveIDs( continue } - if err := rt.Capabilities.TEE.Verify(teeCfg, ts, height, rtVersionInfo.TEE, nodeID, isFeatureVersion242); err != nil { + if err := rt.Capabilities.TEE.Verify(teeCfg, ts, height, rtVersionInfo.TEE, nodeID, nodeRoles, isFeatureVersion242); err != nil { logger.Error("VerifyNodeRuntimeEnclaveIDs: failed to validate attestation", "node_id", nodeID, "runtime_id", rt.ID, diff --git a/go/registry/api/api_test.go b/go/registry/api/api_test.go index 96b654be558..7fa80253230 100644 --- a/go/registry/api/api_test.go +++ b/go/registry/api/api_test.go @@ -115,6 +115,9 @@ func TestVerifyRegisterNodeArgs(t *testing.T) { Nodes: []signature.PublicKey{nodeSigner.Public()}, } + runtimeID := common.NewTestNamespaceFromSeed([]byte("test namespace"), 0) + rtLookup.runtimes[runtimeID] = &Runtime{} + for _, tc := range []struct { n node.Node err error @@ -257,6 +260,64 @@ func TestVerifyRegisterNodeArgs(t *testing.T) { "observer without runtimes is not allowed", true, }, + { + node.Node{ + Versioned: cbor.NewVersioned(2), + ID: nodeSigner.Public(), + EntityID: entityID1, + Consensus: node.ConsensusInfo{ + ID: nodeConsensusSigner.Public(), + Addresses: []node.ConsensusAddress{ + {ID: nodeConsensusSigner.Public(), Address: node.Address{IP: net.IPv4(127, 0, 0, 1), Port: 9000}}, + }, + }, + TLS: node.TLSInfo{ + PubKey: nodeTLSSigner.Public(), + }, + P2P: node.P2PInfo{ + ID: nodeP2PSigner.Public(), + Addresses: []node.Address{{IP: net.IPv4(127, 0, 0, 1), Port: 9002}}, + }, + VRF: node.VRFInfo{ + ID: nodeVRFSigner.Public(), + }, + Roles: node.RoleValidator | node.RoleComputeWorker | node.RoleKeyManager, + Expiration: 11, + Runtimes: []*node.Runtime{{ID: runtimeID}}, + }, + nil, + "multiple SGX runtime roles allowed with old consensus feature version", + false, + }, + { + node.Node{ + Versioned: cbor.NewVersioned(2), + ID: nodeSigner.Public(), + EntityID: entityID1, + Consensus: node.ConsensusInfo{ + ID: nodeConsensusSigner.Public(), + Addresses: []node.ConsensusAddress{ + {ID: nodeConsensusSigner.Public(), Address: node.Address{IP: net.IPv4(127, 0, 0, 1), Port: 9000}}, + }, + }, + TLS: node.TLSInfo{ + PubKey: nodeTLSSigner.Public(), + }, + P2P: node.P2PInfo{ + ID: nodeP2PSigner.Public(), + Addresses: []node.Address{{IP: net.IPv4(127, 0, 0, 1), Port: 9002}}, + }, + VRF: node.VRFInfo{ + ID: nodeVRFSigner.Public(), + }, + Roles: node.RoleValidator | node.RoleComputeWorker | node.RoleKeyManager, + Expiration: 11, + Runtimes: []*node.Runtime{{ID: runtimeID}}, + }, + ErrInvalidArgument, + "multiple SGX runtime roles are not allowed", + true, + }, } { signedNode, err := node.MultiSignNode( diff --git a/go/registry/api/runtime.go b/go/registry/api/runtime.go index 722ed53febf..61b2e52032c 100644 --- a/go/registry/api/runtime.go +++ b/go/registry/api/runtime.go @@ -597,6 +597,9 @@ func (r *Runtime) ValidateDeployments(now beacon.EpochTime, params *ConsensusPar if err := 
cs.ValidateBasic(params.TEEFeatures, isFeatureVersion242); err != nil { return fmt.Errorf("%w: invalid SGX TEE constraints", ErrInvalidArgument) } + if r.Kind == KindKeyManager && cs.PerRolePolicy != nil { + return fmt.Errorf("%w: invalid SGX TEE constraints: keymanager runtime with per-role policies", ErrInvalidArgument) + } if len(cs.Enclaves) == 0 { return fmt.Errorf("%w: invalid SGX TEE constraints", ErrNoEnclaveForRuntime) } diff --git a/go/registry/api/runtime_test.go b/go/registry/api/runtime_test.go index 562f65a3278..60fd7af28b4 100644 --- a/go/registry/api/runtime_test.go +++ b/go/registry/api/runtime_test.go @@ -18,6 +18,8 @@ import ( "github.com/oasisprotocol/oasis-core/go/common/logging" "github.com/oasisprotocol/oasis-core/go/common/node" "github.com/oasisprotocol/oasis-core/go/common/quantity" + "github.com/oasisprotocol/oasis-core/go/common/sgx" + "github.com/oasisprotocol/oasis-core/go/common/sgx/quote" "github.com/oasisprotocol/oasis-core/go/common/version" "github.com/oasisprotocol/oasis-core/go/scheduler/api" ) @@ -255,6 +257,47 @@ func TestVerifyRuntime(t *testing.T) { nil, "valid runtime", }, + { + Runtime{ + Versioned: cbor.NewVersioned(3), + EntityID: signature.NewPublicKey("1234567890000000000000000000000000000000000000000000000000000000"), + ID: keymanagerID, + Kind: KindKeyManager, + TEEHardware: node.TEEHardwareIntelSGX, + Deployments: []*VersionInfo{ + { + Version: version.Version{ + Major: 1, + Minor: 0, + Patch: 0, + }, + TEE: cbor.Marshal(node.SGXConstraints{ + Versioned: cbor.NewVersioned(1), + Enclaves: []sgx.EnclaveIdentity{ + {}, + }, + PerRolePolicy: map[node.RolesMask]quote.Policy{ + node.RoleComputeWorker: {}, + }, + }), + }, + }, + AdmissionPolicy: RuntimeAdmissionPolicy{ + AnyNode: &AnyNodeRuntimeAdmissionPolicy{}, + }, + GovernanceModel: GovernanceConsensus, + }, + func(cp *ConsensusParameters) { + cp.TEEFeatures = &node.TEEFeatures{ + SGX: node.TEEFeaturesSGX{ + PCS: true, + }, + } + cp.DebugAllowTestRuntimes = true + }, + ErrInvalidArgument, + "keymanager runtime with per-role SGX policy should be rejected", + }, { Runtime{ Versioned: cbor.NewVersioned(3), diff --git a/go/runtime/host/host.go b/go/runtime/host/host.go index 63bf465e4ef..6e9e4858e5d 100644 --- a/go/runtime/host/host.go +++ b/go/runtime/host/host.go @@ -22,6 +22,10 @@ type Config struct { // ID is the runtime identifier. ID common.Namespace + // RuntimeRoles is the runtime role mask the node will register RONL component with. + // In case of ROFL only nodes (no registration) this is expected to be empty role. + RuntimeRoles node.RolesMask + // Component is the component that should be provisioned. 
Component *bundle.ExplodedComponent diff --git a/go/runtime/host/sgx/common/common.go b/go/runtime/host/sgx/common/common.go index 51568a842b1..107f7fc5c74 100644 --- a/go/runtime/host/sgx/common/common.go +++ b/go/runtime/host/sgx/common/common.go @@ -48,7 +48,7 @@ func GetQuotePolicy( return nil, fmt.Errorf("malformed runtime SGX constraints: %w", err) } - return sc.Policy, nil + return sc.PolicyFor(cfg.RuntimeRoles), nil } return fallbackPolicy, nil case component.ROFL: diff --git a/go/runtime/registry/host.go b/go/runtime/registry/host.go index f653b140865..6410184e187 100644 --- a/go/runtime/registry/host.go +++ b/go/runtime/registry/host.go @@ -21,25 +21,27 @@ type RuntimeHostNode struct { host *composite.Host - runtime Runtime - provisioner host.Provisioner - handler host.RuntimeHandler - logManager *log.Manager + runtime Runtime + runtimeRoles node.RolesMask + provisioner host.Provisioner + handler host.RuntimeHandler + logManager *log.Manager rofls map[component.ID]version.Version } // NewRuntimeHostNode creates a new runtime host node. -func NewRuntimeHostNode(runtime Runtime, provisioner host.Provisioner, handler host.RuntimeHandler, logManager *log.Manager) (*RuntimeHostNode, error) { +func NewRuntimeHostNode(runtime Runtime, runtimeRoles node.RolesMask, provisioner host.Provisioner, handler host.RuntimeHandler, logManager *log.Manager) (*RuntimeHostNode, error) { h := composite.NewHost(runtime.ID()) return &RuntimeHostNode{ - host: h, - logManager: logManager, - runtime: runtime, - handler: handler, - provisioner: provisioner, - rofls: make(map[component.ID]version.Version), + host: h, + logManager: logManager, + runtime: runtime, + runtimeRoles: runtimeRoles, + handler: handler, + provisioner: provisioner, + rofls: make(map[component.ID]version.Version), }, nil } @@ -67,6 +69,7 @@ func (n *RuntimeHostNode) ProvisionHostedRuntimeComponent(comp *bundle.ExplodedC cfg := host.Config{ ID: n.runtime.ID(), + RuntimeRoles: n.runtimeRoles, Component: comp, MessageHandler: handler, LocalConfig: getLocalConfig(n.runtime.ID(), comp.ID()), diff --git a/go/upgrade/migrations/consensus_242.go b/go/upgrade/migrations/consensus_242.go index 57ad426d23e..ce8098656b9 100644 --- a/go/upgrade/migrations/consensus_242.go +++ b/go/upgrade/migrations/consensus_242.go @@ -15,9 +15,12 @@ import ( // are allowed to query runtime key shares. // - The `FMSPCWhitelist` field in the quote policy, which defines which processor packages // and platform instances are allowed. +// - The `PerRolePolicy` field in the `SGXConstraints`, which defines additional role +// specific policies that may overwrite the default policy. // - An updated key manager policy update transaction that applies a new policy at the epoch // boundary. // - A stricter node registration rule where observer nodes must include runtimes. +// - A stricter node registration rule where at most one runtime SGX role is allowed. const Consensus242 = "consensus242" // Version242 is the Oasis Core 24.2 version. 
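To make the new constraints concrete, here is a sketch of how a runtime owner might combine a relaxed default policy with stricter policies for the roles that can access the key manager; the TCB validity values are purely illustrative. A key manager runtime itself must leave PerRolePolicy empty (see ValidateDeployments above).

package main

import (
	"fmt"

	"github.com/oasisprotocol/oasis-core/go/common/cbor"
	"github.com/oasisprotocol/oasis-core/go/common/node"
	"github.com/oasisprotocol/oasis-core/go/common/sgx/pcs"
	"github.com/oasisprotocol/oasis-core/go/common/sgx/quote"
)

func main() {
	sc := node.SGXConstraints{
		Versioned: cbor.NewVersioned(1),
		// Relaxed default policy; this is what e.g. key manager nodes keep using.
		Policy: &quote.Policy{
			PCS: &pcs.QuotePolicy{TCBValidityPeriod: 30},
		},
		// Stricter per-role policies for nodes that can access the key manager.
		PerRolePolicy: map[node.RolesMask]quote.Policy{
			node.RoleComputeWorker: {PCS: &pcs.QuotePolicy{TCBValidityPeriod: 10}},
			node.RoleObserver:      {PCS: &pcs.QuotePolicy{TCBValidityPeriod: 10}},
		},
	}

	cfg := &node.TEEFeatures{SGX: node.TEEFeaturesSGX{PCS: true}}
	fmt.Println(sc.ValidateBasic(cfg, true))  // <nil>: accepted once the 24.2 feature version is active
	fmt.Println(sc.ValidateBasic(cfg, false)) // rejected before the 24.2 feature version
}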
diff --git a/go/worker/common/committee/node.go b/go/worker/common/committee/node.go index 7c1ce78deee..ae23114191b 100644 --- a/go/worker/common/committee/node.go +++ b/go/worker/common/committee/node.go @@ -11,6 +11,7 @@ import ( beacon "github.com/oasisprotocol/oasis-core/go/beacon/api" "github.com/oasisprotocol/oasis-core/go/common/identity" "github.com/oasisprotocol/oasis-core/go/common/logging" + "github.com/oasisprotocol/oasis-core/go/common/node" "github.com/oasisprotocol/oasis-core/go/common/service" "github.com/oasisprotocol/oasis-core/go/common/version" "github.com/oasisprotocol/oasis-core/go/config" @@ -59,6 +60,7 @@ type Node struct { HostNode control.NodeController Identity *identity.Identity + RuntimeRoles node.RolesMask KeyManager keymanager.Backend KeyManagerClient *KeyManagerClientWrapper Consensus consensus.Service @@ -637,6 +639,7 @@ func NewNode( provisioner host.Provisioner, rtRegistry runtimeRegistry.Registry, identity *identity.Identity, + runtimeRoles node.RolesMask, keymanager keymanager.Backend, consensus consensus.Service, lightProvider consensus.LightProvider, @@ -664,6 +667,7 @@ func NewNode( Runtime: runtime, RuntimeRegistry: rtRegistry, Identity: identity, + RuntimeRoles: runtimeRoles, KeyManager: keymanager, Consensus: consensus, LightProvider: lightProvider, @@ -686,7 +690,7 @@ func NewNode( handler := runtimeRegistry.NewRuntimeHostHandler(&nodeEnvironment{n}, n.Runtime, consensus) // Prepare the runtime host node helpers. - rhn, err := runtimeRegistry.NewRuntimeHostNode(runtime, provisioner, handler, rtRegistry.GetLogManager()) + rhn, err := runtimeRegistry.NewRuntimeHostNode(runtime, runtimeRoles, provisioner, handler, rtRegistry.GetLogManager()) if err != nil { return nil, err } diff --git a/go/worker/common/worker.go b/go/worker/common/worker.go index fb0b8b3f2ec..005bea4f366 100644 --- a/go/worker/common/worker.go +++ b/go/worker/common/worker.go @@ -6,6 +6,7 @@ import ( "github.com/oasisprotocol/oasis-core/go/common" "github.com/oasisprotocol/oasis-core/go/common/identity" "github.com/oasisprotocol/oasis-core/go/common/logging" + "github.com/oasisprotocol/oasis-core/go/common/node" "github.com/oasisprotocol/oasis-core/go/config" consensus "github.com/oasisprotocol/oasis-core/go/consensus/api" control "github.com/oasisprotocol/oasis-core/go/control/api" @@ -25,6 +26,7 @@ type Worker struct { DataDir string ChainContext string Identity *identity.Identity + runtimeRoles node.RolesMask Consensus consensus.Service LightProvider consensus.LightProvider P2P p2p.Service @@ -161,6 +163,7 @@ func (w *Worker) registerRuntime(runtime runtimeRegistry.Runtime) error { w.Provisioner, w.RuntimeRegistry, w.Identity, + w.runtimeRoles, w.KeyManager, w.Consensus, w.LightProvider, @@ -185,6 +188,7 @@ func New( dataDir string, chainContext string, identity *identity.Identity, + runtimeRoles node.RolesMask, consensus consensus.Service, lightProvider consensus.LightProvider, p2p p2p.Service, @@ -215,6 +219,7 @@ func New( DataDir: dataDir, ChainContext: chainContext, Identity: identity, + runtimeRoles: runtimeRoles, Consensus: consensus, LightProvider: lightProvider, P2P: p2p, diff --git a/go/worker/keymanager/init.go b/go/worker/keymanager/init.go index 5e36a5fe867..a7d8c70a32a 100644 --- a/go/worker/keymanager/init.go +++ b/go/worker/keymanager/init.go @@ -82,7 +82,7 @@ func New( handler := runtimeRegistry.NewRuntimeHostHandler(&workerEnvironment{w}, w.runtime, w.commonWorker.Consensus) // Prepare the runtime host node helpers. 
- w.RuntimeHostNode, err = runtimeRegistry.NewRuntimeHostNode(w.runtime, provisioner, handler, w.commonWorker.RuntimeRegistry.GetLogManager()) + w.RuntimeHostNode, err = runtimeRegistry.NewRuntimeHostNode(w.runtime, node.RoleKeyManager, provisioner, handler, w.commonWorker.RuntimeRegistry.GetLogManager()) if err != nil { return nil, fmt.Errorf("worker/keymanager: failed to create runtime host helpers: %w", err) }
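Because the key manager worker above always provisions its runtime with node.RoleKeyManager, and validated constraints may only carry compute or observer entries, key manager attestation always falls back to the default policy. A small test-style sketch of that invariant, complementing TestSGXConstraintsPolicyFor (the test name and package are hypothetical):

package node_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/oasisprotocol/oasis-core/go/common/cbor"
	"github.com/oasisprotocol/oasis-core/go/common/node"
	"github.com/oasisprotocol/oasis-core/go/common/sgx/pcs"
	"github.com/oasisprotocol/oasis-core/go/common/sgx/quote"
)

func TestKeyManagerFallsBackToDefaultPolicy(t *testing.T) {
	defaultPolicy := quote.Policy{PCS: &pcs.QuotePolicy{TCBValidityPeriod: 30}}

	sc := node.SGXConstraints{
		Versioned: cbor.NewVersioned(1),
		Policy:    &defaultPolicy,
		PerRolePolicy: map[node.RolesMask]quote.Policy{
			node.RoleComputeWorker: {PCS: &pcs.QuotePolicy{TCBValidityPeriod: 10}},
		},
	}

	// RoleKeyManager never matches a per-role entry, so the default applies.
	require.Equal(t, &defaultPolicy, sc.PolicyFor(node.RoleKeyManager))
}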